FreeBSD Phabricator code review D38124

D38124.diff (raw unified diff follows)
diff --git a/lib/libvmmapi/Makefile b/lib/libvmmapi/Makefile
--- a/lib/libvmmapi/Makefile
+++ b/lib/libvmmapi/Makefile
@@ -2,6 +2,7 @@
PACKAGE=lib${LIB}
LIB= vmmapi
+SHLIB_MAJOR= 6
SRCS= vmmapi.c vmmapi_freebsd.c
INCS= vmmapi.h
diff --git a/lib/libvmmapi/internal.h b/lib/libvmmapi/internal.h
new file mode 100644
--- /dev/null
+++ b/lib/libvmmapi/internal.h
@@ -0,0 +1,17 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 John Baldwin <jhb@FreeBSD.org>
+ */
+
+#ifndef __VMMAPI_INTERNAL_H__
+#define __VMMAPI_INTERNAL_H__
+
+struct vmctx;
+
+struct vcpu {
+ struct vmctx *ctx;
+ int vcpuid;
+};
+
+#endif /* !__VMMAPI_INTERNAL_H__ */
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -43,9 +43,10 @@
* API version for out-of-tree consumers like grub-bhyve for making compile
* time decisions.
*/
-#define VMMAPI_VERSION 0104 /* 2 digit major followed by 2 digit minor */
+#define VMMAPI_VERSION 0200 /* 2 digit major followed by 2 digit minor */
struct iovec;
+struct vcpu;
struct vmctx;
struct vm_snapshot_meta;
enum x2apic_state;
@@ -122,15 +123,18 @@
void vm_close(struct vmctx *ctx);
void vm_destroy(struct vmctx *ctx);
int vm_limit_rights(struct vmctx *ctx);
+struct vcpu *vm_vcpu_open(struct vmctx *ctx, int vcpuid);
+void vm_vcpu_close(struct vcpu *vcpu);
+int vcpu_id(struct vcpu *vcpu);
int vm_parse_memsize(const char *optarg, size_t *memsize);
int vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
void *vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
/* inverse operation to vm_map_gpa - extract guest address from host pointer */
vm_paddr_t vm_rev_map_gpa(struct vmctx *ctx, void *addr);
int vm_get_gpa_pmap(struct vmctx *, uint64_t gpa, uint64_t *pte, int *num);
-int vm_gla2gpa(struct vmctx *, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *fault);
-int vm_gla2gpa_nofault(struct vmctx *, int vcpuid,
+int vm_gla2gpa_nofault(struct vcpu *vcpu,
struct vm_guest_paging *paging, uint64_t gla, int prot,
uint64_t *gpa, int *fault);
uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
@@ -140,44 +144,43 @@
const char *vm_get_name(struct vmctx *ctx);
size_t vm_get_lowmem_size(struct vmctx *ctx);
size_t vm_get_highmem_size(struct vmctx *ctx);
-int vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
+int vm_set_desc(struct vcpu *vcpu, int reg,
uint64_t base, uint32_t limit, uint32_t access);
-int vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
+int vm_get_desc(struct vcpu *vcpu, int reg,
uint64_t *base, uint32_t *limit, uint32_t *access);
-int vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg,
- struct seg_desc *seg_desc);
-int vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val);
-int vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *retval);
-int vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
+int vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc);
+int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
+int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
+int vm_set_register_set(struct vcpu *vcpu, unsigned int count,
const int *regnums, uint64_t *regvals);
-int vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
+int vm_get_register_set(struct vcpu *vcpu, unsigned int count,
const int *regnums, uint64_t *regvals);
-int vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *ret_vmexit);
+int vm_run(struct vcpu *vcpu, struct vm_exit *ret_vmexit);
int vm_suspend(struct vmctx *ctx, enum vm_suspend_how how);
int vm_reinit(struct vmctx *ctx);
int vm_apicid2vcpu(struct vmctx *ctx, int apicid);
-int vm_inject_exception(struct vmctx *ctx, int vcpu, int vector,
+int vm_inject_exception(struct vcpu *vcpu, int vector,
int errcode_valid, uint32_t errcode, int restart_instruction);
-int vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector);
-int vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector);
+int vm_lapic_irq(struct vcpu *vcpu, int vector);
+int vm_lapic_local_irq(struct vcpu *vcpu, int vector);
int vm_lapic_msi(struct vmctx *ctx, uint64_t addr, uint64_t msg);
int vm_ioapic_assert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_deassert_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pulse_irq(struct vmctx *ctx, int irq);
int vm_ioapic_pincount(struct vmctx *ctx, int *pincount);
-int vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu,
+int vm_readwrite_kernemu_device(struct vcpu *vcpu,
vm_paddr_t gpa, bool write, int size, uint64_t *value);
int vm_isa_assert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_deassert_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_pulse_irq(struct vmctx *ctx, int atpic_irq, int ioapic_irq);
int vm_isa_set_irq_trigger(struct vmctx *ctx, int atpic_irq,
enum vm_intr_trigger trigger);
-int vm_inject_nmi(struct vmctx *ctx, int vcpu);
+int vm_inject_nmi(struct vcpu *vcpu);
int vm_capability_name2type(const char *capname);
const char *vm_capability_type2name(int type);
-int vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
+int vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap,
int *retval);
-int vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
+int vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap,
int val);
int vm_assign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
int vm_unassign_pptdev(struct vmctx *ctx, int bus, int slot, int func);
@@ -185,25 +188,25 @@
vm_paddr_t gpa, size_t len, vm_paddr_t hpa);
int vm_unmap_pptdev_mmio(struct vmctx *ctx, int bus, int slot, int func,
vm_paddr_t gpa, size_t len);
-int vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot,
+int vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot,
int func, uint64_t addr, uint64_t msg, int numvec);
-int vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot,
+int vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot,
int func, int idx, uint64_t addr, uint64_t msg,
uint32_t vector_control);
int vm_disable_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func);
-int vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *i1, uint64_t *i2);
-int vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t exit_intinfo);
+int vm_get_intinfo(struct vcpu *vcpu, uint64_t *i1, uint64_t *i2);
+int vm_set_intinfo(struct vcpu *vcpu, uint64_t exit_intinfo);
/*
* Return a pointer to the statistics buffer. Note that this is not MT-safe.
*/
-uint64_t *vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
+uint64_t *vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
int *ret_entries);
const char *vm_get_stat_desc(struct vmctx *ctx, int index);
-int vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *s);
-int vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state s);
+int vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *s);
+int vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state s);
int vm_get_hpet_capabilities(struct vmctx *ctx, uint32_t *capabilities);
@@ -216,7 +219,7 @@
* 0 1 An exception was injected into the guest
* EFAULT N/A Error
*/
-int vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *pg,
+int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *pg,
uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
int *fault);
void vm_copyin(struct iovec *guest_iov, void *host_dst, size_t len);
@@ -230,15 +233,17 @@
int vm_rtc_gettime(struct vmctx *ctx, time_t *secs);
/* Reset vcpu register state */
-int vcpu_reset(struct vmctx *ctx, int vcpu);
+int vcpu_reset(struct vcpu *vcpu);
int vm_active_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_suspended_cpus(struct vmctx *ctx, cpuset_t *cpus);
int vm_debug_cpus(struct vmctx *ctx, cpuset_t *cpus);
-int vm_activate_cpu(struct vmctx *ctx, int vcpu);
-int vm_suspend_cpu(struct vmctx *ctx, int vcpu);
-int vm_resume_cpu(struct vmctx *ctx, int vcpu);
-int vm_restart_instruction(struct vmctx *vmctx, int vcpu);
+int vm_activate_cpu(struct vcpu *vcpu);
+int vm_suspend_all_cpus(struct vmctx *ctx);
+int vm_suspend_cpu(struct vcpu *vcpu);
+int vm_resume_all_cpus(struct vmctx *ctx);
+int vm_resume_cpu(struct vcpu *vcpu);
+int vm_restart_instruction(struct vcpu *vcpu);
/* CPU topology */
int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores,
@@ -249,10 +254,10 @@
/*
* FreeBSD specific APIs
*/
-int vm_setup_freebsd_registers(struct vmctx *ctx, int vcpu,
+int vm_setup_freebsd_registers(struct vcpu *vcpu,
uint64_t rip, uint64_t cr3, uint64_t gdtbase,
uint64_t rsp);
-int vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu,
+int vm_setup_freebsd_registers_i386(struct vcpu *vcpu,
uint32_t eip, uint32_t gdtbase,
uint32_t esp);
void vm_setup_freebsd_gdt(uint64_t *gdtr);
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -62,6 +62,7 @@
#include <machine/vmm_snapshot.h>
#include "vmmapi.h"
+#include "internal.h"
#define MB (1024 * 1024UL)
#define GB (1024 * 1024 * 1024UL)
@@ -163,6 +164,29 @@
free(vm);
}
+struct vcpu *
+vm_vcpu_open(struct vmctx *ctx, int vcpuid)
+{
+ struct vcpu *vcpu;
+
+ vcpu = malloc(sizeof(*vcpu));
+ vcpu->ctx = ctx;
+ vcpu->vcpuid = vcpuid;
+ return (vcpu);
+}
+
+void
+vm_vcpu_close(struct vcpu *vcpu)
+{
+ free(vcpu);
+}
+
+int
+vcpu_id(struct vcpu *vcpu)
+{
+ return (vcpu->vcpuid);
+}
+
int
vm_parse_memsize(const char *opt, size_t *ret_memsize)
{
@@ -578,36 +602,46 @@
return (ptr);
}
+static int
+vcpu_ioctl(struct vcpu *vcpu, u_long cmd, void *arg)
+{
+ /*
+ * XXX: fragile, handle with care
+ * Assumes that the first field of the ioctl data
+ * is the vcpuid.
+ */
+ *(int *)arg = vcpu->vcpuid;
+ return (ioctl(vcpu->ctx->fd, cmd, arg));
+}
+
int
-vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
+vm_set_desc(struct vcpu *vcpu, int reg,
uint64_t base, uint32_t limit, uint32_t access)
{
int error;
struct vm_seg_desc vmsegdesc;
bzero(&vmsegdesc, sizeof(vmsegdesc));
- vmsegdesc.cpuid = vcpu;
vmsegdesc.regnum = reg;
vmsegdesc.desc.base = base;
vmsegdesc.desc.limit = limit;
vmsegdesc.desc.access = access;
- error = ioctl(ctx->fd, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
+ error = vcpu_ioctl(vcpu, VM_SET_SEGMENT_DESCRIPTOR, &vmsegdesc);
return (error);
}
int
-vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
- uint64_t *base, uint32_t *limit, uint32_t *access)
+vm_get_desc(struct vcpu *vcpu, int reg, uint64_t *base, uint32_t *limit,
+ uint32_t *access)
{
int error;
struct vm_seg_desc vmsegdesc;
bzero(&vmsegdesc, sizeof(vmsegdesc));
- vmsegdesc.cpuid = vcpu;
vmsegdesc.regnum = reg;
- error = ioctl(ctx->fd, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
+ error = vcpu_ioctl(vcpu, VM_GET_SEGMENT_DESCRIPTOR, &vmsegdesc);
if (error == 0) {
*base = vmsegdesc.desc.base;
*limit = vmsegdesc.desc.limit;
@@ -617,89 +651,84 @@
}
int
-vm_get_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *seg_desc)
+vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *seg_desc)
{
int error;
- error = vm_get_desc(ctx, vcpu, reg, &seg_desc->base, &seg_desc->limit,
+ error = vm_get_desc(vcpu, reg, &seg_desc->base, &seg_desc->limit,
&seg_desc->access);
return (error);
}
int
-vm_set_register(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
+vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
int error;
struct vm_register vmreg;
bzero(&vmreg, sizeof(vmreg));
- vmreg.cpuid = vcpu;
vmreg.regnum = reg;
vmreg.regval = val;
- error = ioctl(ctx->fd, VM_SET_REGISTER, &vmreg);
+ error = vcpu_ioctl(vcpu, VM_SET_REGISTER, &vmreg);
return (error);
}
int
-vm_get_register(struct vmctx *ctx, int vcpu, int reg, uint64_t *ret_val)
+vm_get_register(struct vcpu *vcpu, int reg, uint64_t *ret_val)
{
int error;
struct vm_register vmreg;
bzero(&vmreg, sizeof(vmreg));
- vmreg.cpuid = vcpu;
vmreg.regnum = reg;
- error = ioctl(ctx->fd, VM_GET_REGISTER, &vmreg);
+ error = vcpu_ioctl(vcpu, VM_GET_REGISTER, &vmreg);
*ret_val = vmreg.regval;
return (error);
}
int
-vm_set_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
+vm_set_register_set(struct vcpu *vcpu, unsigned int count,
const int *regnums, uint64_t *regvals)
{
int error;
struct vm_register_set vmregset;
bzero(&vmregset, sizeof(vmregset));
- vmregset.cpuid = vcpu;
vmregset.count = count;
vmregset.regnums = regnums;
vmregset.regvals = regvals;
- error = ioctl(ctx->fd, VM_SET_REGISTER_SET, &vmregset);
+ error = vcpu_ioctl(vcpu, VM_SET_REGISTER_SET, &vmregset);
return (error);
}
int
-vm_get_register_set(struct vmctx *ctx, int vcpu, unsigned int count,
+vm_get_register_set(struct vcpu *vcpu, unsigned int count,
const int *regnums, uint64_t *regvals)
{
int error;
struct vm_register_set vmregset;
bzero(&vmregset, sizeof(vmregset));
- vmregset.cpuid = vcpu;
vmregset.count = count;
vmregset.regnums = regnums;
vmregset.regvals = regvals;
- error = ioctl(ctx->fd, VM_GET_REGISTER_SET, &vmregset);
+ error = vcpu_ioctl(vcpu, VM_GET_REGISTER_SET, &vmregset);
return (error);
}
int
-vm_run(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
+vm_run(struct vcpu *vcpu, struct vm_exit *vmexit)
{
int error;
struct vm_run vmrun;
bzero(&vmrun, sizeof(vmrun));
- vmrun.cpuid = vcpu;
- error = ioctl(ctx->fd, VM_RUN, &vmrun);
+ error = vcpu_ioctl(vcpu, VM_RUN, &vmrun);
bcopy(&vmrun.vm_exit, vmexit, sizeof(struct vm_exit));
return (error);
}
@@ -722,18 +751,17 @@
}
int
-vm_inject_exception(struct vmctx *ctx, int vcpu, int vector, int errcode_valid,
+vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
uint32_t errcode, int restart_instruction)
{
struct vm_exception exc;
- exc.cpuid = vcpu;
exc.vector = vector;
exc.error_code = errcode;
exc.error_code_valid = errcode_valid;
exc.restart_instruction = restart_instruction;
- return (ioctl(ctx->fd, VM_INJECT_EXCEPTION, &exc));
+ return (vcpu_ioctl(vcpu, VM_INJECT_EXCEPTION, &exc));
}
int
@@ -747,27 +775,25 @@
}
int
-vm_lapic_irq(struct vmctx *ctx, int vcpu, int vector)
+vm_lapic_irq(struct vcpu *vcpu, int vector)
{
struct vm_lapic_irq vmirq;
bzero(&vmirq, sizeof(vmirq));
- vmirq.cpuid = vcpu;
vmirq.vector = vector;
- return (ioctl(ctx->fd, VM_LAPIC_IRQ, &vmirq));
+ return (vcpu_ioctl(vcpu, VM_LAPIC_IRQ, &vmirq));
}
int
-vm_lapic_local_irq(struct vmctx *ctx, int vcpu, int vector)
+vm_lapic_local_irq(struct vcpu *vcpu, int vector)
{
struct vm_lapic_irq vmirq;
bzero(&vmirq, sizeof(vmirq));
- vmirq.cpuid = vcpu;
vmirq.vector = vector;
- return (ioctl(ctx->fd, VM_LAPIC_LOCAL_IRQ, &vmirq));
+ return (vcpu_ioctl(vcpu, VM_LAPIC_LOCAL_IRQ, &vmirq));
}
int
@@ -823,11 +849,10 @@
}
int
-vm_readwrite_kernemu_device(struct vmctx *ctx, int vcpu, vm_paddr_t gpa,
+vm_readwrite_kernemu_device(struct vcpu *vcpu, vm_paddr_t gpa,
bool write, int size, uint64_t *value)
{
struct vm_readwrite_kernemu_device irp = {
- .vcpuid = vcpu,
.access_width = fls(size) - 1,
.gpa = gpa,
.value = write ? *value : ~0ul,
@@ -835,7 +860,7 @@
long cmd = (write ? VM_SET_KERNEMU_DEV : VM_GET_KERNEMU_DEV);
int rc;
- rc = ioctl(ctx->fd, cmd, &irp);
+ rc = vcpu_ioctl(vcpu, cmd, &irp);
if (rc == 0 && !write)
*value = irp.value;
return (rc);
@@ -891,14 +916,13 @@
}
int
-vm_inject_nmi(struct vmctx *ctx, int vcpu)
+vm_inject_nmi(struct vcpu *vcpu)
{
struct vm_nmi vmnmi;
bzero(&vmnmi, sizeof(vmnmi));
- vmnmi.cpuid = vcpu;
- return (ioctl(ctx->fd, VM_INJECT_NMI, &vmnmi));
+ return (vcpu_ioctl(vcpu, VM_INJECT_NMI, &vmnmi));
}
static const char *capstrmap[] = {
@@ -933,32 +957,29 @@
}
int
-vm_get_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap,
- int *retval)
+vm_get_capability(struct vcpu *vcpu, enum vm_cap_type cap, int *retval)
{
int error;
struct vm_capability vmcap;
bzero(&vmcap, sizeof(vmcap));
- vmcap.cpuid = vcpu;
vmcap.captype = cap;
- error = ioctl(ctx->fd, VM_GET_CAPABILITY, &vmcap);
+ error = vcpu_ioctl(vcpu, VM_GET_CAPABILITY, &vmcap);
*retval = vmcap.capval;
return (error);
}
int
-vm_set_capability(struct vmctx *ctx, int vcpu, enum vm_cap_type cap, int val)
+vm_set_capability(struct vcpu *vcpu, enum vm_cap_type cap, int val)
{
struct vm_capability vmcap;
bzero(&vmcap, sizeof(vmcap));
- vmcap.cpuid = vcpu;
vmcap.captype = cap;
vmcap.capval = val;
- return (ioctl(ctx->fd, VM_SET_CAPABILITY, &vmcap));
+ return (vcpu_ioctl(vcpu, VM_SET_CAPABILITY, &vmcap));
}
int
@@ -1021,13 +1042,12 @@
}
int
-vm_setup_pptdev_msi(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
+vm_setup_pptdev_msi(struct vmctx *ctx, int bus, int slot, int func,
uint64_t addr, uint64_t msg, int numvec)
{
struct vm_pptdev_msi pptmsi;
bzero(&pptmsi, sizeof(pptmsi));
- pptmsi.vcpu = vcpu;
pptmsi.bus = bus;
pptmsi.slot = slot;
pptmsi.func = func;
@@ -1039,13 +1059,12 @@
}
int
-vm_setup_pptdev_msix(struct vmctx *ctx, int vcpu, int bus, int slot, int func,
+vm_setup_pptdev_msix(struct vmctx *ctx, int bus, int slot, int func,
int idx, uint64_t addr, uint64_t msg, uint32_t vector_control)
{
struct vm_pptdev_msix pptmsix;
bzero(&pptmsix, sizeof(pptmsix));
- pptmsix.vcpu = vcpu;
pptmsix.bus = bus;
pptmsix.slot = slot;
pptmsix.func = func;
@@ -1071,7 +1090,7 @@
}
uint64_t *
-vm_get_stats(struct vmctx *ctx, int vcpu, struct timeval *ret_tv,
+vm_get_stats(struct vcpu *vcpu, struct timeval *ret_tv,
int *ret_entries)
{
static _Thread_local uint64_t *stats_buf;
@@ -1082,11 +1101,10 @@
bool have_stats;
have_stats = false;
- vmstats.cpuid = vcpu;
count = 0;
for (index = 0;; index += nitems(vmstats.statbuf)) {
vmstats.index = index;
- if (ioctl(ctx->fd, VM_STATS, &vmstats) != 0)
+ if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0)
break;
if (stats_count < index + vmstats.num_entries) {
new_stats = realloc(stats_buf,
@@ -1129,30 +1147,28 @@
}
int
-vm_get_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state *state)
+vm_get_x2apic_state(struct vcpu *vcpu, enum x2apic_state *state)
{
int error;
struct vm_x2apic x2apic;
bzero(&x2apic, sizeof(x2apic));
- x2apic.cpuid = vcpu;
- error = ioctl(ctx->fd, VM_GET_X2APIC_STATE, &x2apic);
+ error = vcpu_ioctl(vcpu, VM_GET_X2APIC_STATE, &x2apic);
*state = x2apic.state;
return (error);
}
int
-vm_set_x2apic_state(struct vmctx *ctx, int vcpu, enum x2apic_state state)
+vm_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
int error;
struct vm_x2apic x2apic;
bzero(&x2apic, sizeof(x2apic));
- x2apic.cpuid = vcpu;
x2apic.state = state;
- error = ioctl(ctx->fd, VM_SET_X2APIC_STATE, &x2apic);
+ error = vcpu_ioctl(vcpu, VM_SET_X2APIC_STATE, &x2apic);
return (error);
}
@@ -1162,7 +1178,7 @@
* Table 9-1. IA-32 Processor States Following Power-up, Reset or INIT
*/
int
-vcpu_reset(struct vmctx *vmctx, int vcpu)
+vcpu_reset(struct vcpu *vcpu)
{
int error;
uint64_t rflags, rip, cr0, cr4, zero, desc_base, rdx;
@@ -1172,12 +1188,12 @@
zero = 0;
rflags = 0x2;
- error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
if (error)
goto done;
rip = 0xfff0;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
goto done;
/*
@@ -1186,17 +1202,17 @@
* guests like Windows.
*/
cr0 = CR0_NE;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR2, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR2, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, zero)) != 0)
goto done;
cr4 = 0;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
goto done;
/*
@@ -1205,13 +1221,13 @@
desc_base = 0xffff0000;
desc_limit = 0xffff;
desc_access = 0x0093;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
sel = 0xf000;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, sel)) != 0)
goto done;
/*
@@ -1220,91 +1236,91 @@
desc_base = 0;
desc_limit = 0xffff;
desc_access = 0x0093;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
sel = 0;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, sel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, sel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, sel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, sel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, sel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, zero)) != 0)
goto done;
/* General purpose registers */
rdx = 0xf00;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RAX, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RAX, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBX, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBX, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RCX, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RCX, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDX, rdx)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSI, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSI, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RDI, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RDI, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RBP, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RBP, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R8, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R8, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R9, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R9, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R10, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R10, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R11, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R11, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R12, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R12, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R13, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R13, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R14, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R14, zero)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_R15, zero)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_R15, zero)) != 0)
goto done;
/* GDTR, IDTR */
desc_base = 0;
desc_limit = 0xffff;
desc_access = 0;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, desc_access);
if (error != 0)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_IDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
desc_base, desc_limit, desc_access);
if (error != 0)
goto done;
@@ -1313,35 +1329,35 @@
desc_base = 0;
desc_limit = 0xffff;
desc_access = 0x0000008b;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
+ error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
if (error)
goto done;
sel = 0;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, sel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, sel)) != 0)
goto done;
/* LDTR */
desc_base = 0;
desc_limit = 0xffff;
desc_access = 0x00000082;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, desc_base,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, desc_base,
desc_limit, desc_access);
if (error)
goto done;
sel = 0;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DR6,
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR6,
0xffff0ff0)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DR7, 0x400)) !=
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_DR7, 0x400)) !=
0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_INTR_SHADOW,
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW,
zero)) != 0)
goto done;
@@ -1384,19 +1400,18 @@
}
int
-vm_gla2gpa(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
struct vm_gla2gpa gg;
int error;
bzero(&gg, sizeof(struct vm_gla2gpa));
- gg.vcpuid = vcpu;
gg.prot = prot;
gg.gla = gla;
gg.paging = *paging;
- error = ioctl(ctx->fd, VM_GLA2GPA, &gg);
+ error = vcpu_ioctl(vcpu, VM_GLA2GPA, &gg);
if (error == 0) {
*fault = gg.fault;
*gpa = gg.gpa;
@@ -1405,19 +1420,18 @@
}
int
-vm_gla2gpa_nofault(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *fault)
{
struct vm_gla2gpa gg;
int error;
bzero(&gg, sizeof(struct vm_gla2gpa));
- gg.vcpuid = vcpu;
gg.prot = prot;
gg.gla = gla;
gg.paging = *paging;
- error = ioctl(ctx->fd, VM_GLA2GPA_NOFAULT, &gg);
+ error = vcpu_ioctl(vcpu, VM_GLA2GPA_NOFAULT, &gg);
if (error == 0) {
*fault = gg.fault;
*gpa = gg.gpa;
@@ -1430,7 +1444,7 @@
#endif
int
-vm_copy_setup(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct iovec *iov, int iovcnt,
int *fault)
{
@@ -1445,14 +1459,14 @@
while (len) {
assert(iovcnt > 0);
- error = vm_gla2gpa(ctx, vcpu, paging, gla, prot, &gpa, fault);
+ error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
if (error || *fault)
return (error);
off = gpa & PAGE_MASK;
n = MIN(len, PAGE_SIZE - off);
- va = vm_map_gpa(ctx, gpa, n);
+ va = vm_map_gpa(vcpu->ctx, gpa, n);
if (va == NULL)
return (EFAULT);
@@ -1554,50 +1568,70 @@
}
int
-vm_activate_cpu(struct vmctx *ctx, int vcpu)
+vm_activate_cpu(struct vcpu *vcpu)
{
struct vm_activate_cpu ac;
int error;
bzero(&ac, sizeof(struct vm_activate_cpu));
- ac.vcpuid = vcpu;
- error = ioctl(ctx->fd, VM_ACTIVATE_CPU, &ac);
+ error = vcpu_ioctl(vcpu, VM_ACTIVATE_CPU, &ac);
return (error);
}
int
-vm_suspend_cpu(struct vmctx *ctx, int vcpu)
+vm_suspend_all_cpus(struct vmctx *ctx)
{
struct vm_activate_cpu ac;
int error;
bzero(&ac, sizeof(struct vm_activate_cpu));
- ac.vcpuid = vcpu;
+ ac.vcpuid = -1;
error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac);
return (error);
}
int
-vm_resume_cpu(struct vmctx *ctx, int vcpu)
+vm_suspend_cpu(struct vcpu *vcpu)
+{
+ struct vm_activate_cpu ac;
+ int error;
+
+ bzero(&ac, sizeof(struct vm_activate_cpu));
+ error = vcpu_ioctl(vcpu, VM_SUSPEND_CPU, &ac);
+ return (error);
+}
+
+int
+vm_resume_cpu(struct vcpu *vcpu)
+{
+ struct vm_activate_cpu ac;
+ int error;
+
+ bzero(&ac, sizeof(struct vm_activate_cpu));
+ error = vcpu_ioctl(vcpu, VM_RESUME_CPU, &ac);
+ return (error);
+}
+
+int
+vm_resume_all_cpus(struct vmctx *ctx)
{
struct vm_activate_cpu ac;
int error;
bzero(&ac, sizeof(struct vm_activate_cpu));
- ac.vcpuid = vcpu;
+ ac.vcpuid = -1;
error = ioctl(ctx->fd, VM_RESUME_CPU, &ac);
return (error);
}
int
-vm_get_intinfo(struct vmctx *ctx, int vcpu, uint64_t *info1, uint64_t *info2)
+vm_get_intinfo(struct vcpu *vcpu, uint64_t *info1, uint64_t *info2)
{
struct vm_intinfo vmii;
int error;
bzero(&vmii, sizeof(struct vm_intinfo));
- vmii.vcpuid = vcpu;
- error = ioctl(ctx->fd, VM_GET_INTINFO, &vmii);
+ error = vcpu_ioctl(vcpu, VM_GET_INTINFO, &vmii);
if (error == 0) {
*info1 = vmii.info1;
*info2 = vmii.info2;
@@ -1606,15 +1640,14 @@
}
int
-vm_set_intinfo(struct vmctx *ctx, int vcpu, uint64_t info1)
+vm_set_intinfo(struct vcpu *vcpu, uint64_t info1)
{
struct vm_intinfo vmii;
int error;
bzero(&vmii, sizeof(struct vm_intinfo));
- vmii.vcpuid = vcpu;
vmii.info1 = info1;
- error = ioctl(ctx->fd, VM_SET_INTINFO, &vmii);
+ error = vcpu_ioctl(vcpu, VM_SET_INTINFO, &vmii);
return (error);
}
@@ -1671,10 +1704,11 @@
}
int
-vm_restart_instruction(struct vmctx *ctx, int vcpu)
+vm_restart_instruction(struct vcpu *vcpu)
{
+ int arg;
- return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
+ return (vcpu_ioctl(vcpu, VM_RESTART_INSTRUCTION, &arg));
}
int
diff --git a/lib/libvmmapi/vmmapi_freebsd.c b/lib/libvmmapi/vmmapi_freebsd.c
--- a/lib/libvmmapi/vmmapi_freebsd.c
+++ b/lib/libvmmapi/vmmapi_freebsd.c
@@ -41,6 +41,7 @@
#include <string.h>
#include "vmmapi.h"
+#include "internal.h"
#define I386_TSS_SIZE 104
@@ -71,7 +72,7 @@
* 'eip' in flat mode.
*/
int
-vm_setup_freebsd_registers_i386(struct vmctx *vmctx, int vcpu, uint32_t eip,
+vm_setup_freebsd_registers_i386(struct vcpu *vcpu, uint32_t eip,
uint32_t gdtbase, uint32_t esp)
{
uint64_t cr0, rflags, desc_base;
@@ -81,34 +82,34 @@
int error, tmp;
/* A 32-bit guest requires unrestricted mode. */
- error = vm_get_capability(vmctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
+ error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
if (error)
goto done;
- error = vm_set_capability(vmctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
+ error = vm_set_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
if (error)
goto done;
cr0 = CR0_PE | CR0_NE;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, 0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, 0)) != 0)
goto done;
/*
* Forcing EFER to 0 causes bhyve to clear the "IA-32e guest
* mode" entry control.
*/
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, 0)))
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, 0)))
goto done;
- gdt = vm_map_gpa(vmctx, gdtbase, 0x1000);
+ gdt = vm_map_gpa(vcpu->ctx, gdtbase, 0x1000);
if (gdt == NULL)
return (EFAULT);
memcpy(gdt, i386_gdt, sizeof(i386_gdt));
desc_base = gdtbase;
desc_limit = sizeof(i386_gdt) - 1;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, 0);
if (error != 0)
goto done;
@@ -118,38 +119,38 @@
gdt[3].sd_lobase = tssbase;
rflags = 0x2;
- error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
if (error)
goto done;
desc_base = 0;
desc_limit = 0xffffffff;
desc_access = DESC_GRAN | DESC_DEF32 | DESC_PRESENT | SDT_MEMERA;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
desc_access = DESC_GRAN | DESC_DEF32 | DESC_PRESENT | SDT_MEMRWA;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
@@ -157,50 +158,50 @@
desc_base = tssbase;
desc_limit = I386_TSS_SIZE - 1;
desc_access = DESC_PRESENT | SDT_SYS386BSY;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_TR,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, 0, 0,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, 0, 0,
DESC_UNUSABLE);
if (error)
goto done;
gsel = GSEL(GUEST_CODE_SEL, SEL_KPL);
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_DATA_SEL, SEL_KPL);
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_TSS_SEL, SEL_KPL);
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, gsel)) != 0)
goto done;
/* LDTR is pointing to the null selector */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
goto done;
/* entry point */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, eip)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, eip)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, esp)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, esp)) != 0)
goto done;
error = 0;
@@ -221,7 +222,7 @@
* 'rip' in long mode.
*/
int
-vm_setup_freebsd_registers(struct vmctx *vmctx, int vcpu,
+vm_setup_freebsd_registers(struct vcpu *vcpu,
uint64_t rip, uint64_t cr3, uint64_t gdtbase,
uint64_t rsp)
{
@@ -231,52 +232,52 @@
uint16_t gsel;
cr0 = CR0_PE | CR0_PG | CR0_NE;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0)) != 0)
goto done;
cr4 = CR4_PAE;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4)) != 0)
goto done;
efer = EFER_LME | EFER_LMA;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_EFER, efer)))
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_EFER, efer)))
goto done;
rflags = 0x2;
- error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RFLAGS, rflags);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS, rflags);
if (error)
goto done;
desc_base = 0;
desc_limit = 0;
desc_access = 0x0000209B;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
desc_access = 0x00000093;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_DS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_ES,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_FS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_SS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
if (error)
goto done;
@@ -286,59 +287,59 @@
* TSS segment to be usable with a base address and limit of 0.
*/
desc_access = 0x0000008b;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
+ error = vm_set_desc(vcpu, VM_REG_GUEST_TR, 0, 0, desc_access);
if (error)
goto done;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_LDTR, 0, 0,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR, 0, 0,
DESC_UNUSABLE);
if (error)
goto done;
gsel = GSEL(GUEST_CODE_SEL, SEL_KPL);
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CS, gsel)) != 0)
goto done;
gsel = GSEL(GUEST_DATA_SEL, SEL_KPL);
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_DS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_DS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_ES, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_ES, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_FS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_FS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_GS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_GS, gsel)) != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_SS, gsel)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_SS, gsel)) != 0)
goto done;
/* XXX TR is pointing to the null selector */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_TR, 0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_TR, 0)) != 0)
goto done;
/* LDTR is pointing to the null selector */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, 0)) != 0)
goto done;
/* entry point */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RIP, rip)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip)) != 0)
goto done;
/* page table base */
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_CR3, cr3)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_CR3, cr3)) != 0)
goto done;
desc_base = gdtbase;
desc_limit = GUEST_GDTR_LIMIT64;
- error = vm_set_desc(vmctx, vcpu, VM_REG_GUEST_GDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, 0);
if (error != 0)
goto done;
- if ((error = vm_set_register(vmctx, vcpu, VM_REG_GUEST_RSP, rsp)) != 0)
+ if ((error = vm_set_register(vcpu, VM_REG_GUEST_RSP, rsp)) != 0)
goto done;
error = 0;
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -35,6 +35,7 @@
#include <sys/sdt.h>
#include <x86/segments.h>
+struct vcpu;
struct vm_snapshot_meta;
#ifdef _KERNEL
@@ -143,7 +144,6 @@
#ifdef _KERNEL
CTASSERT(VM_MAX_NAMELEN >= VM_MIN_NAMELEN);
-struct vcpu;
struct vm;
struct vm_exception;
struct seg_desc;
@@ -762,7 +762,6 @@
};
/* APIs to inject faults into the guest */
-#ifdef _KERNEL
void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
int errcode);
@@ -791,35 +790,5 @@
}
void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
-#else
-void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
- int errcode);
-
-static __inline void
-vm_inject_ud(void *vm, int vcpuid)
-{
- vm_inject_fault(vm, vcpuid, IDT_UD, 0, 0);
-}
-
-static __inline void
-vm_inject_gp(void *vm, int vcpuid)
-{
- vm_inject_fault(vm, vcpuid, IDT_GP, 1, 0);
-}
-
-static __inline void
-vm_inject_ac(void *vm, int vcpuid, int errcode)
-{
- vm_inject_fault(vm, vcpuid, IDT_AC, 1, errcode);
-}
-
-static __inline void
-vm_inject_ss(void *vm, int vcpuid, int errcode)
-{
- vm_inject_fault(vm, vcpuid, IDT_SS, 1, errcode);
-}
-
-void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
-#endif
#endif /* _VMM_H_ */
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -33,26 +33,13 @@
#include <sys/mman.h>
-/*
- * Allow for different arguments to identify vCPUs in userspace vs the
- * kernel. Eventually we should add struct vcpu in userland and
- * always use the kernel arguments removing these macros.
- */
-#ifdef _KERNEL
-#define VCPU_DECL struct vcpu *vcpu
-#define VCPU_ARGS vcpu
-#else
-#define VCPU_DECL void *vm, int vcpuid
-#define VCPU_ARGS vm, vcpuid
-#endif
-
/*
* Callback functions to read and write memory regions.
*/
-typedef int (*mem_region_read_t)(VCPU_DECL, uint64_t gpa,
+typedef int (*mem_region_read_t)(struct vcpu *vcpu, uint64_t gpa,
uint64_t *rval, int rsize, void *arg);
-typedef int (*mem_region_write_t)(VCPU_DECL, uint64_t gpa,
+typedef int (*mem_region_write_t)(struct vcpu *vcpu, uint64_t gpa,
uint64_t wval, int wsize, void *arg);
/*
@@ -66,11 +53,11 @@
* 'struct vmctx *' when called from user context.
* s
*/
-int vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
+int vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t mrr,
mem_region_write_t mrw, void *mrarg);
-int vie_update_register(VCPU_DECL, enum vm_reg_name reg,
+int vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
uint64_t val, int size);
/*
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -290,11 +290,11 @@
};
static int
-vie_read_register(VCPU_DECL, enum vm_reg_name reg, uint64_t *rval)
+vie_read_register(struct vcpu *vcpu, enum vm_reg_name reg, uint64_t *rval)
{
int error;
- error = vm_get_register(VCPU_ARGS, reg, rval);
+ error = vm_get_register(vcpu, reg, rval);
return (error);
}
@@ -326,14 +326,14 @@
}
static int
-vie_read_bytereg(VCPU_DECL, struct vie *vie, uint8_t *rval)
+vie_read_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t *rval)
{
uint64_t val;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(VCPU_ARGS, reg, &val);
+ error = vm_get_register(vcpu, reg, &val);
/*
* To obtain the value of a legacy high byte register shift the
@@ -347,14 +347,14 @@
}
static int
-vie_write_bytereg(VCPU_DECL, struct vie *vie, uint8_t byte)
+vie_write_bytereg(struct vcpu *vcpu, struct vie *vie, uint8_t byte)
{
uint64_t origval, val, mask;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(VCPU_ARGS, reg, &origval);
+ error = vm_get_register(vcpu, reg, &origval);
if (error == 0) {
val = byte;
mask = 0xff;
@@ -367,13 +367,13 @@
mask <<= 8;
}
val |= origval & ~mask;
- error = vm_set_register(VCPU_ARGS, reg, val);
+ error = vm_set_register(vcpu, reg, val);
}
return (error);
}
int
-vie_update_register(VCPU_DECL, enum vm_reg_name reg,
+vie_update_register(struct vcpu *vcpu, enum vm_reg_name reg,
uint64_t val, int size)
{
int error;
@@ -382,7 +382,7 @@
switch (size) {
case 1:
case 2:
- error = vie_read_register(VCPU_ARGS, reg, &origval);
+ error = vie_read_register(vcpu, reg, &origval);
if (error)
return (error);
val &= size2mask[size];
@@ -397,7 +397,7 @@
return (EINVAL);
}
- error = vm_set_register(VCPU_ARGS, reg, val);
+ error = vm_set_register(vcpu, reg, val);
return (error);
}
@@ -509,7 +509,7 @@
}
static int
-emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_mov(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -528,9 +528,9 @@
* REX + 88/r: mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
*/
size = 1; /* override for byte operation */
- error = vie_read_bytereg(VCPU_ARGS, vie, &byte);
+ error = vie_read_bytereg(vcpu, vie, &byte);
if (error == 0)
- error = memwrite(VCPU_ARGS, gpa, byte, size, arg);
+ error = memwrite(vcpu, gpa, byte, size, arg);
break;
case 0x89:
/*
@@ -540,10 +540,10 @@
* REX.W + 89/r mov r/m64, r64
*/
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val);
+ error = vie_read_register(vcpu, reg, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
}
break;
case 0x8A:
@@ -553,9 +553,9 @@
* REX + 8A/r: mov r8, r/m8
*/
size = 1; /* override for byte operation */
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0)
- error = vie_write_bytereg(VCPU_ARGS, vie, val);
+ error = vie_write_bytereg(vcpu, vie, val);
break;
case 0x8B:
/*
@@ -564,10 +564,10 @@
* 8B/r: mov r32, r/m32
* REX.W 8B/r: mov r64, r/m64
*/
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0) {
reg = gpr_map[vie->reg];
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
}
break;
case 0xA1:
@@ -577,10 +577,10 @@
* A1: mov EAX, moffs32
* REX.W + A1: mov RAX, moffs64
*/
- error = memread(VCPU_ARGS, gpa, &val, size, arg);
+ error = memread(vcpu, gpa, &val, size, arg);
if (error == 0) {
reg = VM_REG_GUEST_RAX;
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
}
break;
case 0xA3:
@@ -590,10 +590,10 @@
* A3: mov moffs32, EAX
* REX.W + A3: mov moffs64, RAX
*/
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
}
break;
case 0xC6:
@@ -603,7 +603,7 @@
* REX + C6/0 mov r/m8, imm8
*/
size = 1; /* override for byte operation */
- error = memwrite(VCPU_ARGS, gpa, vie->immediate, size, arg);
+ error = memwrite(vcpu, gpa, vie->immediate, size, arg);
break;
case 0xC7:
/*
@@ -613,7 +613,7 @@
* REX.W + C7/0 mov r/m64, imm32 (sign-extended to 64-bits)
*/
val = vie->immediate & size2mask[size];
- error = memwrite(VCPU_ARGS, gpa, val, size, arg);
+ error = memwrite(vcpu, gpa, val, size, arg);
break;
default:
break;
@@ -623,7 +623,7 @@
}
static int
-emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_movx(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -645,7 +645,7 @@
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val, 1, arg);
+ error = memread(vcpu, gpa, &val, 1, arg);
if (error)
break;
@@ -656,7 +656,7 @@
val = (uint8_t)val;
/* write the result */
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
case 0xB7:
/*
@@ -666,7 +666,7 @@
* 0F B7/r movzx r32, r/m16
* REX.W + 0F B7/r movzx r64, r/m16
*/
- error = memread(VCPU_ARGS, gpa, &val, 2, arg);
+ error = memread(vcpu, gpa, &val, 2, arg);
if (error)
return (error);
@@ -675,7 +675,7 @@
/* zero-extend word */
val = (uint16_t)val;
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
case 0xBE:
/*
@@ -688,7 +688,7 @@
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val, 1, arg);
+ error = memread(vcpu, gpa, &val, 1, arg);
if (error)
break;
@@ -699,7 +699,7 @@
val = (int8_t)val;
/* write the result */
- error = vie_update_register(VCPU_ARGS, reg, val, size);
+ error = vie_update_register(vcpu, reg, val, size);
break;
default:
break;
@@ -711,7 +711,7 @@
* Helper function to calculate and validate a linear address.
*/
static int
-get_gla(VCPU_DECL, struct vie *vie __unused,
+get_gla(struct vcpu *vcpu, struct vie *vie __unused,
struct vm_guest_paging *paging, int opsize, int addrsize, int prot,
enum vm_reg_name seg, enum vm_reg_name gpr, uint64_t *gla, int *fault)
{
@@ -719,39 +719,39 @@
uint64_t cr0, val, rflags;
int error __diagused;
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vm_get_seg_desc(VCPU_ARGS, seg, &desc);
+ error = vm_get_seg_desc(vcpu, seg, &desc);
KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
__func__, error, seg));
- error = vie_read_register(VCPU_ARGS, gpr, &val);
+ error = vie_read_register(vcpu, gpr, &val);
KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
error, gpr));
if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
addrsize, prot, gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
else
- vm_inject_gp(VCPU_ARGS);
+ vm_inject_gp(vcpu);
goto guest_fault;
}
if (vie_canonical_check(paging->cpu_mode, *gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
else
- vm_inject_gp(VCPU_ARGS);
+ vm_inject_gp(vcpu);
goto guest_fault;
}
if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
- vm_inject_ac(VCPU_ARGS, 0);
+ vm_inject_ac(vcpu, 0);
goto guest_fault;
}
@@ -764,7 +764,7 @@
}
static int
-emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_movs(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -791,7 +791,7 @@
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -821,12 +821,12 @@
*/
seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
- error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
+ error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
if (error || fault)
goto done;
- error = vm_copy_setup(VCPU_ARGS, paging, srcaddr, opsize, PROT_READ,
+ error = vm_copy_setup(vcpu, paging, srcaddr, opsize, PROT_READ,
copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -837,7 +837,7 @@
*/
vm_copyin(copyinfo, &val, opsize);
vm_copy_teardown(copyinfo, nitems(copyinfo));
- error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
+ error = memwrite(vcpu, gpa, val, opsize, arg);
if (error)
goto done;
} else {
@@ -846,13 +846,13 @@
* if 'srcaddr' is in the mmio space.
*/
- error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
+ error = get_gla(vcpu, vie, paging, opsize, vie->addrsize,
PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
&fault);
if (error || fault)
goto done;
- error = vm_copy_setup(VCPU_ARGS, paging, dstaddr, opsize,
+ error = vm_copy_setup(vcpu, paging, dstaddr, opsize,
PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -867,7 +867,7 @@
* injected into the guest then it will happen
* before the MMIO read is attempted.
*/
- error = memread(VCPU_ARGS, gpa, &val, opsize, arg);
+ error = memread(vcpu, gpa, &val, opsize, arg);
if (error)
goto done;
@@ -882,33 +882,33 @@
* instruction is not going to be restarted due
* to address translation faults.
*/
- error = vm_gla2gpa(VCPU_ARGS, paging, srcaddr,
+ error = vm_gla2gpa(vcpu, paging, srcaddr,
PROT_READ, &srcgpa, &fault);
if (error || fault)
goto done;
- error = vm_gla2gpa(VCPU_ARGS, paging, dstaddr,
+ error = vm_gla2gpa(vcpu, paging, dstaddr,
PROT_WRITE, &dstgpa, &fault);
if (error || fault)
goto done;
- error = memread(VCPU_ARGS, srcgpa, &val, opsize, arg);
+ error = memread(vcpu, srcgpa, &val, opsize, arg);
if (error)
goto done;
- error = memwrite(VCPU_ARGS, dstgpa, val, opsize, arg);
+ error = memwrite(vcpu, dstgpa, val, opsize, arg);
if (error)
goto done;
}
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSI, &rsi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RSI, &rsi);
KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D) {
@@ -919,17 +919,17 @@
rdi += opsize;
}
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSI, rsi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RSI, rsi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -937,7 +937,7 @@
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(VCPU_ARGS);
+ vm_restart_instruction(vcpu);
}
done:
KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
@@ -946,7 +946,7 @@
}
static int
-emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_stos(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread __unused,
mem_region_write_t memwrite, void *arg)
{
@@ -958,7 +958,7 @@
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -969,17 +969,17 @@
return (0);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RAX, &val);
KASSERT(!error, ("%s: error %d getting rax", __func__, error));
- error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
+ error = memwrite(vcpu, gpa, val, opsize, arg);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D)
@@ -987,13 +987,13 @@
else
rdi += opsize;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -1001,14 +1001,14 @@
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(VCPU_ARGS);
+ vm_restart_instruction(vcpu);
}
return (0);
}
static int
-emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_and(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1031,18 +1031,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 & val2;
- error = vie_update_register(VCPU_ARGS, reg, result, size);
+ error = vie_update_register(vcpu, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1060,7 +1060,7 @@
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val1, size, arg);
+ error = memread(vcpu, gpa, &val1, size, arg);
if (error)
break;
@@ -1069,7 +1069,7 @@
* operand and write the result
*/
result = val1 & vie->immediate;
- error = memwrite(VCPU_ARGS, gpa, result, size, arg);
+ error = memwrite(vcpu, gpa, result, size, arg);
break;
default:
break;
@@ -1077,7 +1077,7 @@
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1091,12 +1091,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_or(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1119,18 +1119,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 | val2;
- error = vie_update_register(VCPU_ARGS, reg, result, size);
+ error = vie_update_register(vcpu, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1148,7 +1148,7 @@
*/
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &val1, size, arg);
+ error = memread(vcpu, gpa, &val1, size, arg);
if (error)
break;
@@ -1157,7 +1157,7 @@
* operand and write the result
*/
result = val1 | vie->immediate;
- error = memwrite(VCPU_ARGS, gpa, result, size, arg);
+ error = memwrite(vcpu, gpa, result, size, arg);
break;
default:
break;
@@ -1165,7 +1165,7 @@
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1179,12 +1179,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_cmp(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1212,12 +1212,12 @@
/* Get the register operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &regop);
+ error = vie_read_register(vcpu, reg, &regop);
if (error)
return (error);
/* Get the memory operand */
- error = memread(VCPU_ARGS, gpa, &memop, size, arg);
+ error = memread(vcpu, gpa, &memop, size, arg);
if (error)
return (error);
@@ -1256,7 +1256,7 @@
size = 1;
/* get the first operand */
- error = memread(VCPU_ARGS, gpa, &op1, size, arg);
+ error = memread(vcpu, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1265,18 +1265,18 @@
default:
return (EINVAL);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_test(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1300,7 +1300,7 @@
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = memread(VCPU_ARGS, gpa, &op1, size, arg);
+ error = memread(vcpu, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1309,7 +1309,7 @@
default:
return (EINVAL);
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1320,12 +1320,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_bextr(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_bextr(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite __unused, void *arg)
{
@@ -1353,13 +1353,13 @@
* operand) using an index and length specified in the second /source/
* operand (third operand).
*/
- error = memread(VCPU_ARGS, gpa, &src1, size, arg);
+ error = memread(vcpu, gpa, &src1, size, arg);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, gpr_map[vie->vex_reg], &src2);
+ error = vie_read_register(vcpu, gpr_map[vie->vex_reg], &src2);
if (error)
return (error);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1385,7 +1385,7 @@
dst = src1;
done:
- error = vie_update_register(VCPU_ARGS, gpr_map[vie->reg], dst, size);
+ error = vie_update_register(vcpu, gpr_map[vie->reg], dst, size);
if (error)
return (error);
@@ -1396,13 +1396,13 @@
rflags &= ~RFLAGS_STATUS_BITS;
if (dst == 0)
rflags |= PSL_Z;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags,
8);
return (error);
}
static int
-emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_add(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1424,18 +1424,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 + val2;
- error = vie_update_register(VCPU_ARGS, reg, nval, size);
+ error = vie_update_register(vcpu, reg, nval, size);
break;
default:
break;
@@ -1443,14 +1443,14 @@
if (!error) {
rflags2 = getaddflags(size, val1, val2);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1458,7 +1458,7 @@
}
static int
-emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_sub(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1480,18 +1480,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(VCPU_ARGS, reg, &val1);
+ error = vie_read_register(vcpu, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(VCPU_ARGS, gpa, &val2, size, arg);
+ error = memread(vcpu, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 - val2;
- error = vie_update_register(VCPU_ARGS, reg, nval, size);
+ error = vie_update_register(vcpu, reg, nval, size);
break;
default:
break;
@@ -1499,14 +1499,14 @@
if (!error) {
rflags2 = getcc(size, val1, val2);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1514,7 +1514,7 @@
}
static int
-emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_stack_op(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1552,7 +1552,7 @@
* stack-segment descriptor determines the size of the
* stack pointer.
*/
- error = vm_get_seg_desc(VCPU_ARGS, VM_REG_GUEST_SS, &ss_desc);
+ error = vm_get_seg_desc(vcpu, VM_REG_GUEST_SS, &ss_desc);
KASSERT(error == 0, ("%s: error %d getting SS descriptor",
__func__, error));
if (SEG_DESC_DEF32(ss_desc.access))
@@ -1561,13 +1561,13 @@
stackaddrsize = 2;
}
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(vcpu, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSP, &rsp);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RSP, &rsp);
KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
if (pushop) {
rsp -= size;
@@ -1576,39 +1576,39 @@
if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
&stack_gla)) {
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
return (0);
}
if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
- vm_inject_ss(VCPU_ARGS, 0);
+ vm_inject_ss(vcpu, 0);
return (0);
}
if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
- vm_inject_ac(VCPU_ARGS, 0);
+ vm_inject_ac(vcpu, 0);
return (0);
}
- error = vm_copy_setup(VCPU_ARGS, paging, stack_gla, size,
+ error = vm_copy_setup(vcpu, paging, stack_gla, size,
pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
&fault);
if (error || fault)
return (error);
if (pushop) {
- error = memread(VCPU_ARGS, mmio_gpa, &val, size, arg);
+ error = memread(vcpu, mmio_gpa, &val, size, arg);
if (error == 0)
vm_copyout(&val, copyinfo, size);
} else {
vm_copyin(copyinfo, &val, size);
- error = memwrite(VCPU_ARGS, mmio_gpa, val, size, arg);
+ error = memwrite(vcpu, mmio_gpa, val, size, arg);
rsp += size;
}
vm_copy_teardown(copyinfo, nitems(copyinfo));
if (error == 0) {
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSP, rsp,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RSP, rsp,
stackaddrsize);
KASSERT(error == 0, ("error %d updating rsp", error));
}
@@ -1616,7 +1616,7 @@
}
static int
-emulate_push(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_push(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1631,13 +1631,13 @@
if ((vie->reg & 7) != 6)
return (EINVAL);
- error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_pop(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
+emulate_pop(struct vcpu *vcpu, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1652,13 +1652,13 @@
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(vcpu, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_group1(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_group1(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1666,15 +1666,15 @@
switch (vie->reg & 7) {
case 0x1: /* OR */
- error = emulate_or(VCPU_ARGS, gpa, vie,
+ error = emulate_or(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case 0x4: /* AND */
- error = emulate_and(VCPU_ARGS, gpa, vie,
+ error = emulate_and(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case 0x7: /* CMP */
- error = emulate_cmp(VCPU_ARGS, gpa, vie,
+ error = emulate_cmp(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
default:
@@ -1686,7 +1686,7 @@
}
static int
-emulate_bittest(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_bittest(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1702,10 +1702,10 @@
if ((vie->reg & 7) != 4)
return (EINVAL);
- error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(vcpu, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = memread(VCPU_ARGS, gpa, &val, vie->opsize, memarg);
+ error = memread(vcpu, gpa, &val, vie->opsize, memarg);
if (error)
return (error);
@@ -1722,14 +1722,14 @@
else
rflags &= ~PSL_C;
- error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(vcpu, VM_REG_GUEST_RFLAGS, rflags, 8);
KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
return (0);
}
static int
-emulate_twob_group15(VCPU_DECL, uint64_t gpa, struct vie *vie,
+emulate_twob_group15(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1749,7 +1749,7 @@
* CLFLUSH, CLFLUSHOPT. Only check for access
* rights.
*/
- error = memread(VCPU_ARGS, gpa, &buf, 1, memarg);
+ error = memread(vcpu, gpa, &buf, 1, memarg);
}
break;
default:
@@ -1761,7 +1761,7 @@
}
int
-vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
+vmm_emulate_instruction(struct vcpu *vcpu, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1772,68 +1772,68 @@
switch (vie->op.op_type) {
case VIE_OP_TYPE_GROUP1:
- error = emulate_group1(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_group1(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_POP:
- error = emulate_pop(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_pop(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_PUSH:
- error = emulate_push(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_push(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_CMP:
- error = emulate_cmp(VCPU_ARGS, gpa, vie,
+ error = emulate_cmp(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOV:
- error = emulate_mov(VCPU_ARGS, gpa, vie,
+ error = emulate_mov(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVSX:
case VIE_OP_TYPE_MOVZX:
- error = emulate_movx(VCPU_ARGS, gpa, vie,
+ error = emulate_movx(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVS:
- error = emulate_movs(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_movs(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_STOS:
- error = emulate_stos(VCPU_ARGS, gpa, vie, paging, memread,
+ error = emulate_stos(vcpu, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_AND:
- error = emulate_and(VCPU_ARGS, gpa, vie,
+ error = emulate_and(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_OR:
- error = emulate_or(VCPU_ARGS, gpa, vie,
+ error = emulate_or(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_SUB:
- error = emulate_sub(VCPU_ARGS, gpa, vie,
+ error = emulate_sub(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BITTEST:
- error = emulate_bittest(VCPU_ARGS, gpa, vie,
+ error = emulate_bittest(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_TWOB_GRP15:
- error = emulate_twob_group15(VCPU_ARGS, gpa, vie,
+ error = emulate_twob_group15(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_ADD:
- error = emulate_add(VCPU_ARGS, gpa, vie, memread,
+ error = emulate_add(vcpu, gpa, vie, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_TEST:
- error = emulate_test(VCPU_ARGS, gpa, vie,
+ error = emulate_test(vcpu, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BEXTR:
- error = emulate_bextr(VCPU_ARGS, gpa, vie, paging,
+ error = emulate_bextr(vcpu, gpa, vie, paging,
memread, memwrite, memarg);
break;
default:
diff --git a/usr.sbin/bhyve/bhyverun.h b/usr.sbin/bhyve/bhyverun.h
--- a/usr.sbin/bhyve/bhyverun.h
+++ b/usr.sbin/bhyve/bhyverun.h
@@ -37,6 +37,7 @@
extern int guest_ncpus;
extern uint16_t cpu_cores, cpu_sockets, cpu_threads;
+struct vcpu;
struct vmctx;
struct vm_exit;
@@ -47,6 +48,6 @@
int fbsdrun_virtio_msix(void);
-int vmexit_task_switch(struct vmctx *, struct vm_exit *, int *vcpu);
+int vmexit_task_switch(struct vmctx *, struct vcpu *, struct vm_exit *);
#endif
diff --git a/usr.sbin/bhyve/bhyverun.c b/usr.sbin/bhyve/bhyverun.c
--- a/usr.sbin/bhyve/bhyverun.c
+++ b/usr.sbin/bhyve/bhyverun.c
@@ -183,7 +183,7 @@
[EXIT_REASON_XRSTORS] = "XRSTORS"
};
-typedef int (*vmexit_handler_t)(struct vmctx *, struct vm_exit *, int *vcpu);
+typedef int (*vmexit_handler_t)(struct vmctx *, struct vcpu *, struct vm_exit *);
int guest_ncpus;
uint16_t cpu_cores, cpu_sockets, cpu_threads;
@@ -195,7 +195,7 @@
static cpuset_t cpumask;
-static void vm_loop(struct vmctx *ctx, int vcpu);
+static void vm_loop(struct vmctx *ctx, struct vcpu *vcpu);
static struct bhyvestats {
uint64_t vmexit_bogus;
@@ -208,11 +208,11 @@
uint64_t cpu_switch_direct;
} stats;
-static struct mt_vmm_info {
- pthread_t mt_thr;
- struct vmctx *mt_ctx;
- int mt_vcpu;
-} *mt_vmm_info;
+static struct vcpu_info {
+ struct vmctx *ctx;
+ struct vcpu *vcpu;
+ int vcpuid;
+} *vcpu_info;
static cpuset_t **vcpumap;
@@ -485,16 +485,14 @@
}
void
-vm_inject_fault(void *arg, int vcpu, int vector, int errcode_valid,
+vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
int errcode)
{
- struct vmctx *ctx;
int error, restart_instruction;
- ctx = arg;
restart_instruction = 1;
- error = vm_inject_exception(ctx, vcpu, vector, errcode_valid, errcode,
+ error = vm_inject_exception(vcpu, vector, errcode_valid, errcode,
restart_instruction);
assert(error == 0);
}
@@ -525,27 +523,24 @@
fbsdrun_start_thread(void *param)
{
char tname[MAXCOMLEN + 1];
- struct mt_vmm_info *mtp;
- int error, vcpu;
-
- mtp = param;
- vcpu = mtp->mt_vcpu;
+ struct vcpu_info *vi = param;
+ int error;
- snprintf(tname, sizeof(tname), "vcpu %d", vcpu);
- pthread_set_name_np(mtp->mt_thr, tname);
+ snprintf(tname, sizeof(tname), "vcpu %d", vi->vcpuid);
+ pthread_set_name_np(pthread_self(), tname);
- if (vcpumap[vcpu] != NULL) {
- error = pthread_setaffinity_np(mtp->mt_thr, sizeof(cpuset_t),
- vcpumap[vcpu]);
+ if (vcpumap[vi->vcpuid] != NULL) {
+ error = pthread_setaffinity_np(pthread_self(),
+ sizeof(cpuset_t), vcpumap[vi->vcpuid]);
assert(error == 0);
}
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_add(vcpu);
+ checkpoint_cpu_add(vi->vcpuid);
#endif
- gdb_cpu_add(vcpu);
+ gdb_cpu_add(vi->vcpu);
- vm_loop(mtp->mt_ctx, vcpu);
+ vm_loop(vi->ctx, vi->vcpu);
/* not reached */
exit(1);
@@ -553,23 +548,20 @@
}
static void
-fbsdrun_addcpu(struct vmctx *ctx, int newcpu)
+fbsdrun_addcpu(struct vcpu_info *vi)
{
+ pthread_t thr;
int error;
- error = vm_activate_cpu(ctx, newcpu);
+ error = vm_activate_cpu(vi->vcpu);
if (error != 0)
- err(EX_OSERR, "could not activate CPU %d", newcpu);
-
- CPU_SET_ATOMIC(newcpu, &cpumask);
+ err(EX_OSERR, "could not activate CPU %d", vi->vcpuid);
- vm_suspend_cpu(ctx, newcpu);
+ CPU_SET_ATOMIC(vi->vcpuid, &cpumask);
- mt_vmm_info[newcpu].mt_ctx = ctx;
- mt_vmm_info[newcpu].mt_vcpu = newcpu;
+ vm_suspend_cpu(vi->vcpu);
- error = pthread_create(&mt_vmm_info[newcpu].mt_thr, NULL,
- fbsdrun_start_thread, &mt_vmm_info[newcpu]);
+ error = pthread_create(&thr, NULL, fbsdrun_start_thread, vi);
assert(error == 0);
}
@@ -587,8 +579,8 @@
}
static int
-vmexit_handle_notify(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused, uint32_t eax __unused)
+vmexit_handle_notify(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused, uint32_t eax __unused)
{
#if BHYVE_DEBUG
/*
@@ -599,13 +591,10 @@
}
static int
-vmexit_inout(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
int error;
int bytes, port, in, out;
- int vcpu;
-
- vcpu = *pvcpu;
port = vme->u.inout.port;
bytes = vme->u.inout.bytes;
@@ -614,7 +603,7 @@
/* Extra-special case of host notifications */
if (out && port == GUEST_NIO_PORT) {
- error = vmexit_handle_notify(ctx, vme, pvcpu, vme->u.inout.eax);
+ error = vmexit_handle_notify(ctx, vcpu, vme, vme->u.inout.eax);
return (error);
}
@@ -631,45 +620,45 @@
}
static int
-vmexit_rdmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_rdmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
uint64_t val;
uint32_t eax, edx;
int error;
val = 0;
- error = emulate_rdmsr(ctx, *pvcpu, vme->u.msr.code, &val);
+ error = emulate_rdmsr(vcpu, vme->u.msr.code, &val);
if (error != 0) {
fprintf(stderr, "rdmsr to register %#x on vcpu %d\n",
- vme->u.msr.code, *pvcpu);
+ vme->u.msr.code, vcpu_id(vcpu));
if (get_config_bool("x86.strictmsr")) {
- vm_inject_gp(ctx, *pvcpu);
+ vm_inject_gp(vcpu);
return (VMEXIT_CONTINUE);
}
}
eax = val;
- error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RAX, eax);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RAX, eax);
assert(error == 0);
edx = val >> 32;
- error = vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RDX, edx);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RDX, edx);
assert(error == 0);
return (VMEXIT_CONTINUE);
}
static int
-vmexit_wrmsr(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_wrmsr(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
int error;
- error = emulate_wrmsr(ctx, *pvcpu, vme->u.msr.code, vme->u.msr.wval);
+ error = emulate_wrmsr(vcpu, vme->u.msr.code, vme->u.msr.wval);
if (error != 0) {
fprintf(stderr, "wrmsr to register %#x(%#lx) on vcpu %d\n",
- vme->u.msr.code, vme->u.msr.wval, *pvcpu);
+ vme->u.msr.code, vme->u.msr.wval, vcpu_id(vcpu));
if (get_config_bool("x86.strictmsr")) {
- vm_inject_gp(ctx, *pvcpu);
+ vm_inject_gp(vcpu);
return (VMEXIT_CONTINUE);
}
}
@@ -695,10 +684,10 @@
}
static int
-vmexit_vmx(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_vmx(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
- fprintf(stderr, "vm exit[%d]\n", *pvcpu);
+ fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
fprintf(stderr, "\treason\t\tVMX\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
@@ -711,7 +700,7 @@
fprintf(stderr, "\tinst_error\t\t%d\n", vme->u.vmx.inst_error);
#ifdef DEBUG_EPT_MISCONFIG
if (vme->u.vmx.exit_reason == EXIT_REASON_EPT_MISCONFIG) {
- vm_get_register(ctx, *pvcpu,
+ vm_get_register(vcpu,
VMCS_IDENT(VMCS_GUEST_PHYSICAL_ADDRESS),
&ept_misconfig_gpa);
vm_get_gpa_pmap(ctx, ept_misconfig_gpa, ept_misconfig_pte,
@@ -728,10 +717,10 @@
}
static int
-vmexit_svm(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_svm(struct vmctx *ctx __unused, struct vcpu *vcpu, struct vm_exit *vme)
{
- fprintf(stderr, "vm exit[%d]\n", *pvcpu);
+ fprintf(stderr, "vm exit[%d]\n", vcpu_id(vcpu));
fprintf(stderr, "\treason\t\tSVM\n");
fprintf(stderr, "\trip\t\t0x%016lx\n", vme->rip);
fprintf(stderr, "\tinst_length\t%d\n", vme->inst_length);
@@ -742,8 +731,8 @@
}
static int
-vmexit_bogus(struct vmctx *ctx __unused, struct vm_exit *vme,
- int *pvcpu __unused)
+vmexit_bogus(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -754,8 +743,8 @@
}
static int
-vmexit_reqidle(struct vmctx *ctx __unused, struct vm_exit *vme,
- int *pvcpu __unused)
+vmexit_reqidle(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -766,8 +755,8 @@
}
static int
-vmexit_hlt(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused)
+vmexit_hlt(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused)
{
stats.vmexit_hlt++;
@@ -781,8 +770,8 @@
}
static int
-vmexit_pause(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu __unused)
+vmexit_pause(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme __unused)
{
stats.vmexit_pause++;
@@ -791,7 +780,8 @@
}
static int
-vmexit_mtrap(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_mtrap(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
assert(vme->inst_length == 0);
@@ -799,18 +789,19 @@
stats.vmexit_mtrap++;
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_suspend(*pvcpu);
+ checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
- gdb_cpu_mtrap(*pvcpu);
+ gdb_cpu_mtrap(vcpu);
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_resume(*pvcpu);
+ checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
return (VMEXIT_CONTINUE);
}
static int
-vmexit_inst_emul(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_inst_emul(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
int err, i, cs_d;
struct vie *vie;
@@ -831,13 +822,13 @@
cs_d = vme->u.inst_emul.cs_d;
if (vmm_decode_instruction(mode, cs_d, vie) != 0)
goto fail;
- if (vm_set_register(ctx, *pvcpu, VM_REG_GUEST_RIP,
+ if (vm_set_register(vcpu, VM_REG_GUEST_RIP,
vme->rip + vie->num_processed) != 0)
goto fail;
}
- err = emulate_mem(ctx, *pvcpu, vme->u.inst_emul.gpa,
- vie, &vme->u.inst_emul.paging);
+ err = emulate_mem(vcpu, vme->u.inst_emul.gpa, vie,
+ &vme->u.inst_emul.paging);
if (err) {
if (err == ESRCH) {
EPRINTLN("Unhandled memory access to 0x%lx\n",
@@ -860,15 +851,16 @@
static pthread_cond_t resetcpu_cond = PTHREAD_COND_INITIALIZER;
static int
-vmexit_suspend(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu)
+vmexit_suspend(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vme)
{
enum vm_suspend_how how;
+ int vcpuid = vcpu_id(vcpu);
how = vme->u.suspended.how;
- fbsdrun_deletecpu(*pvcpu);
+ fbsdrun_deletecpu(vcpuid);
- if (*pvcpu != BSP) {
+ if (vcpuid != BSP) {
pthread_mutex_lock(&resetcpu_mtx);
pthread_cond_signal(&resetcpu_cond);
pthread_mutex_unlock(&resetcpu_mtx);
@@ -900,16 +892,16 @@
}
static int
-vmexit_debug(struct vmctx *ctx __unused, struct vm_exit *vme __unused,
- int *pvcpu)
+vmexit_debug(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme __unused)
{
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_suspend(*pvcpu);
+ checkpoint_cpu_suspend(vcpu_id(vcpu));
#endif
- gdb_cpu_suspend(*pvcpu);
+ gdb_cpu_suspend(vcpu);
#ifdef BHYVE_SNAPSHOT
- checkpoint_cpu_resume(*pvcpu);
+ checkpoint_cpu_resume(vcpu_id(vcpu));
#endif
/*
* XXX-MJ sleep for a short period to avoid chewing up the CPU in the
@@ -920,22 +912,24 @@
}
static int
-vmexit_breakpoint(struct vmctx *ctx __unused, struct vm_exit *vme, int *pvcpu)
+vmexit_breakpoint(struct vmctx *ctx __unused, struct vcpu *vcpu,
+ struct vm_exit *vme)
{
- gdb_cpu_breakpoint(*pvcpu, vme);
+ gdb_cpu_breakpoint(vcpu, vme);
return (VMEXIT_CONTINUE);
}
static int
-vmexit_ipi(struct vmctx *ctx, struct vm_exit *vme, int *pvcpu __unused)
+vmexit_ipi(struct vmctx *ctx __unused, struct vcpu *vcpu __unused,
+ struct vm_exit *vme)
{
int error = -1;
int i;
switch (vme->u.ipi.mode) {
case APIC_DELMODE_INIT:
CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
- error = vm_suspend_cpu(ctx, i);
+ error = vm_suspend_cpu(vcpu_info[i].vcpu);
if (error) {
warnx("%s: failed to suspend cpu %d\n",
__func__, i);
@@ -945,7 +939,8 @@
break;
case APIC_DELMODE_STARTUP:
CPU_FOREACH_ISSET(i, &vme->u.ipi.dmask) {
- spinup_ap(ctx, i, vme->u.ipi.vector << PAGE_SHIFT);
+ spinup_ap(vcpu_info[i].vcpu,
+ vme->u.ipi.vector << PAGE_SHIFT);
}
error = 0;
break;
@@ -975,7 +970,7 @@
};
static void
-vm_loop(struct vmctx *ctx, int vcpu)
+vm_loop(struct vmctx *ctx, struct vcpu *vcpu)
{
struct vm_exit vme;
int error, rc;
@@ -983,10 +978,10 @@
cpuset_t active_cpus;
error = vm_active_cpus(ctx, &active_cpus);
- assert(CPU_ISSET(vcpu, &active_cpus));
+ assert(CPU_ISSET(vcpu_id(vcpu), &active_cpus));
while (1) {
- error = vm_run(ctx, vcpu, &vme);
+ error = vm_run(vcpu, &vme);
if (error != 0)
break;
@@ -997,7 +992,7 @@
exit(4);
}
- rc = (*handler[exitcode])(ctx, &vme, &vcpu);
+ rc = (*handler[exitcode])(ctx, vcpu, &vme);
switch (rc) {
case VMEXIT_CONTINUE:
@@ -1012,7 +1007,7 @@
}
static int
-num_vcpus_allowed(struct vmctx *ctx)
+num_vcpus_allowed(struct vmctx *ctx, struct vcpu *vcpu)
{
uint16_t sockets, cores, threads, maxcpus;
int tmp, error;
@@ -1021,7 +1016,7 @@
* The guest is allowed to spinup more than one processor only if the
* UNRESTRICTED_GUEST capability is available.
*/
- error = vm_get_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, &tmp);
+ error = vm_get_capability(vcpu, VM_CAP_UNRESTRICTED_GUEST, &tmp);
if (error != 0)
return (1);
@@ -1033,18 +1028,18 @@
}
static void
-fbsdrun_set_capabilities(struct vmctx *ctx, int cpu)
+fbsdrun_set_capabilities(struct vcpu *vcpu, bool bsp)
{
int err, tmp;
if (get_config_bool_default("x86.vmexit_on_hlt", false)) {
- err = vm_get_capability(ctx, cpu, VM_CAP_HALT_EXIT, &tmp);
+ err = vm_get_capability(vcpu, VM_CAP_HALT_EXIT, &tmp);
if (err < 0) {
fprintf(stderr, "VM exit on HLT not supported\n");
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_HALT_EXIT, 1);
- if (cpu == BSP)
+ vm_set_capability(vcpu, VM_CAP_HALT_EXIT, 1);
+ if (bsp)
handler[VM_EXITCODE_HLT] = vmexit_hlt;
}
@@ -1052,30 +1047,30 @@
/*
* pause exit support required for this mode
*/
- err = vm_get_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, &tmp);
+ err = vm_get_capability(vcpu, VM_CAP_PAUSE_EXIT, &tmp);
if (err < 0) {
fprintf(stderr,
"SMP mux requested, no pause support\n");
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_PAUSE_EXIT, 1);
- if (cpu == BSP)
+ vm_set_capability(vcpu, VM_CAP_PAUSE_EXIT, 1);
+ if (bsp)
handler[VM_EXITCODE_PAUSE] = vmexit_pause;
}
if (get_config_bool_default("x86.x2apic", false))
- err = vm_set_x2apic_state(ctx, cpu, X2APIC_ENABLED);
+ err = vm_set_x2apic_state(vcpu, X2APIC_ENABLED);
else
- err = vm_set_x2apic_state(ctx, cpu, X2APIC_DISABLED);
+ err = vm_set_x2apic_state(vcpu, X2APIC_DISABLED);
if (err) {
fprintf(stderr, "Unable to set x2apic state (%d)\n", err);
exit(4);
}
- vm_set_capability(ctx, cpu, VM_CAP_ENABLE_INVPCID, 1);
+ vm_set_capability(vcpu, VM_CAP_ENABLE_INVPCID, 1);
- err = vm_set_capability(ctx, cpu, VM_CAP_IPI_EXIT, 1);
+ err = vm_set_capability(vcpu, VM_CAP_IPI_EXIT, 1);
assert(err == 0);
}
@@ -1143,23 +1138,23 @@
}
static void
-spinup_vcpu(struct vmctx *ctx, int vcpu)
+spinup_vcpu(struct vcpu_info *vi, bool bsp)
{
int error;
- if (vcpu != BSP) {
- fbsdrun_set_capabilities(ctx, vcpu);
+ if (!bsp) {
+ fbsdrun_set_capabilities(vi->vcpu, false);
/*
* Enable the 'unrestricted guest' mode for APs.
*
* APs startup in power-on 16-bit mode.
*/
- error = vm_set_capability(ctx, vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
+ error = vm_set_capability(vi->vcpu, VM_CAP_UNRESTRICTED_GUEST, 1);
assert(error == 0);
}
- fbsdrun_addcpu(ctx, vcpu);
+ fbsdrun_addcpu(vi);
}
static bool
@@ -1245,6 +1240,7 @@
{
int c, error;
int max_vcpus, memflags;
+ struct vcpu *bsp;
struct vmctx *ctx;
uint64_t rip;
size_t memsize;
@@ -1429,14 +1425,26 @@
}
#endif
- max_vcpus = num_vcpus_allowed(ctx);
+ bsp = vm_vcpu_open(ctx, BSP);
+ max_vcpus = num_vcpus_allowed(ctx, bsp);
if (guest_ncpus > max_vcpus) {
fprintf(stderr, "%d vCPUs requested but only %d available\n",
guest_ncpus, max_vcpus);
exit(4);
}
- fbsdrun_set_capabilities(ctx, BSP);
+ fbsdrun_set_capabilities(bsp, true);
+
+ /* Allocate per-VCPU resources. */
+ vcpu_info = calloc(guest_ncpus, sizeof(*vcpu_info));
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++) {
+ vcpu_info[vcpuid].ctx = ctx;
+ vcpu_info[vcpuid].vcpuid = vcpuid;
+ if (vcpuid == BSP)
+ vcpu_info[vcpuid].vcpu = bsp;
+ else
+ vcpu_info[vcpuid].vcpu = vm_vcpu_open(ctx, vcpuid);
+ }
memflags = 0;
if (get_config_bool_default("memory.wired", false))
@@ -1496,24 +1504,20 @@
init_gdb(ctx);
if (lpc_bootrom()) {
- if (vm_set_capability(ctx, BSP, VM_CAP_UNRESTRICTED_GUEST, 1)) {
+ if (vm_set_capability(bsp, VM_CAP_UNRESTRICTED_GUEST, 1)) {
fprintf(stderr, "ROM boot failed: unrestricted guest "
"capability not available\n");
exit(4);
}
- error = vcpu_reset(ctx, BSP);
+ error = vcpu_reset(bsp);
assert(error == 0);
}
- /* Allocate per-VCPU resources. */
- mt_vmm_info = calloc(guest_ncpus, sizeof(*mt_vmm_info));
-
/*
* Add all vCPUs.
*/
- for (int vcpu = 0; vcpu < guest_ncpus; vcpu++) {
- spinup_vcpu(ctx, vcpu);
- }
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
+ spinup_vcpu(&vcpu_info[vcpuid], vcpuid == BSP);
#ifdef BHYVE_SNAPSHOT
if (restore_file != NULL) {
@@ -1549,7 +1553,7 @@
}
#endif
- error = vm_get_register(ctx, BSP, VM_REG_GUEST_RIP, &rip);
+ error = vm_get_register(bsp, VM_REG_GUEST_RIP, &rip);
assert(error == 0);
/*
@@ -1608,14 +1612,11 @@
if (vm_restore_time(ctx) < 0)
err(EX_OSERR, "Unable to restore time");
- for (int i = 0; i < guest_ncpus; i++) {
- if (i == BSP)
- continue;
- vm_resume_cpu(ctx, i);
- }
- }
+ for (int vcpuid = 0; vcpuid < guest_ncpus; vcpuid++)
+ vm_resume_cpu(vcpu_info[vcpuid].vcpu);
+ } else
#endif
- vm_resume_cpu(ctx, BSP);
+ vm_resume_cpu(bsp);
/*
* Head off to the main event dispatch loop
diff --git a/usr.sbin/bhyve/bootrom.c b/usr.sbin/bhyve/bootrom.c
--- a/usr.sbin/bhyve/bootrom.c
+++ b/usr.sbin/bhyve/bootrom.c
@@ -84,9 +84,8 @@
* that the Firmware Volume area is writable and persistent.
*/
static int
-bootrom_var_mem_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
- uint64_t addr, int size, uint64_t *val, void *arg1 __unused,
- long arg2 __unused)
+bootrom_var_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
+ int size, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
off_t offset;
diff --git a/usr.sbin/bhyve/gdb.h b/usr.sbin/bhyve/gdb.h
--- a/usr.sbin/bhyve/gdb.h
+++ b/usr.sbin/bhyve/gdb.h
@@ -30,10 +30,10 @@
#ifndef __GDB_H__
#define __GDB_H__
-void gdb_cpu_add(int vcpu);
-void gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit);
-void gdb_cpu_mtrap(int vcpu);
-void gdb_cpu_suspend(int vcpu);
+void gdb_cpu_add(struct vcpu *vcpu);
+void gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit);
+void gdb_cpu_mtrap(struct vcpu *vcpu);
+void gdb_cpu_suspend(struct vcpu *vcpu);
void init_gdb(struct vmctx *ctx);
#endif /* !__GDB_H__ */
diff --git a/usr.sbin/bhyve/gdb.c b/usr.sbin/bhyve/gdb.c
--- a/usr.sbin/bhyve/gdb.c
+++ b/usr.sbin/bhyve/gdb.c
@@ -132,6 +132,7 @@
static int cur_fd = -1;
static TAILQ_HEAD(, breakpoint) breakpoints;
static struct vcpu_state *vcpu_state;
+static struct vcpu **vcpus;
static int cur_vcpu, stopped_vcpu;
static bool gdb_active = false;
@@ -223,7 +224,7 @@
static void remove_all_sw_breakpoints(void);
static int
-guest_paging_info(int vcpu, struct vm_guest_paging *paging)
+guest_paging_info(struct vcpu *vcpu, struct vm_guest_paging *paging)
{
uint64_t regs[4];
const int regset[4] = {
@@ -233,7 +234,7 @@
VM_REG_GUEST_EFER
};
- if (vm_get_register_set(ctx, vcpu, nitems(regset), regset, regs) == -1)
+ if (vm_get_register_set(vcpu, nitems(regset), regset, regs) == -1)
return (-1);
/*
@@ -268,7 +269,7 @@
* return -1.
*/
static int
-guest_vaddr2paddr(int vcpu, uint64_t vaddr, uint64_t *paddr)
+guest_vaddr2paddr(struct vcpu *vcpu, uint64_t vaddr, uint64_t *paddr)
{
struct vm_guest_paging paging;
int fault;
@@ -280,7 +281,7 @@
* Always use PROT_READ. We really care if the VA is
* accessible, not if the current vCPU can write.
*/
- if (vm_gla2gpa_nofault(ctx, vcpu, &paging, vaddr, PROT_READ, paddr,
+ if (vm_gla2gpa_nofault(vcpu, &paging, vaddr, PROT_READ, paddr,
&fault) == -1)
return (-1);
if (fault)
@@ -730,17 +731,18 @@
* as long as the debug server keeps them suspended.
*/
static void
-_gdb_cpu_suspend(int vcpu, bool report_stop)
+_gdb_cpu_suspend(struct vcpu *vcpu, bool report_stop)
{
+ int vcpuid = vcpu_id(vcpu);
- debug("$vCPU %d suspending\n", vcpu);
- CPU_SET(vcpu, &vcpus_waiting);
+ debug("$vCPU %d suspending\n", vcpuid);
+ CPU_SET(vcpuid, &vcpus_waiting);
if (report_stop && CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
gdb_finish_suspend_vcpus();
- while (CPU_ISSET(vcpu, &vcpus_suspended))
+ while (CPU_ISSET(vcpuid, &vcpus_suspended))
pthread_cond_wait(&idle_vcpus, &gdb_lock);
- CPU_CLR(vcpu, &vcpus_waiting);
- debug("$vCPU %d resuming\n", vcpu);
+ CPU_CLR(vcpuid, &vcpus_waiting);
+ debug("$vCPU %d resuming\n", vcpuid);
}
/*
@@ -748,17 +750,21 @@
* debug server about the new thread.
*/
void
-gdb_cpu_add(int vcpu)
+gdb_cpu_add(struct vcpu *vcpu)
{
+ int vcpuid;
if (!gdb_active)
return;
- debug("$vCPU %d starting\n", vcpu);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d starting\n", vcpuid);
pthread_mutex_lock(&gdb_lock);
- assert(vcpu < guest_ncpus);
- CPU_SET(vcpu, &vcpus_active);
+ assert(vcpuid < guest_ncpus);
+ assert(vcpus[vcpuid] == NULL);
+ vcpus[vcpuid] = vcpu;
+ CPU_SET(vcpuid, &vcpus_active);
if (!TAILQ_EMPTY(&breakpoints)) {
- vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT, 1);
+ vm_set_capability(vcpu, VM_CAP_BPT_EXIT, 1);
debug("$vCPU %d enabled breakpoint exits\n", vcpu);
}
@@ -768,7 +774,7 @@
* executing the first instruction.
*/
if (!CPU_EMPTY(&vcpus_suspended)) {
- CPU_SET(vcpu, &vcpus_suspended);
+ CPU_SET(vcpuid, &vcpus_suspended);
_gdb_cpu_suspend(vcpu, false);
}
pthread_mutex_unlock(&gdb_lock);
@@ -779,12 +785,12 @@
* if the vCPU is marked as stepping.
*/
static void
-gdb_cpu_resume(int vcpu)
+gdb_cpu_resume(struct vcpu *vcpu)
{
struct vcpu_state *vs;
int error;
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpu_id(vcpu)];
/*
* Any pending event should already be reported before
@@ -793,7 +799,7 @@
assert(vs->hit_swbreak == false);
assert(vs->stepped == false);
if (vs->stepping) {
- error = vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 1);
+ error = vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 1);
assert(error == 0);
}
}
@@ -804,7 +810,7 @@
* to a guest-wide suspend such as Ctrl-C or the stop on attach.
*/
void
-gdb_cpu_suspend(int vcpu)
+gdb_cpu_suspend(struct vcpu *vcpu)
{
if (!gdb_active)
@@ -822,7 +828,7 @@
assert(pthread_mutex_isowned_np(&gdb_lock));
debug("suspending all CPUs\n");
vcpus_suspended = vcpus_active;
- vm_suspend_cpu(ctx, -1);
+ vm_suspend_all_cpus(ctx);
if (CPU_CMP(&vcpus_waiting, &vcpus_suspended) == 0)
gdb_finish_suspend_vcpus();
}
@@ -832,23 +838,25 @@
* the VT-x-specific MTRAP exit.
*/
void
-gdb_cpu_mtrap(int vcpu)
+gdb_cpu_mtrap(struct vcpu *vcpu)
{
struct vcpu_state *vs;
+ int vcpuid;
if (!gdb_active)
return;
- debug("$vCPU %d MTRAP\n", vcpu);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d MTRAP\n", vcpuid);
pthread_mutex_lock(&gdb_lock);
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpuid];
if (vs->stepping) {
vs->stepping = false;
vs->stepped = true;
- vm_set_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, 0);
+ vm_set_capability(vcpu, VM_CAP_MTRAP_EXIT, 0);
while (vs->stepped) {
if (stopped_vcpu == -1) {
- debug("$vCPU %d reporting step\n", vcpu);
- stopped_vcpu = vcpu;
+ debug("$vCPU %d reporting step\n", vcpuid);
+ stopped_vcpu = vcpuid;
gdb_suspend_vcpus();
}
_gdb_cpu_suspend(vcpu, true);
@@ -871,33 +879,34 @@
}
void
-gdb_cpu_breakpoint(int vcpu, struct vm_exit *vmexit)
+gdb_cpu_breakpoint(struct vcpu *vcpu, struct vm_exit *vmexit)
{
struct breakpoint *bp;
struct vcpu_state *vs;
uint64_t gpa;
- int error;
+ int error, vcpuid;
if (!gdb_active) {
fprintf(stderr, "vm_loop: unexpected VMEXIT_DEBUG\n");
exit(4);
}
+ vcpuid = vcpu_id(vcpu);
pthread_mutex_lock(&gdb_lock);
error = guest_vaddr2paddr(vcpu, vmexit->rip, &gpa);
assert(error == 1);
bp = find_breakpoint(gpa);
if (bp != NULL) {
- vs = &vcpu_state[vcpu];
+ vs = &vcpu_state[vcpuid];
assert(vs->stepping == false);
assert(vs->stepped == false);
assert(vs->hit_swbreak == false);
vs->hit_swbreak = true;
- vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, vmexit->rip);
+ vm_set_register(vcpu, VM_REG_GUEST_RIP, vmexit->rip);
for (;;) {
if (stopped_vcpu == -1) {
- debug("$vCPU %d reporting breakpoint at rip %#lx\n", vcpu,
- vmexit->rip);
- stopped_vcpu = vcpu;
+ debug("$vCPU %d reporting breakpoint at rip %#lx\n",
+ vcpuid, vmexit->rip);
+ stopped_vcpu = vcpuid;
gdb_suspend_vcpus();
}
_gdb_cpu_suspend(vcpu, true);
@@ -914,31 +923,32 @@
}
gdb_cpu_resume(vcpu);
} else {
- debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpu,
+ debug("$vCPU %d injecting breakpoint at rip %#lx\n", vcpuid,
vmexit->rip);
- error = vm_set_register(ctx, vcpu,
- VM_REG_GUEST_ENTRY_INST_LENGTH, vmexit->u.bpt.inst_length);
+ error = vm_set_register(vcpu, VM_REG_GUEST_ENTRY_INST_LENGTH,
+ vmexit->u.bpt.inst_length);
assert(error == 0);
- error = vm_inject_exception(ctx, vcpu, IDT_BP, 0, 0, 0);
+ error = vm_inject_exception(vcpu, IDT_BP, 0, 0, 0);
assert(error == 0);
}
pthread_mutex_unlock(&gdb_lock);
}
static bool
-gdb_step_vcpu(int vcpu)
+gdb_step_vcpu(struct vcpu *vcpu)
{
- int error, val;
+ int error, val, vcpuid;
- debug("$vCPU %d step\n", vcpu);
- error = vm_get_capability(ctx, vcpu, VM_CAP_MTRAP_EXIT, &val);
+ vcpuid = vcpu_id(vcpu);
+ debug("$vCPU %d step\n", vcpuid);
+ error = vm_get_capability(vcpu, VM_CAP_MTRAP_EXIT, &val);
if (error < 0)
return (false);
discard_stop();
- vcpu_state[vcpu].stepping = true;
- vm_resume_cpu(ctx, vcpu);
- CPU_CLR(vcpu, &vcpus_suspended);
+ vcpu_state[vcpuid].stepping = true;
+ vm_resume_cpu(vcpu);
+ CPU_CLR(vcpuid, &vcpus_suspended);
pthread_cond_broadcast(&idle_vcpus);
return (true);
}
@@ -948,7 +958,7 @@
{
assert(pthread_mutex_isowned_np(&gdb_lock));
- vm_resume_cpu(ctx, -1);
+ vm_resume_all_cpus(ctx);
debug("resuming all CPUs\n");
CPU_ZERO(&vcpus_suspended);
pthread_cond_broadcast(&idle_vcpus);
@@ -959,7 +969,7 @@
{
uint64_t regvals[nitems(gdb_regset)];
- if (vm_get_register_set(ctx, cur_vcpu, nitems(gdb_regset),
+ if (vm_get_register_set(vcpus[cur_vcpu], nitems(gdb_regset),
gdb_regset, regvals) == -1) {
send_error(errno);
return;
@@ -998,7 +1008,7 @@
started = false;
while (resid > 0) {
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
if (started)
finish_packet();
@@ -1050,7 +1060,7 @@
bytes = 2;
else
bytes = 4;
- error = read_mem(ctx, cur_vcpu, gpa, &val,
+ error = read_mem(vcpus[cur_vcpu], gpa, &val,
bytes);
if (error == 0) {
if (!started) {
@@ -1121,7 +1131,7 @@
}
while (resid > 0) {
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
send_error(errno);
return;
@@ -1170,7 +1180,7 @@
bytes = 4;
val = be32toh(parse_integer(data, 8));
}
- error = write_mem(ctx, cur_vcpu, gpa, val,
+ error = write_mem(vcpus[cur_vcpu], gpa, val,
bytes);
if (error == 0) {
gpa += bytes;
@@ -1201,7 +1211,7 @@
while (!CPU_EMPTY(&mask)) {
vcpu = CPU_FFS(&mask) - 1;
CPU_CLR(vcpu, &mask);
- if (vm_set_capability(ctx, vcpu, VM_CAP_BPT_EXIT,
+ if (vm_set_capability(vcpus[vcpu], VM_CAP_BPT_EXIT,
enable ? 1 : 0) < 0)
return (false);
debug("$vCPU %d %sabled breakpoint exits\n", vcpu,
@@ -1243,7 +1253,7 @@
return;
}
- error = guest_vaddr2paddr(cur_vcpu, gva, &gpa);
+ error = guest_vaddr2paddr(vcpus[cur_vcpu], gva, &gpa);
if (error == -1) {
send_error(errno);
return;
@@ -1587,7 +1597,7 @@
}
/* Don't send a reply until a stop occurs. */
- if (!gdb_step_vcpu(cur_vcpu)) {
+ if (!gdb_step_vcpu(vcpus[cur_vcpu])) {
send_error(EOPNOTSUPP);
break;
}
@@ -1880,6 +1890,7 @@
stopped_vcpu = -1;
TAILQ_INIT(&breakpoints);
+ vcpus = calloc(guest_ncpus, sizeof(*vcpus));
vcpu_state = calloc(guest_ncpus, sizeof(*vcpu_state));
if (wait) {
/*
diff --git a/usr.sbin/bhyve/inout.h b/usr.sbin/bhyve/inout.h
--- a/usr.sbin/bhyve/inout.h
+++ b/usr.sbin/bhyve/inout.h
@@ -33,6 +33,7 @@
#include <sys/linker_set.h>
+struct vcpu;
struct vmctx;
struct vm_exit;
@@ -72,7 +73,7 @@
DATA_SET(inout_port_set, __CONCAT(__inout_port, __LINE__))
void init_inout(void);
-int emulate_inout(struct vmctx *, int vcpu, struct vm_exit *vmexit);
+int emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit);
int register_inout(struct inout_port *iop);
int unregister_inout(struct inout_port *iop);
diff --git a/usr.sbin/bhyve/inout.c b/usr.sbin/bhyve/inout.c
--- a/usr.sbin/bhyve/inout.c
+++ b/usr.sbin/bhyve/inout.c
@@ -104,7 +104,7 @@
}
int
-emulate_inout(struct vmctx *ctx, int vcpu, struct vm_exit *vmexit)
+emulate_inout(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
{
int addrsize, bytes, flags, in, port, prot, rep;
uint32_t eax, val;
@@ -162,11 +162,11 @@
if (vie_calculate_gla(vis->paging.cpu_mode,
vis->seg_name, &vis->seg_desc, index, bytes,
addrsize, prot, &gla)) {
- vm_inject_gp(ctx, vcpu);
+ vm_inject_gp(vcpu);
break;
}
- error = vm_copy_setup(ctx, vcpu, &vis->paging, gla,
+ error = vm_copy_setup(vcpu, &vis->paging, gla,
bytes, prot, iov, nitems(iov), &fault);
if (error) {
retval = -1; /* Unrecoverable error */
@@ -178,7 +178,7 @@
if (vie_alignment_check(vis->paging.cpl, bytes,
vis->cr0, vis->rflags, gla)) {
- vm_inject_ac(ctx, vcpu, 0);
+ vm_inject_ac(vcpu, 0);
break;
}
@@ -204,7 +204,7 @@
}
/* Update index register */
- error = vie_update_register(ctx, vcpu, idxreg, index, addrsize);
+ error = vie_update_register(vcpu, idxreg, index, addrsize);
assert(error == 0);
/*
@@ -212,14 +212,14 @@
* prefix.
*/
if (rep) {
- error = vie_update_register(ctx, vcpu, VM_REG_GUEST_RCX,
+ error = vie_update_register(vcpu, VM_REG_GUEST_RCX,
count, addrsize);
assert(error == 0);
}
/* Restart the instruction if more iterations remain */
if (retval == 0 && count != 0) {
- error = vm_restart_instruction(ctx, vcpu);
+ error = vm_restart_instruction(vcpu);
assert(error == 0);
}
} else {
@@ -229,7 +229,7 @@
if (retval == 0 && in) {
eax &= ~vie_size2mask(bytes);
eax |= val & vie_size2mask(bytes);
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX,
+ error = vm_set_register(vcpu, VM_REG_GUEST_RAX,
eax);
assert(error == 0);
}
diff --git a/usr.sbin/bhyve/kernemu_dev.c b/usr.sbin/bhyve/kernemu_dev.c
--- a/usr.sbin/bhyve/kernemu_dev.c
+++ b/usr.sbin/bhyve/kernemu_dev.c
@@ -46,10 +46,10 @@
#include "mem.h"
static int
-apic_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, int size,
+apic_handler(struct vcpu *vcpu, int dir, uint64_t addr, int size,
uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
- if (vm_readwrite_kernemu_device(ctx, vcpu, addr, (dir == MEM_F_WRITE),
+ if (vm_readwrite_kernemu_device(vcpu, addr, (dir == MEM_F_WRITE),
size, val) != 0)
return (errno);
return (0);
diff --git a/usr.sbin/bhyve/mem.h b/usr.sbin/bhyve/mem.h
--- a/usr.sbin/bhyve/mem.h
+++ b/usr.sbin/bhyve/mem.h
@@ -33,9 +33,9 @@
#include <sys/linker_set.h>
-struct vmctx;
+struct vcpu;
-typedef int (*mem_func_t)(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
+typedef int (*mem_func_t)(struct vcpu *vcpu, int dir, uint64_t addr,
int size, uint64_t *val, void *arg1, long arg2);
struct mem_range {
@@ -53,15 +53,13 @@
#define MEM_F_IMMUTABLE 0x4 /* mem_range cannot be unregistered */
void init_mem(int ncpu);
-int emulate_mem(struct vmctx *, int vcpu, uint64_t paddr, struct vie *vie,
+int emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging);
-int read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval,
- int size);
+int read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size);
int register_mem(struct mem_range *memp);
int register_mem_fallback(struct mem_range *memp);
int unregister_mem(struct mem_range *memp);
-int write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval,
- int size);
+int write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size);
#endif /* _MEM_H_ */
diff --git a/usr.sbin/bhyve/mem.c b/usr.sbin/bhyve/mem.c
--- a/usr.sbin/bhyve/mem.c
+++ b/usr.sbin/bhyve/mem.c
@@ -48,6 +48,7 @@
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
+#include <vmmapi.h>
#include "mem.h"
@@ -142,53 +143,53 @@
RB_GENERATE(mmio_rb_tree, mmio_rb_range, mr_link, mmio_rb_range_compare);
-typedef int (mem_cb_t)(struct vmctx *ctx, int vcpu, uint64_t gpa,
- struct mem_range *mr, void *arg);
+typedef int (mem_cb_t)(struct vcpu *vcpu, uint64_t gpa, struct mem_range *mr,
+ void *arg);
static int
-mem_read(void *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
+mem_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
- error = (*mr->handler)(ctx, vcpu, MEM_F_READ, gpa, size,
- rval, mr->arg1, mr->arg2);
+ error = (*mr->handler)(vcpu, MEM_F_READ, gpa, size, rval, mr->arg1,
+ mr->arg2);
return (error);
}
static int
-mem_write(void *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
+mem_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size, void *arg)
{
int error;
struct mem_range *mr = arg;
- error = (*mr->handler)(ctx, vcpu, MEM_F_WRITE, gpa, size,
- &wval, mr->arg1, mr->arg2);
+ error = (*mr->handler)(vcpu, MEM_F_WRITE, gpa, size, &wval, mr->arg1,
+ mr->arg2);
return (error);
}
static int
-access_memory(struct vmctx *ctx, int vcpu, uint64_t paddr, mem_cb_t *cb,
- void *arg)
+access_memory(struct vcpu *vcpu, uint64_t paddr, mem_cb_t *cb, void *arg)
{
struct mmio_rb_range *entry;
- int err, perror, immutable;
+ int err, perror, immutable, vcpuid;
+ vcpuid = vcpu_id(vcpu);
pthread_rwlock_rdlock(&mmio_rwlock);
/*
* First check the per-vCPU cache
*/
- if (mmio_hint[vcpu] &&
- paddr >= mmio_hint[vcpu]->mr_base &&
- paddr <= mmio_hint[vcpu]->mr_end) {
- entry = mmio_hint[vcpu];
+ if (mmio_hint[vcpuid] &&
+ paddr >= mmio_hint[vcpuid]->mr_base &&
+ paddr <= mmio_hint[vcpuid]->mr_end) {
+ entry = mmio_hint[vcpuid];
} else
entry = NULL;
if (entry == NULL) {
if (mmio_rb_lookup(&mmio_rb_root, paddr, &entry) == 0) {
/* Update the per-vCPU cache */
- mmio_hint[vcpu] = entry;
+ mmio_hint[vcpuid] = entry;
} else if (mmio_rb_lookup(&mmio_rb_fallback, paddr, &entry)) {
perror = pthread_rwlock_unlock(&mmio_rwlock);
assert(perror == 0);
@@ -215,14 +216,13 @@
assert(perror == 0);
}
- err = cb(ctx, vcpu, paddr, &entry->mr_param, arg);
+ err = cb(vcpu, paddr, &entry->mr_param, arg);
if (!immutable) {
perror = pthread_rwlock_unlock(&mmio_rwlock);
assert(perror == 0);
}
-
return (err);
}
@@ -232,26 +232,25 @@
};
static int
-emulate_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
+emulate_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr,
void *arg)
{
struct emulate_mem_args *ema;
ema = arg;
- return (vmm_emulate_instruction(ctx, vcpu, paddr, ema->vie, ema->paging,
+ return (vmm_emulate_instruction(vcpu, paddr, ema->vie, ema->paging,
mem_read, mem_write, mr));
}
int
-emulate_mem(struct vmctx *ctx, int vcpu, uint64_t paddr, struct vie *vie,
+emulate_mem(struct vcpu *vcpu, uint64_t paddr, struct vie *vie,
struct vm_guest_paging *paging)
-
{
struct emulate_mem_args ema;
ema.vie = vie;
ema.paging = paging;
- return (access_memory(ctx, vcpu, paddr, emulate_mem_cb, &ema));
+ return (access_memory(vcpu, paddr, emulate_mem_cb, &ema));
}
struct rw_mem_args {
@@ -261,36 +260,35 @@
};
static int
-rw_mem_cb(struct vmctx *ctx, int vcpu, uint64_t paddr, struct mem_range *mr,
- void *arg)
+rw_mem_cb(struct vcpu *vcpu, uint64_t paddr, struct mem_range *mr, void *arg)
{
struct rw_mem_args *rma;
rma = arg;
- return (mr->handler(ctx, vcpu, rma->operation, paddr, rma->size,
+ return (mr->handler(vcpu, rma->operation, paddr, rma->size,
rma->val, mr->arg1, mr->arg2));
}
int
-read_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t *rval, int size)
+read_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size)
{
struct rw_mem_args rma;
rma.val = rval;
rma.size = size;
rma.operation = MEM_F_READ;
- return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
+ return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
int
-write_mem(struct vmctx *ctx, int vcpu, uint64_t gpa, uint64_t wval, int size)
+write_mem(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size)
{
struct rw_mem_args rma;
rma.val = &wval;
rma.size = size;
rma.operation = MEM_F_WRITE;
- return (access_memory(ctx, vcpu, gpa, rw_mem_cb, &rma));
+ return (access_memory(vcpu, gpa, rw_mem_cb, &rma));
}
static int
diff --git a/usr.sbin/bhyve/pci_emul.c b/usr.sbin/bhyve/pci_emul.c
--- a/usr.sbin/bhyve/pci_emul.c
+++ b/usr.sbin/bhyve/pci_emul.c
@@ -476,7 +476,7 @@
}
static int
-pci_emul_mem_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
+pci_emul_mem_handler(struct vcpu *vcpu __unused, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1, long arg2)
{
struct pci_devinst *pdi = arg1;
@@ -1278,8 +1278,8 @@
}
static int
-pci_emul_fallback_handler(struct vmctx *ctx __unused, int vcpu __unused,
- int dir, uint64_t addr __unused, int size __unused, uint64_t *val,
+pci_emul_fallback_handler(struct vcpu *vcpu __unused, int dir,
+ uint64_t addr __unused, int size __unused, uint64_t *val,
void *arg1 __unused, long arg2 __unused)
{
/*
@@ -1294,9 +1294,8 @@
}
static int
-pci_emul_ecfg_handler(struct vmctx *ctx __unused, int vcpu __unused, int dir,
- uint64_t addr, int bytes, uint64_t *val, void *arg1 __unused,
- long arg2 __unused)
+pci_emul_ecfg_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr,
+ int bytes, uint64_t *val, void *arg1 __unused, long arg2 __unused)
{
int bus, slot, func, coff, in;
diff --git a/usr.sbin/bhyve/pci_passthru.c b/usr.sbin/bhyve/pci_passthru.c
--- a/usr.sbin/bhyve/pci_passthru.c
+++ b/usr.sbin/bhyve/pci_passthru.c
@@ -445,7 +445,7 @@
/* If the entry is masked, don't set it up */
if ((entry->vector_control & PCIM_MSIX_VCTRL_MASK) == 0 ||
(vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
- (void)vm_setup_pptdev_msix(sc->psc_pi->pi_vmctx, 0,
+ (void)vm_setup_pptdev_msix(sc->psc_pi->pi_vmctx,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, index, entry->addr,
entry->msg_data, entry->vector_control);
@@ -966,7 +966,7 @@
if (msicap_access(sc, coff)) {
pci_emul_capwrite(pi, coff, bytes, val, sc->psc_msi.capoff,
PCIY_MSI);
- error = vm_setup_pptdev_msi(pi->pi_vmctx, 0, sc->psc_sel.pc_bus,
+ error = vm_setup_pptdev_msi(pi->pi_vmctx, sc->psc_sel.pc_bus,
sc->psc_sel.pc_dev, sc->psc_sel.pc_func,
pi->pi_msi.addr, pi->pi_msi.msg_data,
pi->pi_msi.maxmsgnum);
@@ -981,7 +981,7 @@
if (pi->pi_msix.enabled) {
msix_table_entries = pi->pi_msix.table_count;
for (i = 0; i < msix_table_entries; i++) {
- error = vm_setup_pptdev_msix(pi->pi_vmctx, 0,
+ error = vm_setup_pptdev_msix(pi->pi_vmctx,
sc->psc_sel.pc_bus, sc->psc_sel.pc_dev,
sc->psc_sel.pc_func, i,
pi->pi_msix.table[i].addr,
diff --git a/usr.sbin/bhyve/pctestdev.c b/usr.sbin/bhyve/pctestdev.c
--- a/usr.sbin/bhyve/pctestdev.c
+++ b/usr.sbin/bhyve/pctestdev.c
@@ -74,7 +74,7 @@
static int pctestdev_debugexit_io(struct vmctx *ctx, int in,
int port, int bytes, uint32_t *eax, void *arg);
-static int pctestdev_iomem_io(struct vmctx *ctx, int vcpu, int dir,
+static int pctestdev_iomem_io(struct vcpu *vcpu, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1,
long arg2);
static int pctestdev_ioport_io(struct vmctx *ctx, int in,
@@ -190,7 +190,7 @@
}
static int
-pctestdev_iomem_io(struct vmctx *ctx __unused, int vcpu __unused, int dir,
+pctestdev_iomem_io(struct vcpu *vcpu __unused, int dir,
uint64_t addr, int size, uint64_t *val, void *arg1 __unused,
long arg2 __unused)
{
diff --git a/usr.sbin/bhyve/snapshot.c b/usr.sbin/bhyve/snapshot.c
--- a/usr.sbin/bhyve/snapshot.c
+++ b/usr.sbin/bhyve/snapshot.c
@@ -1296,7 +1296,7 @@
pthread_mutex_lock(&vcpu_lock);
checkpoint_active = true;
- vm_suspend_cpu(ctx, -1);
+ vm_suspend_all_cpus(ctx);
while (CPU_CMP(&vcpus_active, &vcpus_suspended) != 0)
pthread_cond_wait(&vcpus_idle, &vcpu_lock);
pthread_mutex_unlock(&vcpu_lock);
@@ -1309,7 +1309,7 @@
pthread_mutex_lock(&vcpu_lock);
checkpoint_active = false;
pthread_mutex_unlock(&vcpu_lock);
- vm_resume_cpu(ctx, -1);
+ vm_resume_all_cpus(ctx);
pthread_cond_broadcast(&vcpus_can_run);
}
diff --git a/usr.sbin/bhyve/spinup_ap.h b/usr.sbin/bhyve/spinup_ap.h
--- a/usr.sbin/bhyve/spinup_ap.h
+++ b/usr.sbin/bhyve/spinup_ap.h
@@ -31,6 +31,6 @@
#ifndef _SPINUP_AP_H_
#define _SPINUP_AP_H_
-void spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip);
+void spinup_ap(struct vcpu *newcpu, uint64_t rip);
#endif
diff --git a/usr.sbin/bhyve/spinup_ap.c b/usr.sbin/bhyve/spinup_ap.c
--- a/usr.sbin/bhyve/spinup_ap.c
+++ b/usr.sbin/bhyve/spinup_ap.c
@@ -45,7 +45,7 @@
#include "spinup_ap.h"
static void
-spinup_ap_realmode(struct vmctx *ctx, int newcpu, uint64_t rip)
+spinup_ap_realmode(struct vcpu *newcpu, uint64_t rip)
{
int vector, error;
uint16_t cs;
@@ -58,35 +58,32 @@
* Update the %cs and %rip of the guest so that it starts
* executing real mode code at at 'vector << 12'.
*/
- error = vm_set_register(ctx, newcpu, VM_REG_GUEST_RIP, 0);
+ error = vm_set_register(newcpu, VM_REG_GUEST_RIP, 0);
assert(error == 0);
- error = vm_get_desc(ctx, newcpu, VM_REG_GUEST_CS, &desc_base,
+ error = vm_get_desc(newcpu, VM_REG_GUEST_CS, &desc_base,
&desc_limit, &desc_access);
assert(error == 0);
desc_base = vector << PAGE_SHIFT;
- error = vm_set_desc(ctx, newcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(newcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
assert(error == 0);
cs = (vector << PAGE_SHIFT) >> 4;
- error = vm_set_register(ctx, newcpu, VM_REG_GUEST_CS, cs);
+ error = vm_set_register(newcpu, VM_REG_GUEST_CS, cs);
assert(error == 0);
}
void
-spinup_ap(struct vmctx *ctx, int newcpu, uint64_t rip)
+spinup_ap(struct vcpu *newcpu, uint64_t rip)
{
int error;
- assert(newcpu != 0);
- assert(newcpu < guest_ncpus);
-
- error = vcpu_reset(ctx, newcpu);
+ error = vcpu_reset(newcpu);
assert(error == 0);
- spinup_ap_realmode(ctx, newcpu, rip);
+ spinup_ap_realmode(newcpu, rip);
- vm_resume_cpu(ctx, newcpu);
+ vm_resume_cpu(newcpu);
}
diff --git a/usr.sbin/bhyve/task_switch.c b/usr.sbin/bhyve/task_switch.c
--- a/usr.sbin/bhyve/task_switch.c
+++ b/usr.sbin/bhyve/task_switch.c
@@ -101,22 +101,22 @@
#define TSS_BUSY(type) (((type) & 0x2) != 0)
static uint64_t
-GETREG(struct vmctx *ctx, int vcpu, int reg)
+GETREG(struct vcpu *vcpu, int reg)
{
uint64_t val;
int error;
- error = vm_get_register(ctx, vcpu, reg, &val);
+ error = vm_get_register(vcpu, reg, &val);
assert(error == 0);
return (val);
}
static void
-SETREG(struct vmctx *ctx, int vcpu, int reg, uint64_t val)
+SETREG(struct vcpu *vcpu, int reg, uint64_t val)
{
int error;
- error = vm_set_register(ctx, vcpu, reg, val);
+ error = vm_set_register(vcpu, reg, val);
assert(error == 0);
}
@@ -152,7 +152,7 @@
* Bit 2(GDT/LDT) has the usual interpretation of Table Indicator (TI).
*/
static void
-sel_exception(struct vmctx *ctx, int vcpu, int vector, uint16_t sel, int ext)
+sel_exception(struct vcpu *vcpu, int vector, uint16_t sel, int ext)
{
/*
* Bit 2 from the selector is retained as-is in the error code.
@@ -166,7 +166,7 @@
sel &= ~0x3;
if (ext)
sel |= 0x1;
- vm_inject_fault(ctx, vcpu, vector, 1, sel);
+ vm_inject_fault(vcpu, vector, 1, sel);
}
/*
@@ -174,14 +174,14 @@
* and non-zero otherwise.
*/
static int
-desc_table_limit_check(struct vmctx *ctx, int vcpu, uint16_t sel)
+desc_table_limit_check(struct vcpu *vcpu, uint16_t sel)
{
uint64_t base;
uint32_t limit, access;
int error, reg;
reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
- error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
+ error = vm_get_desc(vcpu, reg, &base, &limit, &access);
assert(error == 0);
if (reg == VM_REG_GUEST_LDTR) {
@@ -204,7 +204,7 @@
* Returns -1 otherwise.
*/
static int
-desc_table_rw(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_rw(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, bool doread,
int *faultptr)
{
@@ -214,11 +214,11 @@
int error, reg;
reg = ISLDT(sel) ? VM_REG_GUEST_LDTR : VM_REG_GUEST_GDTR;
- error = vm_get_desc(ctx, vcpu, reg, &base, &limit, &access);
+ error = vm_get_desc(vcpu, reg, &base, &limit, &access);
assert(error == 0);
assert(limit >= SEL_LIMIT(sel));
- error = vm_copy_setup(ctx, vcpu, paging, base + SEL_START(sel),
+ error = vm_copy_setup(vcpu, paging, base + SEL_START(sel),
sizeof(*desc), doread ? PROT_READ : PROT_WRITE, iov, nitems(iov),
faultptr);
if (error || *faultptr)
@@ -232,17 +232,17 @@
}
static int
-desc_table_read(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_read(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
- return (desc_table_rw(ctx, vcpu, paging, sel, desc, true, faultptr));
+ return (desc_table_rw(vcpu, paging, sel, desc, true, faultptr));
}
static int
-desc_table_write(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+desc_table_write(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
- return (desc_table_rw(ctx, vcpu, paging, sel, desc, false, faultptr));
+ return (desc_table_rw(vcpu, paging, sel, desc, false, faultptr));
}
/*
@@ -253,7 +253,7 @@
* Returns -1 otherwise.
*/
static int
-read_tss_descriptor(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+read_tss_descriptor(struct vcpu *vcpu, struct vm_task_switch *ts,
uint16_t sel, struct user_segment_descriptor *desc, int *faultptr)
{
struct vm_guest_paging sup_paging;
@@ -263,17 +263,17 @@
assert(IDXSEL(sel) != 0);
/* Fetch the new TSS descriptor */
- if (desc_table_limit_check(ctx, vcpu, sel)) {
+ if (desc_table_limit_check(vcpu, sel)) {
if (ts->reason == TSR_IRET)
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
else
- sel_exception(ctx, vcpu, IDT_GP, sel, ts->ext);
+ sel_exception(vcpu, IDT_GP, sel, ts->ext);
return (1);
}
sup_paging = ts->paging;
sup_paging.cpl = 0; /* implicit supervisor mode */
- error = desc_table_read(ctx, vcpu, &sup_paging, sel, desc, faultptr);
+ error = desc_table_read(vcpu, &sup_paging, sel, desc, faultptr);
return (error);
}
@@ -309,7 +309,7 @@
* Validate the descriptor 'seg_desc' associated with 'segment'.
*/
static int
-validate_seg_desc(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+validate_seg_desc(struct vcpu *vcpu, struct vm_task_switch *ts,
int segment, struct seg_desc *seg_desc, int *faultptr)
{
struct vm_guest_paging sup_paging;
@@ -341,17 +341,17 @@
}
/* Get the segment selector */
- sel = GETREG(ctx, vcpu, segment);
+ sel = GETREG(vcpu, segment);
/* LDT selector must point into the GDT */
if (ldtseg && ISLDT(sel)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
/* Descriptor table limit check */
- if (desc_table_limit_check(ctx, vcpu, sel)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ if (desc_table_limit_check(vcpu, sel)) {
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -359,7 +359,7 @@
if (IDXSEL(sel) == 0) {
/* Code and stack segment selectors cannot be NULL */
if (codeseg || stackseg) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
seg_desc->base = 0;
@@ -371,7 +371,7 @@
/* Read the descriptor from the GDT/LDT */
sup_paging = ts->paging;
sup_paging.cpl = 0; /* implicit supervisor mode */
- error = desc_table_read(ctx, vcpu, &sup_paging, sel, &usd, faultptr);
+ error = desc_table_read(vcpu, &sup_paging, sel, &usd, faultptr);
if (error || *faultptr)
return (error);
@@ -380,7 +380,7 @@
(codeseg && !code_desc(usd.sd_type)) ||
(dataseg && !data_desc(usd.sd_type)) ||
(stackseg && !stack_desc(usd.sd_type))) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -392,17 +392,17 @@
idtvec = IDT_SS;
else
idtvec = IDT_NP;
- sel_exception(ctx, vcpu, idtvec, sel, ts->ext);
+ sel_exception(vcpu, idtvec, sel, ts->ext);
return (1);
}
- cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
+ cs = GETREG(vcpu, VM_REG_GUEST_CS);
cpl = cs & SEL_RPL_MASK;
rpl = sel & SEL_RPL_MASK;
dpl = usd.sd_dpl;
if (stackseg && (rpl != cpl || dpl != cpl)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
@@ -410,7 +410,7 @@
conforming = (usd.sd_type & 0x4) ? true : false;
if ((conforming && (cpl < dpl)) ||
(!conforming && (cpl != dpl))) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
}
@@ -426,7 +426,7 @@
conforming = false;
if (!conforming && (rpl > dpl || cpl > dpl)) {
- sel_exception(ctx, vcpu, IDT_TS, sel, ts->ext);
+ sel_exception(vcpu, IDT_TS, sel, ts->ext);
return (1);
}
}
@@ -435,30 +435,30 @@
}
static void
-tss32_save(struct vmctx *ctx, int vcpu, struct vm_task_switch *task_switch,
+tss32_save(struct vcpu *vcpu, struct vm_task_switch *task_switch,
uint32_t eip, struct tss32 *tss, struct iovec *iov)
{
/* General purpose registers */
- tss->tss_eax = GETREG(ctx, vcpu, VM_REG_GUEST_RAX);
- tss->tss_ecx = GETREG(ctx, vcpu, VM_REG_GUEST_RCX);
- tss->tss_edx = GETREG(ctx, vcpu, VM_REG_GUEST_RDX);
- tss->tss_ebx = GETREG(ctx, vcpu, VM_REG_GUEST_RBX);
- tss->tss_esp = GETREG(ctx, vcpu, VM_REG_GUEST_RSP);
- tss->tss_ebp = GETREG(ctx, vcpu, VM_REG_GUEST_RBP);
- tss->tss_esi = GETREG(ctx, vcpu, VM_REG_GUEST_RSI);
- tss->tss_edi = GETREG(ctx, vcpu, VM_REG_GUEST_RDI);
+ tss->tss_eax = GETREG(vcpu, VM_REG_GUEST_RAX);
+ tss->tss_ecx = GETREG(vcpu, VM_REG_GUEST_RCX);
+ tss->tss_edx = GETREG(vcpu, VM_REG_GUEST_RDX);
+ tss->tss_ebx = GETREG(vcpu, VM_REG_GUEST_RBX);
+ tss->tss_esp = GETREG(vcpu, VM_REG_GUEST_RSP);
+ tss->tss_ebp = GETREG(vcpu, VM_REG_GUEST_RBP);
+ tss->tss_esi = GETREG(vcpu, VM_REG_GUEST_RSI);
+ tss->tss_edi = GETREG(vcpu, VM_REG_GUEST_RDI);
/* Segment selectors */
- tss->tss_es = GETREG(ctx, vcpu, VM_REG_GUEST_ES);
- tss->tss_cs = GETREG(ctx, vcpu, VM_REG_GUEST_CS);
- tss->tss_ss = GETREG(ctx, vcpu, VM_REG_GUEST_SS);
- tss->tss_ds = GETREG(ctx, vcpu, VM_REG_GUEST_DS);
- tss->tss_fs = GETREG(ctx, vcpu, VM_REG_GUEST_FS);
- tss->tss_gs = GETREG(ctx, vcpu, VM_REG_GUEST_GS);
+ tss->tss_es = GETREG(vcpu, VM_REG_GUEST_ES);
+ tss->tss_cs = GETREG(vcpu, VM_REG_GUEST_CS);
+ tss->tss_ss = GETREG(vcpu, VM_REG_GUEST_SS);
+ tss->tss_ds = GETREG(vcpu, VM_REG_GUEST_DS);
+ tss->tss_fs = GETREG(vcpu, VM_REG_GUEST_FS);
+ tss->tss_gs = GETREG(vcpu, VM_REG_GUEST_GS);
/* eflags and eip */
- tss->tss_eflags = GETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS);
+ tss->tss_eflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
if (task_switch->reason == TSR_IRET)
tss->tss_eflags &= ~PSL_NT;
tss->tss_eip = eip;
@@ -468,11 +468,11 @@
}
static void
-update_seg_desc(struct vmctx *ctx, int vcpu, int reg, struct seg_desc *sd)
+update_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *sd)
{
int error;
- error = vm_set_desc(ctx, vcpu, reg, sd->base, sd->limit, sd->access);
+ error = vm_set_desc(vcpu, reg, sd->base, sd->limit, sd->access);
assert(error == 0);
}
@@ -480,7 +480,7 @@
* Update the vcpu registers to reflect the state of the new task.
*/
static int
-tss32_restore(struct vmctx *ctx, int vcpu, struct vm_task_switch *ts,
+tss32_restore(struct vmctx *ctx, struct vcpu *vcpu, struct vm_task_switch *ts,
uint16_t ot_sel, struct tss32 *tss, struct iovec *iov, int *faultptr)
{
struct seg_desc seg_desc, seg_desc2;
@@ -500,7 +500,7 @@
eflags |= PSL_NT;
/* LDTR */
- SETREG(ctx, vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);
+ SETREG(vcpu, VM_REG_GUEST_LDTR, tss->tss_ldt);
/* PBDR */
if (ts->paging.paging_mode != PAGING_MODE_FLAT) {
@@ -520,40 +520,40 @@
*/
reserved = ~maxphyaddr | 0x1E6;
if (pdpte[i] & reserved) {
- vm_inject_gp(ctx, vcpu);
+ vm_inject_gp(vcpu);
return (1);
}
}
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
- SETREG(ctx, vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE0, pdpte[0]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE1, pdpte[1]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE2, pdpte[2]);
+ SETREG(vcpu, VM_REG_GUEST_PDPTE3, pdpte[3]);
}
- SETREG(ctx, vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
+ SETREG(vcpu, VM_REG_GUEST_CR3, tss->tss_cr3);
ts->paging.cr3 = tss->tss_cr3;
}
/* eflags and eip */
- SETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS, eflags);
- SETREG(ctx, vcpu, VM_REG_GUEST_RIP, tss->tss_eip);
+ SETREG(vcpu, VM_REG_GUEST_RFLAGS, eflags);
+ SETREG(vcpu, VM_REG_GUEST_RIP, tss->tss_eip);
/* General purpose registers */
- SETREG(ctx, vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
- SETREG(ctx, vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
- SETREG(ctx, vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
- SETREG(ctx, vcpu, VM_REG_GUEST_RDI, tss->tss_edi);
+ SETREG(vcpu, VM_REG_GUEST_RAX, tss->tss_eax);
+ SETREG(vcpu, VM_REG_GUEST_RCX, tss->tss_ecx);
+ SETREG(vcpu, VM_REG_GUEST_RDX, tss->tss_edx);
+ SETREG(vcpu, VM_REG_GUEST_RBX, tss->tss_ebx);
+ SETREG(vcpu, VM_REG_GUEST_RSP, tss->tss_esp);
+ SETREG(vcpu, VM_REG_GUEST_RBP, tss->tss_ebp);
+ SETREG(vcpu, VM_REG_GUEST_RSI, tss->tss_esi);
+ SETREG(vcpu, VM_REG_GUEST_RDI, tss->tss_edi);
/* Segment selectors */
- SETREG(ctx, vcpu, VM_REG_GUEST_ES, tss->tss_es);
- SETREG(ctx, vcpu, VM_REG_GUEST_CS, tss->tss_cs);
- SETREG(ctx, vcpu, VM_REG_GUEST_SS, tss->tss_ss);
- SETREG(ctx, vcpu, VM_REG_GUEST_DS, tss->tss_ds);
- SETREG(ctx, vcpu, VM_REG_GUEST_FS, tss->tss_fs);
- SETREG(ctx, vcpu, VM_REG_GUEST_GS, tss->tss_gs);
+ SETREG(vcpu, VM_REG_GUEST_ES, tss->tss_es);
+ SETREG(vcpu, VM_REG_GUEST_CS, tss->tss_cs);
+ SETREG(vcpu, VM_REG_GUEST_SS, tss->tss_ss);
+ SETREG(vcpu, VM_REG_GUEST_DS, tss->tss_ds);
+ SETREG(vcpu, VM_REG_GUEST_FS, tss->tss_fs);
+ SETREG(vcpu, VM_REG_GUEST_GS, tss->tss_gs);
/*
* If this is a nested task then write out the new TSS to update
@@ -563,11 +563,11 @@
vm_copyout(tss, iov, sizeof(*tss));
/* Validate segment descriptors */
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_LDTR, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_LDTR, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_LDTR, &seg_desc);
/*
* Section "Checks on Guest Segment Registers", Intel SDM, Vol 3.
@@ -578,42 +578,42 @@
* VM-entry checks so the guest can handle any exception injected
* during task switch emulation.
*/
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_CS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_CS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_SS, &seg_desc2,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_SS, &seg_desc2,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_CS, &seg_desc);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_SS, &seg_desc2);
+ update_seg_desc(vcpu, VM_REG_GUEST_CS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_SS, &seg_desc2);
ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK;
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_DS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_DS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_DS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_DS, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_ES, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_ES, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_ES, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_ES, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_FS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_FS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_FS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_FS, &seg_desc);
- error = validate_seg_desc(ctx, vcpu, ts, VM_REG_GUEST_GS, &seg_desc,
+ error = validate_seg_desc(vcpu, ts, VM_REG_GUEST_GS, &seg_desc,
faultptr);
if (error || *faultptr)
return (error);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_GS, &seg_desc);
+ update_seg_desc(vcpu, VM_REG_GUEST_GS, &seg_desc);
return (0);
}
@@ -624,7 +624,7 @@
* code to be saved (e.g. #PF).
*/
static int
-push_errcode(struct vmctx *ctx, int vcpu, struct vm_guest_paging *paging,
+push_errcode(struct vcpu *vcpu, struct vm_guest_paging *paging,
int task_type, uint32_t errcode, int *faultptr)
{
struct iovec iov[2];
@@ -636,11 +636,11 @@
*faultptr = 0;
- cr0 = GETREG(ctx, vcpu, VM_REG_GUEST_CR0);
- rflags = GETREG(ctx, vcpu, VM_REG_GUEST_RFLAGS);
- stacksel = GETREG(ctx, vcpu, VM_REG_GUEST_SS);
+ cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
+ rflags = GETREG(vcpu, VM_REG_GUEST_RFLAGS);
+ stacksel = GETREG(vcpu, VM_REG_GUEST_SS);
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_SS, &seg_desc.base,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_SS, &seg_desc.base,
&seg_desc.limit, &seg_desc.access);
assert(error == 0);
@@ -664,29 +664,29 @@
else
stacksize = 2;
- esp = GETREG(ctx, vcpu, VM_REG_GUEST_RSP);
+ esp = GETREG(vcpu, VM_REG_GUEST_RSP);
esp -= bytes;
if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS,
&seg_desc, esp, bytes, stacksize, PROT_WRITE, &gla)) {
- sel_exception(ctx, vcpu, IDT_SS, stacksel, 1);
+ sel_exception(vcpu, IDT_SS, stacksel, 1);
*faultptr = 1;
return (0);
}
if (vie_alignment_check(paging->cpl, bytes, cr0, rflags, gla)) {
- vm_inject_ac(ctx, vcpu, 1);
+ vm_inject_ac(vcpu, 1);
*faultptr = 1;
return (0);
}
- error = vm_copy_setup(ctx, vcpu, paging, gla, bytes, PROT_WRITE,
+ error = vm_copy_setup(vcpu, paging, gla, bytes, PROT_WRITE,
iov, nitems(iov), faultptr);
if (error || *faultptr)
return (error);
vm_copyout(&errcode, iov, bytes);
- SETREG(ctx, vcpu, VM_REG_GUEST_RSP, esp);
+ SETREG(vcpu, VM_REG_GUEST_RSP, esp);
return (0);
}
@@ -704,7 +704,7 @@
} while (0)
int
-vmexit_task_switch(struct vmctx *ctx, struct vm_exit *vmexit, int *pvcpu)
+vmexit_task_switch(struct vmctx *ctx, struct vcpu *vcpu, struct vm_exit *vmexit)
{
struct seg_desc nt;
struct tss32 oldtss, newtss;
@@ -714,7 +714,7 @@
struct iovec nt_iov[2], ot_iov[2];
uint64_t cr0, ot_base;
uint32_t eip, ot_lim, access;
- int error, ext, fault, minlimit, nt_type, ot_type, vcpu;
+ int error, ext, fault, minlimit, nt_type, ot_type;
enum task_switch_reason reason;
uint16_t nt_sel, ot_sel;
@@ -723,7 +723,6 @@
ext = vmexit->u.task_switch.ext;
reason = vmexit->u.task_switch.reason;
paging = &vmexit->u.task_switch.paging;
- vcpu = *pvcpu;
assert(paging->cpu_mode == CPU_MODE_PROTECTED);
@@ -742,7 +741,7 @@
sup_paging.cpl = 0; /* implicit supervisor mode */
/* Fetch the new TSS descriptor */
- error = read_tss_descriptor(ctx, vcpu, task_switch, nt_sel, &nt_desc,
+ error = read_tss_descriptor(vcpu, task_switch, nt_sel, &nt_desc,
&fault);
CHKERR(error, fault);
@@ -752,13 +751,13 @@
nt_type = SEG_DESC_TYPE(nt.access);
if (nt_type != SDT_SYS386BSY && nt_type != SDT_SYS386TSS &&
nt_type != SDT_SYS286BSY && nt_type != SDT_SYS286TSS) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
/* TSS descriptor must have present bit set */
if (!SEG_DESC_PRESENT(nt.access)) {
- sel_exception(ctx, vcpu, IDT_NP, nt_sel, ext);
+ sel_exception(vcpu, IDT_NP, nt_sel, ext);
goto done;
}
@@ -775,13 +774,13 @@
assert(minlimit > 0);
if (nt.limit < (unsigned int)minlimit) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
/* TSS must be busy if task switch is due to IRET */
if (reason == TSR_IRET && !TSS_BUSY(nt_type)) {
- sel_exception(ctx, vcpu, IDT_TS, nt_sel, ext);
+ sel_exception(vcpu, IDT_TS, nt_sel, ext);
goto done;
}
@@ -790,18 +789,18 @@
* CALL, JMP, exception or interrupt.
*/
if (reason != TSR_IRET && TSS_BUSY(nt_type)) {
- sel_exception(ctx, vcpu, IDT_GP, nt_sel, ext);
+ sel_exception(vcpu, IDT_GP, nt_sel, ext);
goto done;
}
/* Fetch the new TSS */
- error = vm_copy_setup(ctx, vcpu, &sup_paging, nt.base, minlimit + 1,
+ error = vm_copy_setup(vcpu, &sup_paging, nt.base, minlimit + 1,
PROT_READ | PROT_WRITE, nt_iov, nitems(nt_iov), &fault);
CHKERR(error, fault);
vm_copyin(nt_iov, &newtss, minlimit + 1);
/* Get the old TSS selector from the guest's task register */
- ot_sel = GETREG(ctx, vcpu, VM_REG_GUEST_TR);
+ ot_sel = GETREG(vcpu, VM_REG_GUEST_TR);
if (ISLDT(ot_sel) || IDXSEL(ot_sel) == 0) {
/*
* This might happen if a task switch was attempted without
@@ -809,12 +808,12 @@
* TR would contain the values from power-on:
* (sel = 0, base = 0, limit = 0xffff).
*/
- sel_exception(ctx, vcpu, IDT_TS, ot_sel, task_switch->ext);
+ sel_exception(vcpu, IDT_TS, ot_sel, task_switch->ext);
goto done;
}
/* Get the old TSS base and limit from the guest's task register */
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_TR, &ot_base, &ot_lim,
&access);
assert(error == 0);
assert(!SEG_DESC_UNUSABLE(access) && SEG_DESC_PRESENT(access));
@@ -822,12 +821,12 @@
assert(ot_type == SDT_SYS386BSY || ot_type == SDT_SYS286BSY);
/* Fetch the old TSS descriptor */
- error = read_tss_descriptor(ctx, vcpu, task_switch, ot_sel, &ot_desc,
+ error = read_tss_descriptor(vcpu, task_switch, ot_sel, &ot_desc,
&fault);
CHKERR(error, fault);
/* Get the old TSS */
- error = vm_copy_setup(ctx, vcpu, &sup_paging, ot_base, minlimit + 1,
+ error = vm_copy_setup(vcpu, &sup_paging, ot_base, minlimit + 1,
PROT_READ | PROT_WRITE, ot_iov, nitems(ot_iov), &fault);
CHKERR(error, fault);
vm_copyin(ot_iov, &oldtss, minlimit + 1);
@@ -838,7 +837,7 @@
*/
if (reason == TSR_IRET || reason == TSR_JMP) {
ot_desc.sd_type &= ~0x2;
- error = desc_table_write(ctx, vcpu, &sup_paging, ot_sel,
+ error = desc_table_write(vcpu, &sup_paging, ot_sel,
&ot_desc, &fault);
CHKERR(error, fault);
}
@@ -849,7 +848,7 @@
}
/* Save processor state in old TSS */
- tss32_save(ctx, vcpu, task_switch, eip, &oldtss, ot_iov);
+ tss32_save(vcpu, task_switch, eip, &oldtss, ot_iov);
/*
* If the task switch was triggered for any reason other than IRET
@@ -857,28 +856,28 @@
*/
if (reason != TSR_IRET) {
nt_desc.sd_type |= 0x2;
- error = desc_table_write(ctx, vcpu, &sup_paging, nt_sel,
+ error = desc_table_write(vcpu, &sup_paging, nt_sel,
&nt_desc, &fault);
CHKERR(error, fault);
}
/* Update task register to point at the new TSS */
- SETREG(ctx, vcpu, VM_REG_GUEST_TR, nt_sel);
+ SETREG(vcpu, VM_REG_GUEST_TR, nt_sel);
/* Update the hidden descriptor state of the task register */
nt = usd_to_seg_desc(&nt_desc);
- update_seg_desc(ctx, vcpu, VM_REG_GUEST_TR, &nt);
+ update_seg_desc(vcpu, VM_REG_GUEST_TR, &nt);
/* Set CR0.TS */
- cr0 = GETREG(ctx, vcpu, VM_REG_GUEST_CR0);
- SETREG(ctx, vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);
+ cr0 = GETREG(vcpu, VM_REG_GUEST_CR0);
+ SETREG(vcpu, VM_REG_GUEST_CR0, cr0 | CR0_TS);
/*
* We are now committed to the task switch. Any exceptions encountered
* after this point will be handled in the context of the new task and
* the saved instruction pointer will belong to the new task.
*/
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, newtss.tss_eip);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RIP, newtss.tss_eip);
assert(error == 0);
/* Load processor state from new TSS */
@@ -894,7 +893,7 @@
if (task_switch->errcode_valid) {
assert(task_switch->ext);
assert(task_switch->reason == TSR_IDT_GATE);
- error = push_errcode(ctx, vcpu, &task_switch->paging, nt_type,
+ error = push_errcode(vcpu, &task_switch->paging, nt_type,
task_switch->errcode, &fault);
CHKERR(error, fault);
}
@@ -930,7 +929,7 @@
* exitintinfo.
*/
if (task_switch->reason == TSR_IDT_GATE) {
- error = vm_set_intinfo(ctx, vcpu, 0);
+ error = vm_set_intinfo(vcpu, 0);
assert(error == 0);
}
diff --git a/usr.sbin/bhyve/vga.c b/usr.sbin/bhyve/vga.c
--- a/usr.sbin/bhyve/vga.c
+++ b/usr.sbin/bhyve/vga.c
@@ -339,7 +339,7 @@
}
static uint64_t
-vga_mem_rd_handler(struct vmctx *ctx __unused, uint64_t addr, void *arg1)
+vga_mem_rd_handler(uint64_t addr, void *arg1)
{
struct vga_softc *sc = arg1;
uint8_t map_sel;
@@ -399,8 +399,7 @@
}
static void
-vga_mem_wr_handler(struct vmctx *ctx __unused, uint64_t addr, uint8_t val,
- void *arg1)
+vga_mem_wr_handler(uint64_t addr, uint8_t val, void *arg1)
{
struct vga_softc *sc = arg1;
uint8_t c0, c1, c2, c3;
@@ -654,59 +653,59 @@
}
static int
-vga_mem_handler(struct vmctx *ctx, int vcpu __unused, int dir, uint64_t addr,
- int size, uint64_t *val, void *arg1, long arg2 __unused)
+vga_mem_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr, int size,
+ uint64_t *val, void *arg1, long arg2 __unused)
{
if (dir == MEM_F_WRITE) {
switch (size) {
case 1:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
break;
case 2:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
break;
case 4:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
- vga_mem_wr_handler(ctx, addr + 2, *val >> 16, arg1);
- vga_mem_wr_handler(ctx, addr + 3, *val >> 24, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr + 2, *val >> 16, arg1);
+ vga_mem_wr_handler(addr + 3, *val >> 24, arg1);
break;
case 8:
- vga_mem_wr_handler(ctx, addr, *val, arg1);
- vga_mem_wr_handler(ctx, addr + 1, *val >> 8, arg1);
- vga_mem_wr_handler(ctx, addr + 2, *val >> 16, arg1);
- vga_mem_wr_handler(ctx, addr + 3, *val >> 24, arg1);
- vga_mem_wr_handler(ctx, addr + 4, *val >> 32, arg1);
- vga_mem_wr_handler(ctx, addr + 5, *val >> 40, arg1);
- vga_mem_wr_handler(ctx, addr + 6, *val >> 48, arg1);
- vga_mem_wr_handler(ctx, addr + 7, *val >> 56, arg1);
+ vga_mem_wr_handler(addr, *val, arg1);
+ vga_mem_wr_handler(addr + 1, *val >> 8, arg1);
+ vga_mem_wr_handler(addr + 2, *val >> 16, arg1);
+ vga_mem_wr_handler(addr + 3, *val >> 24, arg1);
+ vga_mem_wr_handler(addr + 4, *val >> 32, arg1);
+ vga_mem_wr_handler(addr + 5, *val >> 40, arg1);
+ vga_mem_wr_handler(addr + 6, *val >> 48, arg1);
+ vga_mem_wr_handler(addr + 7, *val >> 56, arg1);
break;
}
} else {
switch (size) {
case 1:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
+ *val = vga_mem_rd_handler(addr, arg1);
break;
case 2:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
break;
case 4:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
- *val |= vga_mem_rd_handler(ctx, addr + 2, arg1) << 16;
- *val |= vga_mem_rd_handler(ctx, addr + 3, arg1) << 24;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
+ *val |= vga_mem_rd_handler(addr + 2, arg1) << 16;
+ *val |= vga_mem_rd_handler(addr + 3, arg1) << 24;
break;
case 8:
- *val = vga_mem_rd_handler(ctx, addr, arg1);
- *val |= vga_mem_rd_handler(ctx, addr + 1, arg1) << 8;
- *val |= vga_mem_rd_handler(ctx, addr + 2, arg1) << 16;
- *val |= vga_mem_rd_handler(ctx, addr + 3, arg1) << 24;
- *val |= vga_mem_rd_handler(ctx, addr + 4, arg1) << 32;
- *val |= vga_mem_rd_handler(ctx, addr + 5, arg1) << 40;
- *val |= vga_mem_rd_handler(ctx, addr + 6, arg1) << 48;
- *val |= vga_mem_rd_handler(ctx, addr + 7, arg1) << 56;
+ *val = vga_mem_rd_handler(addr, arg1);
+ *val |= vga_mem_rd_handler(addr + 1, arg1) << 8;
+ *val |= vga_mem_rd_handler(addr + 2, arg1) << 16;
+ *val |= vga_mem_rd_handler(addr + 3, arg1) << 24;
+ *val |= vga_mem_rd_handler(addr + 4, arg1) << 32;
+ *val |= vga_mem_rd_handler(addr + 5, arg1) << 40;
+ *val |= vga_mem_rd_handler(addr + 6, arg1) << 48;
+ *val |= vga_mem_rd_handler(addr + 7, arg1) << 56;
break;
}
}
diff --git a/usr.sbin/bhyve/xmsr.h b/usr.sbin/bhyve/xmsr.h
--- a/usr.sbin/bhyve/xmsr.h
+++ b/usr.sbin/bhyve/xmsr.h
@@ -32,7 +32,7 @@
#define _XMSR_H_
int init_msr(void);
-int emulate_wrmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t val);
-int emulate_rdmsr(struct vmctx *ctx, int vcpu, uint32_t code, uint64_t *val);
+int emulate_wrmsr(struct vcpu *vcpu, uint32_t code, uint64_t val);
+int emulate_rdmsr(struct vcpu *vcpu, uint32_t code, uint64_t *val);
#endif
diff --git a/usr.sbin/bhyve/xmsr.c b/usr.sbin/bhyve/xmsr.c
--- a/usr.sbin/bhyve/xmsr.c
+++ b/usr.sbin/bhyve/xmsr.c
@@ -49,8 +49,7 @@
static int cpu_vendor_intel, cpu_vendor_amd, cpu_vendor_hygon;
int
-emulate_wrmsr(struct vmctx *ctx __unused, int vcpu __unused, uint32_t num,
- uint64_t val __unused)
+emulate_wrmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t val __unused)
{
if (cpu_vendor_intel) {
@@ -104,8 +103,7 @@
}
int
-emulate_rdmsr(struct vmctx *ctx __unused, int vcpu __unused, uint32_t num,
- uint64_t *val)
+emulate_rdmsr(struct vcpu *vcpu __unused, uint32_t num, uint64_t *val)
{
int error = 0;
diff --git a/usr.sbin/bhyvectl/bhyvectl.c b/usr.sbin/bhyvectl/bhyvectl.c
--- a/usr.sbin/bhyvectl/bhyvectl.c
+++ b/usr.sbin/bhyvectl/bhyvectl.c
@@ -533,18 +533,18 @@
}
static int
-vm_get_vmcs_field(struct vmctx *ctx, int vcpu, int field, uint64_t *ret_val)
+vm_get_vmcs_field(struct vcpu *vcpu, int field, uint64_t *ret_val)
{
- return (vm_get_register(ctx, vcpu, VMCS_IDENT(field), ret_val));
+ return (vm_get_register(vcpu, VMCS_IDENT(field), ret_val));
}
static int
-vm_get_vmcb_field(struct vmctx *ctx, int vcpu, int off, int bytes,
+vm_get_vmcb_field(struct vcpu *vcpu, int off, int bytes,
uint64_t *ret_val)
{
- return (vm_get_register(ctx, vcpu, VMCB_ACCESS(off, bytes), ret_val));
+ return (vm_get_register(vcpu, VMCB_ACCESS(off, bytes), ret_val));
}
enum {
@@ -662,7 +662,7 @@
}
static int
-get_all_registers(struct vmctx *ctx, int vcpu)
+get_all_registers(struct vcpu *vcpu, int vcpuid)
{
uint64_t cr0, cr2, cr3, cr4, dr0, dr1, dr2, dr3, dr6, dr7;
uint64_t rsp, rip, rflags, efer;
@@ -671,651 +671,651 @@
int error = 0;
if (!error && (get_efer || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_EFER, &efer);
+ error = vm_get_register(vcpu, VM_REG_GUEST_EFER, &efer);
if (error == 0)
- printf("efer[%d]\t\t0x%016lx\n", vcpu, efer);
+ printf("efer[%d]\t\t0x%016lx\n", vcpuid, efer);
}
if (!error && (get_cr0 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_CR0, &cr0);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &cr0);
if (error == 0)
- printf("cr0[%d]\t\t0x%016lx\n", vcpu, cr0);
+ printf("cr0[%d]\t\t0x%016lx\n", vcpuid, cr0);
}
if (!error && (get_cr2 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_CR2, &cr2);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CR2, &cr2);
if (error == 0)
- printf("cr2[%d]\t\t0x%016lx\n", vcpu, cr2);
+ printf("cr2[%d]\t\t0x%016lx\n", vcpuid, cr2);
}
if (!error && (get_cr3 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_CR3, &cr3);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CR3, &cr3);
if (error == 0)
- printf("cr3[%d]\t\t0x%016lx\n", vcpu, cr3);
+ printf("cr3[%d]\t\t0x%016lx\n", vcpuid, cr3);
}
if (!error && (get_cr4 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_CR4, &cr4);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CR4, &cr4);
if (error == 0)
- printf("cr4[%d]\t\t0x%016lx\n", vcpu, cr4);
+ printf("cr4[%d]\t\t0x%016lx\n", vcpuid, cr4);
}
if (!error && (get_dr0 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR0, &dr0);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR0, &dr0);
if (error == 0)
- printf("dr0[%d]\t\t0x%016lx\n", vcpu, dr0);
+ printf("dr0[%d]\t\t0x%016lx\n", vcpuid, dr0);
}
if (!error && (get_dr1 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR1, &dr1);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR1, &dr1);
if (error == 0)
- printf("dr1[%d]\t\t0x%016lx\n", vcpu, dr1);
+ printf("dr1[%d]\t\t0x%016lx\n", vcpuid, dr1);
}
if (!error && (get_dr2 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR2, &dr2);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR2, &dr2);
if (error == 0)
- printf("dr2[%d]\t\t0x%016lx\n", vcpu, dr2);
+ printf("dr2[%d]\t\t0x%016lx\n", vcpuid, dr2);
}
if (!error && (get_dr3 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR3, &dr3);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR3, &dr3);
if (error == 0)
- printf("dr3[%d]\t\t0x%016lx\n", vcpu, dr3);
+ printf("dr3[%d]\t\t0x%016lx\n", vcpuid, dr3);
}
if (!error && (get_dr6 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR6, &dr6);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR6, &dr6);
if (error == 0)
- printf("dr6[%d]\t\t0x%016lx\n", vcpu, dr6);
+ printf("dr6[%d]\t\t0x%016lx\n", vcpuid, dr6);
}
if (!error && (get_dr7 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DR7, &dr7);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DR7, &dr7);
if (error == 0)
- printf("dr7[%d]\t\t0x%016lx\n", vcpu, dr7);
+ printf("dr7[%d]\t\t0x%016lx\n", vcpuid, dr7);
}
if (!error && (get_rsp || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RSP, &rsp);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RSP, &rsp);
if (error == 0)
- printf("rsp[%d]\t\t0x%016lx\n", vcpu, rsp);
+ printf("rsp[%d]\t\t0x%016lx\n", vcpuid, rsp);
}
if (!error && (get_rip || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RIP, &rip);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
if (error == 0)
- printf("rip[%d]\t\t0x%016lx\n", vcpu, rip);
+ printf("rip[%d]\t\t0x%016lx\n", vcpuid, rip);
}
if (!error && (get_rax || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RAX, &rax);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RAX, &rax);
if (error == 0)
- printf("rax[%d]\t\t0x%016lx\n", vcpu, rax);
+ printf("rax[%d]\t\t0x%016lx\n", vcpuid, rax);
}
if (!error && (get_rbx || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RBX, &rbx);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RBX, &rbx);
if (error == 0)
- printf("rbx[%d]\t\t0x%016lx\n", vcpu, rbx);
+ printf("rbx[%d]\t\t0x%016lx\n", vcpuid, rbx);
}
if (!error && (get_rcx || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RCX, &rcx);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RCX, &rcx);
if (error == 0)
- printf("rcx[%d]\t\t0x%016lx\n", vcpu, rcx);
+ printf("rcx[%d]\t\t0x%016lx\n", vcpuid, rcx);
}
if (!error && (get_rdx || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RDX, &rdx);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RDX, &rdx);
if (error == 0)
- printf("rdx[%d]\t\t0x%016lx\n", vcpu, rdx);
+ printf("rdx[%d]\t\t0x%016lx\n", vcpuid, rdx);
}
if (!error && (get_rsi || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RSI, &rsi);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RSI, &rsi);
if (error == 0)
- printf("rsi[%d]\t\t0x%016lx\n", vcpu, rsi);
+ printf("rsi[%d]\t\t0x%016lx\n", vcpuid, rsi);
}
if (!error && (get_rdi || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RDI, &rdi);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RDI, &rdi);
if (error == 0)
- printf("rdi[%d]\t\t0x%016lx\n", vcpu, rdi);
+ printf("rdi[%d]\t\t0x%016lx\n", vcpuid, rdi);
}
if (!error && (get_rbp || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RBP, &rbp);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RBP, &rbp);
if (error == 0)
- printf("rbp[%d]\t\t0x%016lx\n", vcpu, rbp);
+ printf("rbp[%d]\t\t0x%016lx\n", vcpuid, rbp);
}
if (!error && (get_r8 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R8, &r8);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R8, &r8);
if (error == 0)
- printf("r8[%d]\t\t0x%016lx\n", vcpu, r8);
+ printf("r8[%d]\t\t0x%016lx\n", vcpuid, r8);
}
if (!error && (get_r9 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R9, &r9);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R9, &r9);
if (error == 0)
- printf("r9[%d]\t\t0x%016lx\n", vcpu, r9);
+ printf("r9[%d]\t\t0x%016lx\n", vcpuid, r9);
}
if (!error && (get_r10 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R10, &r10);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R10, &r10);
if (error == 0)
- printf("r10[%d]\t\t0x%016lx\n", vcpu, r10);
+ printf("r10[%d]\t\t0x%016lx\n", vcpuid, r10);
}
if (!error && (get_r11 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R11, &r11);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R11, &r11);
if (error == 0)
- printf("r11[%d]\t\t0x%016lx\n", vcpu, r11);
+ printf("r11[%d]\t\t0x%016lx\n", vcpuid, r11);
}
if (!error && (get_r12 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R12, &r12);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R12, &r12);
if (error == 0)
- printf("r12[%d]\t\t0x%016lx\n", vcpu, r12);
+ printf("r12[%d]\t\t0x%016lx\n", vcpuid, r12);
}
if (!error && (get_r13 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R13, &r13);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R13, &r13);
if (error == 0)
- printf("r13[%d]\t\t0x%016lx\n", vcpu, r13);
+ printf("r13[%d]\t\t0x%016lx\n", vcpuid, r13);
}
if (!error && (get_r14 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R14, &r14);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R14, &r14);
if (error == 0)
- printf("r14[%d]\t\t0x%016lx\n", vcpu, r14);
+ printf("r14[%d]\t\t0x%016lx\n", vcpuid, r14);
}
if (!error && (get_r15 || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_R15, &r15);
+ error = vm_get_register(vcpu, VM_REG_GUEST_R15, &r15);
if (error == 0)
- printf("r15[%d]\t\t0x%016lx\n", vcpu, r15);
+ printf("r15[%d]\t\t0x%016lx\n", vcpuid, r15);
}
if (!error && (get_rflags || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_RFLAGS,
+ error = vm_get_register(vcpu, VM_REG_GUEST_RFLAGS,
&rflags);
if (error == 0)
- printf("rflags[%d]\t0x%016lx\n", vcpu, rflags);
+ printf("rflags[%d]\t0x%016lx\n", vcpuid, rflags);
}
return (error);
}
static int
-get_all_segments(struct vmctx *ctx, int vcpu)
+get_all_segments(struct vcpu *vcpu, int vcpuid)
{
uint64_t cs, ds, es, fs, gs, ss, tr, ldtr;
int error = 0;
if (!error && (get_desc_ds || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_DS,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_DS,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("ds desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_es || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_ES,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_ES,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("es desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_fs || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_FS,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_FS,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("fs desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_gs || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_GS,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_GS,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("gs desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_ss || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_SS,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_SS,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("ss desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_cs || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_CS,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_CS,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("cs desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_tr || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_TR,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_TR,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("tr desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_ldtr || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_LDTR,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_LDTR,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("ldtr desc[%d]\t0x%016lx/0x%08x/0x%08x\n",
- vcpu, desc_base, desc_limit, desc_access);
+ vcpuid, desc_base, desc_limit, desc_access);
}
}
if (!error && (get_desc_gdtr || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_GDTR,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_GDTR,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("gdtr[%d]\t\t0x%016lx/0x%08x\n",
- vcpu, desc_base, desc_limit);
+ vcpuid, desc_base, desc_limit);
}
}
if (!error && (get_desc_idtr || get_all)) {
- error = vm_get_desc(ctx, vcpu, VM_REG_GUEST_IDTR,
+ error = vm_get_desc(vcpu, VM_REG_GUEST_IDTR,
&desc_base, &desc_limit, &desc_access);
if (error == 0) {
printf("idtr[%d]\t\t0x%016lx/0x%08x\n",
- vcpu, desc_base, desc_limit);
+ vcpuid, desc_base, desc_limit);
}
}
if (!error && (get_cs || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_CS, &cs);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CS, &cs);
if (error == 0)
- printf("cs[%d]\t\t0x%04lx\n", vcpu, cs);
+ printf("cs[%d]\t\t0x%04lx\n", vcpuid, cs);
}
if (!error && (get_ds || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_DS, &ds);
+ error = vm_get_register(vcpu, VM_REG_GUEST_DS, &ds);
if (error == 0)
- printf("ds[%d]\t\t0x%04lx\n", vcpu, ds);
+ printf("ds[%d]\t\t0x%04lx\n", vcpuid, ds);
}
if (!error && (get_es || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_ES, &es);
+ error = vm_get_register(vcpu, VM_REG_GUEST_ES, &es);
if (error == 0)
- printf("es[%d]\t\t0x%04lx\n", vcpu, es);
+ printf("es[%d]\t\t0x%04lx\n", vcpuid, es);
}
if (!error && (get_fs || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_FS, &fs);
+ error = vm_get_register(vcpu, VM_REG_GUEST_FS, &fs);
if (error == 0)
- printf("fs[%d]\t\t0x%04lx\n", vcpu, fs);
+ printf("fs[%d]\t\t0x%04lx\n", vcpuid, fs);
}
if (!error && (get_gs || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_GS, &gs);
+ error = vm_get_register(vcpu, VM_REG_GUEST_GS, &gs);
if (error == 0)
- printf("gs[%d]\t\t0x%04lx\n", vcpu, gs);
+ printf("gs[%d]\t\t0x%04lx\n", vcpuid, gs);
}
if (!error && (get_ss || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_SS, &ss);
+ error = vm_get_register(vcpu, VM_REG_GUEST_SS, &ss);
if (error == 0)
- printf("ss[%d]\t\t0x%04lx\n", vcpu, ss);
+ printf("ss[%d]\t\t0x%04lx\n", vcpuid, ss);
}
if (!error && (get_tr || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_TR, &tr);
+ error = vm_get_register(vcpu, VM_REG_GUEST_TR, &tr);
if (error == 0)
- printf("tr[%d]\t\t0x%04lx\n", vcpu, tr);
+ printf("tr[%d]\t\t0x%04lx\n", vcpuid, tr);
}
if (!error && (get_ldtr || get_all)) {
- error = vm_get_register(ctx, vcpu, VM_REG_GUEST_LDTR, &ldtr);
+ error = vm_get_register(vcpu, VM_REG_GUEST_LDTR, &ldtr);
if (error == 0)
- printf("ldtr[%d]\t\t0x%04lx\n", vcpu, ldtr);
+ printf("ldtr[%d]\t\t0x%04lx\n", vcpuid, ldtr);
}
return (error);
}
static int
-get_misc_vmcs(struct vmctx *ctx, int vcpu)
+get_misc_vmcs(struct vcpu *vcpu, int vcpuid)
{
uint64_t ctl, cr0, cr3, cr4, rsp, rip, pat, addr, u64;
int error = 0;
if (!error && (get_cr0_mask || get_all)) {
uint64_t cr0mask;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR0_MASK, &cr0mask);
+ error = vm_get_vmcs_field(vcpu, VMCS_CR0_MASK, &cr0mask);
if (error == 0)
- printf("cr0_mask[%d]\t\t0x%016lx\n", vcpu, cr0mask);
+ printf("cr0_mask[%d]\t\t0x%016lx\n", vcpuid, cr0mask);
}
if (!error && (get_cr0_shadow || get_all)) {
uint64_t cr0shadow;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR0_SHADOW,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR0_SHADOW,
&cr0shadow);
if (error == 0)
- printf("cr0_shadow[%d]\t\t0x%016lx\n", vcpu, cr0shadow);
+ printf("cr0_shadow[%d]\t\t0x%016lx\n", vcpuid, cr0shadow);
}
if (!error && (get_cr4_mask || get_all)) {
uint64_t cr4mask;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR4_MASK, &cr4mask);
+ error = vm_get_vmcs_field(vcpu, VMCS_CR4_MASK, &cr4mask);
if (error == 0)
- printf("cr4_mask[%d]\t\t0x%016lx\n", vcpu, cr4mask);
+ printf("cr4_mask[%d]\t\t0x%016lx\n", vcpuid, cr4mask);
}
if (!error && (get_cr4_shadow || get_all)) {
uint64_t cr4shadow;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR4_SHADOW,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR4_SHADOW,
&cr4shadow);
if (error == 0)
- printf("cr4_shadow[%d]\t\t0x%016lx\n", vcpu, cr4shadow);
+ printf("cr4_shadow[%d]\t\t0x%016lx\n", vcpuid, cr4shadow);
}
if (!error && (get_cr3_targets || get_all)) {
uint64_t target_count, target_addr;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR3_TARGET_COUNT,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET_COUNT,
&target_count);
if (error == 0) {
printf("cr3_target_count[%d]\t0x%016lx\n",
- vcpu, target_count);
+ vcpuid, target_count);
}
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR3_TARGET0,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET0,
&target_addr);
if (error == 0) {
printf("cr3_target0[%d]\t\t0x%016lx\n",
- vcpu, target_addr);
+ vcpuid, target_addr);
}
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR3_TARGET1,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET1,
&target_addr);
if (error == 0) {
printf("cr3_target1[%d]\t\t0x%016lx\n",
- vcpu, target_addr);
+ vcpuid, target_addr);
}
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR3_TARGET2,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET2,
&target_addr);
if (error == 0) {
printf("cr3_target2[%d]\t\t0x%016lx\n",
- vcpu, target_addr);
+ vcpuid, target_addr);
}
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_CR3_TARGET3,
+ error = vm_get_vmcs_field(vcpu, VMCS_CR3_TARGET3,
&target_addr);
if (error == 0) {
printf("cr3_target3[%d]\t\t0x%016lx\n",
- vcpu, target_addr);
+ vcpuid, target_addr);
}
}
if (!error && (get_pinbased_ctls || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_PIN_BASED_CTLS, &ctl);
+ error = vm_get_vmcs_field(vcpu, VMCS_PIN_BASED_CTLS, &ctl);
if (error == 0)
- printf("pinbased_ctls[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("pinbased_ctls[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_procbased_ctls || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_PRI_PROC_BASED_CTLS, &ctl);
if (error == 0)
- printf("procbased_ctls[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("procbased_ctls[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_procbased_ctls2 || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_SEC_PROC_BASED_CTLS, &ctl);
if (error == 0)
- printf("procbased_ctls2[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("procbased_ctls2[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_vmcs_gla || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_LINEAR_ADDRESS, &u64);
if (error == 0)
- printf("gla[%d]\t\t0x%016lx\n", vcpu, u64);
+ printf("gla[%d]\t\t0x%016lx\n", vcpuid, u64);
}
if (!error && (get_vmcs_gpa || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_PHYSICAL_ADDRESS, &u64);
if (error == 0)
- printf("gpa[%d]\t\t0x%016lx\n", vcpu, u64);
+ printf("gpa[%d]\t\t0x%016lx\n", vcpuid, u64);
}
if (!error && (get_vmcs_entry_interruption_info ||
get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_ENTRY_INTR_INFO,&u64);
+ error = vm_get_vmcs_field(vcpu, VMCS_ENTRY_INTR_INFO,&u64);
if (error == 0) {
printf("entry_interruption_info[%d]\t0x%016lx\n",
- vcpu, u64);
+ vcpuid, u64);
}
}
if (!error && (get_tpr_threshold || get_all)) {
uint64_t threshold;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_TPR_THRESHOLD,
+ error = vm_get_vmcs_field(vcpu, VMCS_TPR_THRESHOLD,
&threshold);
if (error == 0)
- printf("tpr_threshold[%d]\t0x%016lx\n", vcpu, threshold);
+ printf("tpr_threshold[%d]\t0x%016lx\n", vcpuid, threshold);
}
if (!error && (get_inst_err || get_all)) {
uint64_t insterr;
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_INSTRUCTION_ERROR,
+ error = vm_get_vmcs_field(vcpu, VMCS_INSTRUCTION_ERROR,
&insterr);
if (error == 0) {
printf("instruction_error[%d]\t0x%016lx\n",
- vcpu, insterr);
+ vcpuid, insterr);
}
}
if (!error && (get_exit_ctls || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EXIT_CTLS, &ctl);
+ error = vm_get_vmcs_field(vcpu, VMCS_EXIT_CTLS, &ctl);
if (error == 0)
- printf("exit_ctls[%d]\t\t0x%016lx\n", vcpu, ctl);
+ printf("exit_ctls[%d]\t\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_entry_ctls || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_ENTRY_CTLS, &ctl);
+ error = vm_get_vmcs_field(vcpu, VMCS_ENTRY_CTLS, &ctl);
if (error == 0)
- printf("entry_ctls[%d]\t\t0x%016lx\n", vcpu, ctl);
+ printf("entry_ctls[%d]\t\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_host_pat || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_IA32_PAT, &pat);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_IA32_PAT, &pat);
if (error == 0)
- printf("host_pat[%d]\t\t0x%016lx\n", vcpu, pat);
+ printf("host_pat[%d]\t\t0x%016lx\n", vcpuid, pat);
}
if (!error && (get_host_cr0 || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_CR0, &cr0);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR0, &cr0);
if (error == 0)
- printf("host_cr0[%d]\t\t0x%016lx\n", vcpu, cr0);
+ printf("host_cr0[%d]\t\t0x%016lx\n", vcpuid, cr0);
}
if (!error && (get_host_cr3 || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_CR3, &cr3);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR3, &cr3);
if (error == 0)
- printf("host_cr3[%d]\t\t0x%016lx\n", vcpu, cr3);
+ printf("host_cr3[%d]\t\t0x%016lx\n", vcpuid, cr3);
}
if (!error && (get_host_cr4 || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_CR4, &cr4);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_CR4, &cr4);
if (error == 0)
- printf("host_cr4[%d]\t\t0x%016lx\n", vcpu, cr4);
+ printf("host_cr4[%d]\t\t0x%016lx\n", vcpuid, cr4);
}
if (!error && (get_host_rip || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_RIP, &rip);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_RIP, &rip);
if (error == 0)
- printf("host_rip[%d]\t\t0x%016lx\n", vcpu, rip);
+ printf("host_rip[%d]\t\t0x%016lx\n", vcpuid, rip);
}
if (!error && (get_host_rsp || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_HOST_RSP, &rsp);
+ error = vm_get_vmcs_field(vcpu, VMCS_HOST_RSP, &rsp);
if (error == 0)
- printf("host_rsp[%d]\t\t0x%016lx\n", vcpu, rsp);
+ printf("host_rsp[%d]\t\t0x%016lx\n", vcpuid, rsp);
}
if (!error && (get_vmcs_link || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_LINK_POINTER, &addr);
+ error = vm_get_vmcs_field(vcpu, VMCS_LINK_POINTER, &addr);
if (error == 0)
- printf("vmcs_pointer[%d]\t0x%016lx\n", vcpu, addr);
+ printf("vmcs_pointer[%d]\t0x%016lx\n", vcpuid, addr);
}
if (!error && (get_vmcs_exit_interruption_info || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EXIT_INTR_INFO, &u64);
+ error = vm_get_vmcs_field(vcpu, VMCS_EXIT_INTR_INFO, &u64);
if (error == 0) {
printf("vmcs_exit_interruption_info[%d]\t0x%016lx\n",
- vcpu, u64);
+ vcpuid, u64);
}
}
if (!error && (get_vmcs_exit_interruption_error || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EXIT_INTR_ERRCODE,
+ error = vm_get_vmcs_field(vcpu, VMCS_EXIT_INTR_ERRCODE,
&u64);
if (error == 0) {
printf("vmcs_exit_interruption_error[%d]\t0x%016lx\n",
- vcpu, u64);
+ vcpuid, u64);
}
}
if (!error && (get_vmcs_interruptibility || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_INTERRUPTIBILITY, &u64);
if (error == 0) {
printf("vmcs_guest_interruptibility[%d]\t0x%016lx\n",
- vcpu, u64);
+ vcpuid, u64);
}
}
if (!error && (get_vmcs_exit_inst_length || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_EXIT_INSTRUCTION_LENGTH, &u64);
if (error == 0)
- printf("vmcs_exit_inst_length[%d]\t0x%08x\n", vcpu,
+ printf("vmcs_exit_inst_length[%d]\t0x%08x\n", vcpuid,
(uint32_t)u64);
}
if (!error && (get_vmcs_exit_qualification || get_all)) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EXIT_QUALIFICATION,
+ error = vm_get_vmcs_field(vcpu, VMCS_EXIT_QUALIFICATION,
&u64);
if (error == 0)
printf("vmcs_exit_qualification[%d]\t0x%016lx\n",
- vcpu, u64);
+ vcpuid, u64);
}
return (error);
}
static int
-get_misc_vmcb(struct vmctx *ctx, int vcpu)
+get_misc_vmcb(struct vcpu *vcpu, int vcpuid)
{
uint64_t ctl, addr;
int error = 0;
if (!error && (get_vmcb_intercept || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_CR_INTERCEPT, 4,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_CR_INTERCEPT, 4,
&ctl);
if (error == 0)
- printf("cr_intercept[%d]\t0x%08x\n", vcpu, (int)ctl);
+ printf("cr_intercept[%d]\t0x%08x\n", vcpuid, (int)ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_DR_INTERCEPT, 4,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_DR_INTERCEPT, 4,
&ctl);
if (error == 0)
- printf("dr_intercept[%d]\t0x%08x\n", vcpu, (int)ctl);
+ printf("dr_intercept[%d]\t0x%08x\n", vcpuid, (int)ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_EXC_INTERCEPT, 4,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXC_INTERCEPT, 4,
&ctl);
if (error == 0)
- printf("exc_intercept[%d]\t0x%08x\n", vcpu, (int)ctl);
+ printf("exc_intercept[%d]\t0x%08x\n", vcpuid, (int)ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_INST1_INTERCEPT,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_INST1_INTERCEPT,
4, &ctl);
if (error == 0)
- printf("inst1_intercept[%d]\t0x%08x\n", vcpu, (int)ctl);
+ printf("inst1_intercept[%d]\t0x%08x\n", vcpuid, (int)ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_INST2_INTERCEPT,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_INST2_INTERCEPT,
4, &ctl);
if (error == 0)
- printf("inst2_intercept[%d]\t0x%08x\n", vcpu, (int)ctl);
+ printf("inst2_intercept[%d]\t0x%08x\n", vcpuid, (int)ctl);
}
if (!error && (get_vmcb_tlb_ctrl || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_TLB_CTRL,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_TLB_CTRL,
4, &ctl);
if (error == 0)
- printf("TLB ctrl[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("TLB ctrl[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_vmcb_exit_details || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_EXITINFO1,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINFO1,
8, &ctl);
if (error == 0)
- printf("exitinfo1[%d]\t0x%016lx\n", vcpu, ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_EXITINFO2,
+ printf("exitinfo1[%d]\t0x%016lx\n", vcpuid, ctl);
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINFO2,
8, &ctl);
if (error == 0)
- printf("exitinfo2[%d]\t0x%016lx\n", vcpu, ctl);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_EXITINTINFO,
+ printf("exitinfo2[%d]\t0x%016lx\n", vcpuid, ctl);
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_EXITINTINFO,
8, &ctl);
if (error == 0)
- printf("exitintinfo[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("exitintinfo[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_vmcb_virq || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_VIRQ,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_VIRQ,
8, &ctl);
if (error == 0)
- printf("v_irq/tpr[%d]\t0x%016lx\n", vcpu, ctl);
+ printf("v_irq/tpr[%d]\t0x%016lx\n", vcpuid, ctl);
}
if (!error && (get_apic_access_addr || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_AVIC_BAR, 8,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_BAR, 8,
&addr);
if (error == 0)
- printf("AVIC apic_bar[%d]\t0x%016lx\n", vcpu, addr);
+ printf("AVIC apic_bar[%d]\t0x%016lx\n", vcpuid, addr);
}
if (!error && (get_virtual_apic_addr || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_AVIC_PAGE, 8,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_PAGE, 8,
&addr);
if (error == 0)
- printf("AVIC backing page[%d]\t0x%016lx\n", vcpu, addr);
+ printf("AVIC backing page[%d]\t0x%016lx\n", vcpuid, addr);
}
if (!error && (get_avic_table || get_all)) {
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_AVIC_LT, 8,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_LT, 8,
&addr);
if (error == 0)
printf("AVIC logical table[%d]\t0x%016lx\n",
- vcpu, addr);
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_AVIC_PT, 8,
+ vcpuid, addr);
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_AVIC_PT, 8,
&addr);
if (error == 0)
printf("AVIC physical table[%d]\t0x%016lx\n",
- vcpu, addr);
+ vcpuid, addr);
}
return (error);
@@ -1728,13 +1728,14 @@
main(int argc, char *argv[])
{
char *vmname;
- int error, ch, vcpu, ptenum;
+ int error, ch, vcpuid, ptenum;
vm_paddr_t gpa_pmap;
struct vm_exit vmexit;
uint64_t rax, cr0, cr2, cr3, cr4, dr0, dr1, dr2, dr3, dr6, dr7;
uint64_t rsp, rip, rflags, efer, pat;
uint64_t eptp, bm, addr, u64, pteval[4], *pte, info[2];
struct vmctx *ctx;
+ struct vcpu *vcpu;
cpuset_t cpus;
bool cpu_intel;
uint64_t cs, ds, es, fs, gs, ss, tr, ldtr;
@@ -1747,7 +1748,7 @@
cpu_intel = cpu_vendor_intel();
opts = setup_options(cpu_intel);
- vcpu = 0;
+ vcpuid = 0;
vmname = NULL;
assert_lapic_lvt = -1;
progname = basename(argv[0]);
@@ -1760,7 +1761,7 @@
vmname = optarg;
break;
case VCPU:
- vcpu = atoi(optarg);
+ vcpuid = atoi(optarg);
break;
case SET_MEM:
memsize = atoi(optarg) * MB;
@@ -1934,144 +1935,145 @@
vmname, strerror(errno));
exit (1);
}
+ vcpu = vm_vcpu_open(ctx, vcpuid);
}
if (!error && memsize)
error = vm_setup_memory(ctx, memsize, VM_MMAP_ALL);
if (!error && set_efer)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_EFER, efer);
+ error = vm_set_register(vcpu, VM_REG_GUEST_EFER, efer);
if (!error && set_cr0)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CR0, cr0);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CR0, cr0);
if (!error && set_cr2)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CR2, cr2);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
if (!error && set_cr3)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CR3, cr3);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CR3, cr3);
if (!error && set_cr4)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CR4, cr4);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CR4, cr4);
if (!error && set_dr0)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR0, dr0);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR0, dr0);
if (!error && set_dr1)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR1, dr1);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR1, dr1);
if (!error && set_dr2)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR2, dr2);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR2, dr2);
if (!error && set_dr3)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR3, dr3);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR3, dr3);
if (!error && set_dr6)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR6, dr6);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR6, dr6);
if (!error && set_dr7)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DR7, dr7);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DR7, dr7);
if (!error && set_rsp)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RSP, rsp);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RSP, rsp);
if (!error && set_rip)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RIP, rip);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RIP, rip);
if (!error && set_rax)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RAX, rax);
+ error = vm_set_register(vcpu, VM_REG_GUEST_RAX, rax);
if (!error && set_rflags) {
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_RFLAGS,
+ error = vm_set_register(vcpu, VM_REG_GUEST_RFLAGS,
rflags);
}
if (!error && set_desc_ds) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_DS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_DS,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_es) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_ES,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_ES,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_ss) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_SS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_SS,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_cs) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_CS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_CS,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_fs) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_FS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_FS,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_gs) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_GS,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GS,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_tr) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_TR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_TR,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_ldtr) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_LDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_LDTR,
desc_base, desc_limit, desc_access);
}
if (!error && set_desc_gdtr) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_GDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR,
desc_base, desc_limit, 0);
}
if (!error && set_desc_idtr) {
- error = vm_set_desc(ctx, vcpu, VM_REG_GUEST_IDTR,
+ error = vm_set_desc(vcpu, VM_REG_GUEST_IDTR,
desc_base, desc_limit, 0);
}
if (!error && set_cs)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_CS, cs);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CS, cs);
if (!error && set_ds)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_DS, ds);
+ error = vm_set_register(vcpu, VM_REG_GUEST_DS, ds);
if (!error && set_es)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_ES, es);
+ error = vm_set_register(vcpu, VM_REG_GUEST_ES, es);
if (!error && set_fs)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_FS, fs);
+ error = vm_set_register(vcpu, VM_REG_GUEST_FS, fs);
if (!error && set_gs)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_GS, gs);
+ error = vm_set_register(vcpu, VM_REG_GUEST_GS, gs);
if (!error && set_ss)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_SS, ss);
+ error = vm_set_register(vcpu, VM_REG_GUEST_SS, ss);
if (!error && set_tr)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_TR, tr);
+ error = vm_set_register(vcpu, VM_REG_GUEST_TR, tr);
if (!error && set_ldtr)
- error = vm_set_register(ctx, vcpu, VM_REG_GUEST_LDTR, ldtr);
+ error = vm_set_register(vcpu, VM_REG_GUEST_LDTR, ldtr);
if (!error && set_x2apic_state)
- error = vm_set_x2apic_state(ctx, vcpu, x2apic_state);
+ error = vm_set_x2apic_state(vcpu, x2apic_state);
if (!error && unassign_pptdev)
error = vm_unassign_pptdev(ctx, bus, slot, func);
if (!error && inject_nmi) {
- error = vm_inject_nmi(ctx, vcpu);
+ error = vm_inject_nmi(vcpu);
}
if (!error && assert_lapic_lvt != -1) {
- error = vm_lapic_local_irq(ctx, vcpu, assert_lapic_lvt);
+ error = vm_lapic_local_irq(vcpu, assert_lapic_lvt);
}
if (!error && (get_memseg || get_all))
@@ -2081,177 +2083,177 @@
error = show_memmap(ctx);
if (!error)
- error = get_all_registers(ctx, vcpu);
+ error = get_all_registers(vcpu, vcpuid);
if (!error)
- error = get_all_segments(ctx, vcpu);
+ error = get_all_segments(vcpu, vcpuid);
if (!error) {
if (cpu_intel)
- error = get_misc_vmcs(ctx, vcpu);
+ error = get_misc_vmcs(vcpu, vcpuid);
else
- error = get_misc_vmcb(ctx, vcpu);
+ error = get_misc_vmcb(vcpu, vcpuid);
}
if (!error && (get_x2apic_state || get_all)) {
- error = vm_get_x2apic_state(ctx, vcpu, &x2apic_state);
+ error = vm_get_x2apic_state(vcpu, &x2apic_state);
if (error == 0)
- printf("x2apic_state[%d]\t%d\n", vcpu, x2apic_state);
+ printf("x2apic_state[%d]\t%d\n", vcpuid, x2apic_state);
}
if (!error && (get_eptp || get_all)) {
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EPTP, &eptp);
+ error = vm_get_vmcs_field(vcpu, VMCS_EPTP, &eptp);
else
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_NPT_BASE,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_NPT_BASE,
8, &eptp);
if (error == 0)
printf("%s[%d]\t\t0x%016lx\n",
- cpu_intel ? "eptp" : "rvi/npt", vcpu, eptp);
+ cpu_intel ? "eptp" : "rvi/npt", vcpuid, eptp);
}
if (!error && (get_exception_bitmap || get_all)) {
if(cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_EXCEPTION_BITMAP, &bm);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_EXC_INTERCEPT,
4, &bm);
if (error == 0)
- printf("exception_bitmap[%d]\t%#lx\n", vcpu, bm);
+ printf("exception_bitmap[%d]\t%#lx\n", vcpuid, bm);
}
if (!error && (get_io_bitmap || get_all)) {
if (cpu_intel) {
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_IO_BITMAP_A,
+ error = vm_get_vmcs_field(vcpu, VMCS_IO_BITMAP_A,
&bm);
if (error == 0)
- printf("io_bitmap_a[%d]\t%#lx\n", vcpu, bm);
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_IO_BITMAP_B,
+ printf("io_bitmap_a[%d]\t%#lx\n", vcpuid, bm);
+ error = vm_get_vmcs_field(vcpu, VMCS_IO_BITMAP_B,
&bm);
if (error == 0)
- printf("io_bitmap_b[%d]\t%#lx\n", vcpu, bm);
+ printf("io_bitmap_b[%d]\t%#lx\n", vcpuid, bm);
} else {
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_IO_PERM, 8, &bm);
if (error == 0)
- printf("io_bitmap[%d]\t%#lx\n", vcpu, bm);
+ printf("io_bitmap[%d]\t%#lx\n", vcpuid, bm);
}
}
if (!error && (get_tsc_offset || get_all)) {
uint64_t tscoff;
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_TSC_OFFSET,
+ error = vm_get_vmcs_field(vcpu, VMCS_TSC_OFFSET,
&tscoff);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_TSC_OFFSET,
8, &tscoff);
if (error == 0)
- printf("tsc_offset[%d]\t0x%016lx\n", vcpu, tscoff);
+ printf("tsc_offset[%d]\t0x%016lx\n", vcpuid, tscoff);
}
if (!error && (get_msr_bitmap_address || get_all)) {
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_MSR_BITMAP,
+ error = vm_get_vmcs_field(vcpu, VMCS_MSR_BITMAP,
&addr);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_MSR_PERM, 8, &addr);
if (error == 0)
- printf("msr_bitmap[%d]\t\t%#lx\n", vcpu, addr);
+ printf("msr_bitmap[%d]\t\t%#lx\n", vcpuid, addr);
}
if (!error && (get_msr_bitmap || get_all)) {
if (cpu_intel) {
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_MSR_BITMAP, &addr);
} else {
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_MSR_PERM, 8,
&addr);
}
if (error == 0)
- error = dump_msr_bitmap(vcpu, addr, cpu_intel);
+ error = dump_msr_bitmap(vcpuid, addr, cpu_intel);
}
if (!error && (get_vpid_asid || get_all)) {
uint64_t vpid;
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_VPID, &vpid);
+ error = vm_get_vmcs_field(vcpu, VMCS_VPID, &vpid);
else
- error = vm_get_vmcb_field(ctx, vcpu, VMCB_OFF_ASID,
+ error = vm_get_vmcb_field(vcpu, VMCB_OFF_ASID,
4, &vpid);
if (error == 0)
printf("%s[%d]\t\t0x%04lx\n",
- cpu_intel ? "vpid" : "asid", vcpu, vpid);
+ cpu_intel ? "vpid" : "asid", vcpuid, vpid);
}
if (!error && (get_guest_pat || get_all)) {
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_IA32_PAT, &pat);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_GUEST_PAT, 8, &pat);
if (error == 0)
- printf("guest_pat[%d]\t\t0x%016lx\n", vcpu, pat);
+ printf("guest_pat[%d]\t\t0x%016lx\n", vcpuid, pat);
}
if (!error && (get_guest_sysenter || get_all)) {
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_IA32_SYSENTER_CS,
&cs);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_SYSENTER_CS, 8,
&cs);
if (error == 0)
- printf("guest_sysenter_cs[%d]\t%#lx\n", vcpu, cs);
+ printf("guest_sysenter_cs[%d]\t%#lx\n", vcpuid, cs);
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_IA32_SYSENTER_ESP,
&rsp);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_SYSENTER_ESP, 8,
&rsp);
if (error == 0)
- printf("guest_sysenter_sp[%d]\t%#lx\n", vcpu, rsp);
+ printf("guest_sysenter_sp[%d]\t%#lx\n", vcpuid, rsp);
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu,
+ error = vm_get_vmcs_field(vcpu,
VMCS_GUEST_IA32_SYSENTER_EIP,
&rip);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_SYSENTER_EIP, 8,
&rip);
if (error == 0)
- printf("guest_sysenter_ip[%d]\t%#lx\n", vcpu, rip);
+ printf("guest_sysenter_ip[%d]\t%#lx\n", vcpuid, rip);
}
if (!error && (get_exit_reason || get_all)) {
if (cpu_intel)
- error = vm_get_vmcs_field(ctx, vcpu, VMCS_EXIT_REASON,
+ error = vm_get_vmcs_field(vcpu, VMCS_EXIT_REASON,
&u64);
else
- error = vm_get_vmcb_field(ctx, vcpu,
+ error = vm_get_vmcb_field(vcpu,
VMCB_OFF_EXIT_REASON, 8,
&u64);
if (error == 0)
- printf("exit_reason[%d]\t%#lx\n", vcpu, u64);
+ printf("exit_reason[%d]\t%#lx\n", vcpuid, u64);
}
if (!error && setcap) {
int captype;
captype = vm_capability_name2type(capname);
- error = vm_set_capability(ctx, vcpu, captype, capval);
+ error = vm_set_capability(vcpu, captype, capval);
if (error != 0 && errno == ENOENT)
printf("Capability \"%s\" is not available\n", capname);
}
@@ -2303,11 +2305,11 @@
for (captype = 0; captype < VM_CAP_MAX; captype++) {
if (getcaptype >= 0 && captype != getcaptype)
continue;
- error = vm_get_capability(ctx, vcpu, captype, &val);
+ error = vm_get_capability(vcpu, captype, &val);
if (error == 0) {
printf("Capability \"%s\" is %s on vcpu %d\n",
vm_capability_type2name(captype),
- val ? "set" : "not set", vcpu);
+ val ? "set" : "not set", vcpuid);
} else if (errno == ENOENT) {
error = 0;
printf("Capability \"%s\" is not available\n",
@@ -2331,7 +2333,7 @@
}
if (!error && (get_intinfo || get_all)) {
- error = vm_get_intinfo(ctx, vcpu, &info[0], &info[1]);
+ error = vm_get_intinfo(vcpu, &info[0], &info[1]);
if (!error) {
print_intinfo("pending", info[0]);
print_intinfo("current", info[1]);
@@ -2344,9 +2346,9 @@
struct timeval tv;
const char *desc;
- stats = vm_get_stats(ctx, vcpu, &tv, &num_stats);
+ stats = vm_get_stats(vcpu, &tv, &num_stats);
if (stats != NULL) {
- printf("vcpu%d stats:\n", vcpu);
+ printf("vcpu%d stats:\n", vcpuid);
for (i = 0; i < num_stats; i++) {
desc = vm_get_stat_desc(ctx, i);
printf("%-40s\t%ld\n", desc, stats[i]);
@@ -2363,9 +2365,9 @@
}
if (!error && run) {
- error = vm_run(ctx, vcpu, &vmexit);
+ error = vm_run(vcpu, &vmexit);
if (error == 0)
- dump_vm_run_exitcode(&vmexit, vcpu);
+ dump_vm_run_exitcode(&vmexit, vcpuid);
else
printf("vm_run error %d\n", error);
}
diff --git a/usr.sbin/bhyveload/bhyveload.c b/usr.sbin/bhyveload/bhyveload.c
--- a/usr.sbin/bhyveload/bhyveload.c
+++ b/usr.sbin/bhyveload/bhyveload.c
@@ -67,6 +67,7 @@
#include <machine/specialreg.h>
#include <machine/vmm.h>
+#include <assert.h>
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
@@ -108,6 +109,7 @@
static char *vmname, *progname;
static struct vmctx *ctx;
+static struct vcpu *vcpu;
static uint64_t gdtbase, cr3, rsp;
@@ -410,7 +412,7 @@
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
- error = vm_set_register(ctx, BSP, vmreg, v);
+ error = vm_set_register(vcpu, vmreg, v);
if (error) {
perror("vm_set_register");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
@@ -438,7 +440,7 @@
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
- error = vm_set_register(ctx, BSP, vmreg, v);
+ error = vm_set_register(vcpu, vmreg, v);
if (error) {
perror("vm_set_msr");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
@@ -473,7 +475,7 @@
cb_exit(NULL, USERBOOT_EXIT_QUIT);
}
- error = vm_set_register(ctx, BSP, vmreg, v);
+ error = vm_set_register(vcpu, vmreg, v);
if (error) {
perror("vm_set_cr");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
@@ -485,7 +487,7 @@
{
int error;
- error = vm_set_desc(ctx, BSP, VM_REG_GUEST_GDTR, base, size - 1, 0);
+ error = vm_set_desc(vcpu, VM_REG_GUEST_GDTR, base, size - 1, 0);
if (error != 0) {
perror("vm_set_desc(gdt)");
cb_exit(NULL, USERBOOT_EXIT_QUIT);
@@ -500,10 +502,10 @@
int error;
if (cr3 == 0)
- error = vm_setup_freebsd_registers_i386(ctx, BSP, rip, gdtbase,
+ error = vm_setup_freebsd_registers_i386(vcpu, rip, gdtbase,
rsp);
else
- error = vm_setup_freebsd_registers(ctx, BSP, rip, cr3, gdtbase,
+ error = vm_setup_freebsd_registers(vcpu, rip, cr3, gdtbase,
rsp);
if (error) {
perror("vm_setup_freebsd_registers");
@@ -578,18 +580,20 @@
}
static int
-cb_vm_set_register(void *arg __unused, int vcpu, int reg, uint64_t val)
+cb_vm_set_register(void *arg __unused, int vcpuid, int reg, uint64_t val)
{
- return (vm_set_register(ctx, vcpu, reg, val));
+ assert(vcpuid == BSP);
+ return (vm_set_register(vcpu, reg, val));
}
static int
-cb_vm_set_desc(void *arg __unused, int vcpu, int reg, uint64_t base,
+cb_vm_set_desc(void *arg __unused, int vcpuid, int reg, uint64_t base,
u_int limit, u_int access)
{
- return (vm_set_desc(ctx, vcpu, reg, base, limit, access));
+ assert(vcpuid == BSP);
+ return (vm_set_desc(vcpu, reg, base, limit, access));
}
static void
@@ -802,6 +806,8 @@
exit(1);
}
+ vcpu = vm_vcpu_open(ctx, BSP);
+
/*
* setjmp in the case the guest wants to swap out interpreter,
* cb_swap_interpreter will swap out loader as appropriate and set

File Metadata

Mime Type
text/plain
Expires
Sat, Nov 9, 6:44 AM (20 h, 41 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
14552204
Default Alt Text
D38124.diff (194 KB)

Event Timeline