Page Menu
Home
FreeBSD
Search
Configure Global Search
Log In
Files
F102701184
D37161.diff
No One
Temporary
Actions
View File
Edit File
Delete File
View Transforms
Subscribe
Mute Notifications
Flag For Later
Award Token
Size
81 KB
Referenced Files
None
Subscribers
None
D37161.diff
View Options
diff --git a/lib/libvmmapi/vmmapi.h b/lib/libvmmapi/vmmapi.h
--- a/lib/libvmmapi/vmmapi.h
+++ b/lib/libvmmapi/vmmapi.h
@@ -238,6 +238,7 @@
int vm_activate_cpu(struct vmctx *ctx, int vcpu);
int vm_suspend_cpu(struct vmctx *ctx, int vcpu);
int vm_resume_cpu(struct vmctx *ctx, int vcpu);
+int vm_restart_instruction(struct vmctx *vmctx, int vcpu);
/* CPU topology */
int vm_set_topology(struct vmctx *ctx, uint16_t sockets, uint16_t cores,
diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c
--- a/lib/libvmmapi/vmmapi.c
+++ b/lib/libvmmapi/vmmapi.c
@@ -1671,9 +1671,8 @@
}
int
-vm_restart_instruction(void *arg, int vcpu)
+vm_restart_instruction(struct vmctx *ctx, int vcpu)
{
- struct vmctx *ctx = arg;
return (ioctl(ctx->fd, VM_RESTART_INSTRUCTION, &vcpu));
}
diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -251,16 +251,18 @@
int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
struct vm_object **objptr);
vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
-void *vm_gpa_hold(struct vm *, int vcpuid, vm_paddr_t gpa, size_t len,
+void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
+ int prot, void **cookie);
+void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
int prot, void **cookie);
void vm_gpa_release(void *cookie);
bool vm_mem_allocated(struct vm *vm, int vcpuid, vm_paddr_t gpa);
-int vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval);
-int vm_set_register(struct vm *vm, int vcpu, int reg, uint64_t val);
-int vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
+int vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval);
+int vm_set_register(struct vcpu *vcpu, int reg, uint64_t val);
+int vm_get_seg_desc(struct vcpu *vcpu, int reg,
struct seg_desc *ret_desc);
int vm_set_seg_desc(struct vm *vm, int vcpu, int reg,
struct seg_desc *desc);
@@ -275,7 +277,7 @@
int vcpu_vcpuid(struct vcpu *vcpu);
struct vm *vcpu_vm(struct vcpu *vcpu);
struct vcpu *vm_vcpu(struct vm *vm, int cpu);
-struct vlapic *vm_lapic(struct vm *vm, int cpu);
+struct vlapic *vm_lapic(struct vcpu *vcpu);
struct vioapic *vm_ioapic(struct vm *vm);
struct vhpet *vm_hpet(struct vm *vm);
int vm_get_capability(struct vm *vm, int vcpu, int type, int *val);
@@ -286,6 +288,7 @@
int vm_activate_cpu(struct vm *vm, int vcpu);
int vm_suspend_cpu(struct vm *vm, int vcpu);
int vm_resume_cpu(struct vm *vm, int vcpu);
+int vm_restart_instruction(struct vcpu *vcpu);
struct vm_exit *vm_exitinfo(struct vm *vm, int vcpuid);
void vm_exit_suspended(struct vm *vm, int vcpuid, uint64_t rip);
void vm_exit_debug(struct vm *vm, int vcpuid, uint64_t rip);
@@ -360,12 +363,12 @@
int vcpu_set_state(struct vm *vm, int vcpu, enum vcpu_state state,
bool from_idle);
-enum vcpu_state vcpu_get_state(struct vm *vm, int vcpu, int *hostcpu);
+enum vcpu_state vcpu_get_state(struct vcpu *vcpu, int *hostcpu);
static int __inline
vcpu_is_running(struct vm *vm, int vcpu, int *hostcpu)
{
- return (vcpu_get_state(vm, vcpu, hostcpu) == VCPU_RUNNING);
+ return (vcpu_get_state(vm_vcpu(vm, vcpu), hostcpu) == VCPU_RUNNING);
}
#ifdef _SYS_PROC_H_
@@ -398,7 +401,7 @@
* This function should only be called in the context of the thread that is
* executing this vcpu.
*/
-int vm_inject_exception(struct vm *vm, int vcpuid, int vector, int err_valid,
+int vm_inject_exception(struct vcpu *vcpu, int vector, int err_valid,
uint32_t errcode, int restart_instruction);
/*
@@ -460,7 +463,7 @@
* the return value is 0. The 'copyinfo[]' resources should be freed by calling
* 'vm_copy_teardown()' after the copy is done.
*/
-int vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
int num_copyinfo, int *is_fault);
void vm_copy_teardown(struct vm_copyinfo *copyinfo, int num_copyinfo);
@@ -753,6 +756,36 @@
};
/* APIs to inject faults into the guest */
+#ifdef _KERNEL
+void vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid,
+ int errcode);
+
+static __inline void
+vm_inject_ud(struct vcpu *vcpu)
+{
+ vm_inject_fault(vcpu, IDT_UD, 0, 0);
+}
+
+static __inline void
+vm_inject_gp(struct vcpu *vcpu)
+{
+ vm_inject_fault(vcpu, IDT_GP, 1, 0);
+}
+
+static __inline void
+vm_inject_ac(struct vcpu *vcpu, int errcode)
+{
+ vm_inject_fault(vcpu, IDT_AC, 1, errcode);
+}
+
+static __inline void
+vm_inject_ss(struct vcpu *vcpu, int errcode)
+{
+ vm_inject_fault(vcpu, IDT_SS, 1, errcode);
+}
+
+void vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2);
+#else
void vm_inject_fault(void *vm, int vcpuid, int vector, int errcode_valid,
int errcode);
@@ -781,7 +814,6 @@
}
void vm_inject_pf(void *vm, int vcpuid, int error_code, uint64_t cr2);
-
-int vm_restart_instruction(void *vm, int vcpuid);
+#endif
#endif /* _VMM_H_ */
diff --git a/sys/amd64/include/vmm_instruction_emul.h b/sys/amd64/include/vmm_instruction_emul.h
--- a/sys/amd64/include/vmm_instruction_emul.h
+++ b/sys/amd64/include/vmm_instruction_emul.h
@@ -33,13 +33,26 @@
#include <sys/mman.h>
+/*
+ * Allow for different arguments to identify vCPUs in userspace vs the
+ * kernel. Eventually we should add struct vcpu in userland and
+ * always use the kernel arguments removing these macros.
+ */
+#ifdef _KERNEL
+#define VCPU_DECL struct vcpu *vcpu
+#define VCPU_ARGS vcpu
+#else
+#define VCPU_DECL void *vm, int vcpuid
+#define VCPU_ARGS vm, vcpuid
+#endif
+
/*
* Callback functions to read and write memory regions.
*/
-typedef int (*mem_region_read_t)(void *vm, int cpuid, uint64_t gpa,
+typedef int (*mem_region_read_t)(VCPU_DECL, uint64_t gpa,
uint64_t *rval, int rsize, void *arg);
-typedef int (*mem_region_write_t)(void *vm, int cpuid, uint64_t gpa,
+typedef int (*mem_region_write_t)(VCPU_DECL, uint64_t gpa,
uint64_t wval, int wsize, void *arg);
/*
@@ -53,11 +66,11 @@
* 'struct vmctx *' when called from user context.
* s
*/
-int vmm_emulate_instruction(void *vm, int cpuid, uint64_t gpa, struct vie *vie,
+int vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t mrr,
mem_region_write_t mrw, void *mrarg);
-int vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
+int vie_update_register(VCPU_DECL, enum vm_reg_name reg,
uint64_t val, int size);
/*
@@ -81,7 +94,7 @@
*
* 'vie' must be initialized before calling 'vmm_fetch_instruction()'
*/
-int vmm_fetch_instruction(struct vm *vm, int cpuid,
+int vmm_fetch_instruction(struct vcpu *vcpu,
struct vm_guest_paging *guest_paging,
uint64_t rip, int inst_length, struct vie *vie,
int *is_fault);
@@ -94,14 +107,14 @@
* 0 1 An exception was injected into the guest
* EFAULT N/A An unrecoverable hypervisor error occurred
*/
-int vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
/*
* Like vm_gla2gpa, but no exceptions are injected into the guest and
* PTEs are not changed.
*/
-int vm_gla2gpa_nofault(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
#endif /* _KERNEL */
@@ -121,7 +134,7 @@
*/
#ifdef _KERNEL
#define VIE_INVALID_GLA (1UL << 63) /* a non-canonical address */
-int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
+int vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
enum vm_cpu_mode cpu_mode, int csd, struct vie *vie);
#else /* !_KERNEL */
/*
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -973,12 +973,10 @@
static void
svm_update_virqinfo(struct svm_vcpu *vcpu)
{
- struct vm *vm;
struct vlapic *vlapic;
struct vmcb_ctrl *ctrl;
- vm = vcpu->sc->vm;
- vlapic = vm_lapic(vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
ctrl = svm_get_vmcb_ctrl(vcpu);
/* Update %cr8 in the emulated vlapic */
@@ -1210,7 +1208,7 @@
KASSERT(error == 0, ("%s: error %d updating efer", __func__, error));
return (0);
gpf:
- vm_inject_gp(sc->vm, vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (0);
}
@@ -1459,7 +1457,7 @@
/* Reflect the exception back into the guest */
SVM_CTR2(vcpu, "Reflecting exception "
"%d/%#x into the guest", idtvec, (int)info1);
- error = vm_inject_exception(svm_sc->vm, vcpuid, idtvec,
+ error = vm_inject_exception(vcpu->vcpu, idtvec,
errcode_valid, info1, 0);
KASSERT(error == 0, ("%s: vm_inject_exception error %d",
__func__, error));
@@ -1556,7 +1554,7 @@
case VMCB_EXIT_SKINIT:
case VMCB_EXIT_ICEBP:
case VMCB_EXIT_INVLPGA:
- vm_inject_ud(svm_sc->vm, vcpuid);
+ vm_inject_ud(vcpu->vcpu);
handled = 1;
break;
case VMCB_EXIT_INVD:
@@ -2017,7 +2015,7 @@
state = svm_get_vmcb_state(vcpu);
ctrl = svm_get_vmcb_ctrl(vcpu);
vmexit = vm_exitinfo(vm, vcpuid);
- vlapic = vm_lapic(vm, vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
gctx = svm_get_guest_regctx(vcpu);
vmcb_pa = vcpu->vmcb_pa;
@@ -2346,7 +2344,7 @@
error = EINVAL;
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
vlapic->ipi_exit = val;
break;
default:
@@ -2379,7 +2377,7 @@
*retval = 1; /* unrestricted guest is always enabled */
break;
case VM_CAP_IPI_EXIT:
- vlapic = vm_lapic(vcpu->sc->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
*retval = vlapic->ipi_exit;
break;
default:
diff --git a/sys/amd64/vmm/amd/svm_msr.c b/sys/amd64/vmm/amd/svm_msr.c
--- a/sys/amd64/vmm/amd/svm_msr.c
+++ b/sys/amd64/vmm/amd/svm_msr.c
@@ -125,7 +125,7 @@
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
if (vm_rdmtrr(&vcpu->mtrr, num, result) != 0) {
- vm_inject_gp(sc->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
}
break;
case MSR_SYSCFG:
@@ -158,7 +158,7 @@
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
- vm_inject_gp(sc->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
}
break;
case MSR_SYSCFG:
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1692,31 +1692,31 @@
/* Only xcr0 is supported. */
if (vmxctx->guest_rcx != 0) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
/* We only handle xcr0 if both the host and guest have XSAVE enabled. */
if (!limits->xsave_enabled || !(vmcs_read(VMCS_GUEST_CR4) & CR4_XSAVE)) {
- vm_inject_ud(vmx->vm, vcpu->vcpuid);
+ vm_inject_ud(vcpu->vcpu);
return (HANDLED);
}
xcrval = vmxctx->guest_rdx << 32 | (vmxctx->guest_rax & 0xffffffff);
if ((xcrval & ~limits->xcr0_allowed) != 0) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
if (!(xcrval & XFEATURE_ENABLED_X87)) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
/* AVX (YMM_Hi128) requires SSE. */
if (xcrval & XFEATURE_ENABLED_AVX &&
(xcrval & XFEATURE_AVX) != XFEATURE_AVX) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
@@ -1727,7 +1727,7 @@
if (xcrval & XFEATURE_AVX512 &&
(xcrval & (XFEATURE_AVX512 | XFEATURE_AVX)) !=
(XFEATURE_AVX512 | XFEATURE_AVX)) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
@@ -1737,7 +1737,7 @@
*/
if (((xcrval & XFEATURE_ENABLED_BNDREGS) != 0) !=
((xcrval & XFEATURE_ENABLED_BNDCSR) != 0)) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
return (HANDLED);
}
@@ -1927,7 +1927,7 @@
return (UNHANDLED);
}
- vlapic = vm_lapic(vmx->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
regnum = (exitqual >> 8) & 0xf;
if (exitqual & 0x10) {
cr8 = vlapic_get_cr8(vlapic);
@@ -2721,7 +2721,7 @@
"the guest", intr_vec, errcode);
SDT_PROBE5(vmm, vmx, exit, exception,
vmx, vcpuid, vmexit, intr_vec, errcode);
- error = vm_inject_exception(vmx->vm, vcpuid, intr_vec,
+ error = vm_inject_exception(vcpu->vcpu, intr_vec,
errcode_valid, errcode, 0);
KASSERT(error == 0, ("%s: vm_inject_exception error %d",
__func__, error));
@@ -2777,7 +2777,7 @@
* pointing to the next instruction.
*/
vmexit->inst_length = 0;
- vlapic = vm_lapic(vmx->vm, vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
SDT_PROBE4(vmm, vmx, exit, apicwrite,
vmx, vcpuid, vmexit, vlapic);
handled = vmx_handle_apic_write(vcpu, vlapic, qual);
@@ -2795,7 +2795,7 @@
vmexit->exitcode = VM_EXITCODE_MWAIT;
break;
case EXIT_REASON_TPR:
- vlapic = vm_lapic(vmx->vm, vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
vlapic_sync_tpr(vlapic);
vmexit->inst_length = 0;
handled = HANDLED;
@@ -3030,7 +3030,7 @@
vcpuid = vcpu->vcpuid;
vmcs = vcpu->vmcs;
vmxctx = &vcpu->ctx;
- vlapic = vm_lapic(vm, vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
vmexit = vm_exitinfo(vm, vcpuid);
launched = 0;
@@ -3644,7 +3644,7 @@
case VM_CAP_IPI_EXIT:
retval = 0;
- vlapic = vm_lapic(vcpu->vmx->vm, vcpu->vcpuid);
+ vlapic = vm_lapic(vcpu->vcpu);
vlapic->ipi_exit = val;
break;
default:
diff --git a/sys/amd64/vmm/intel/vmx_msr.c b/sys/amd64/vmm/intel/vmx_msr.c
--- a/sys/amd64/vmm/intel/vmx_msr.c
+++ b/sys/amd64/vmm/intel/vmx_msr.c
@@ -423,7 +423,7 @@
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
if (vm_rdmtrr(&vcpu->mtrr, num, val) != 0) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
}
break;
case MSR_IA32_MISC_ENABLE:
@@ -466,7 +466,7 @@
case MSR_MTRR64kBase:
case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
if (vm_wrmtrr(&vcpu->mtrr, num, val) != 0) {
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
}
break;
case MSR_IA32_MISC_ENABLE:
@@ -493,7 +493,7 @@
if (pat_valid(val))
vcpu->guest_msrs[IDX_MSR_PAT] = val;
else
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
break;
case MSR_TSC:
error = vmx_set_tsc_offset(vmx, vcpu, val - rdtsc());
@@ -507,7 +507,7 @@
*/
vcpu->guest_msrs[IDX_MSR_TSC_AUX] = val;
else
- vm_inject_gp(vmx->vm, vcpu->vcpuid);
+ vm_inject_gp(vcpu->vcpu);
break;
default:
error = EINVAL;
diff --git a/sys/amd64/vmm/io/vhpet.h b/sys/amd64/vmm/io/vhpet.h
--- a/sys/amd64/vmm/io/vhpet.h
+++ b/sys/amd64/vmm/io/vhpet.h
@@ -40,9 +40,9 @@
struct vhpet *vhpet_init(struct vm *vm);
void vhpet_cleanup(struct vhpet *vhpet);
-int vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val,
+int vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val,
int size, void *arg);
-int vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *val,
+int vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *val,
int size, void *arg);
int vhpet_getcap(struct vm_hpet_cap *cap);
#ifdef BHYVE_SNAPSHOT
diff --git a/sys/amd64/vmm/io/vhpet.c b/sys/amd64/vmm/io/vhpet.c
--- a/sys/amd64/vmm/io/vhpet.c
+++ b/sys/amd64/vmm/io/vhpet.c
@@ -472,7 +472,7 @@
}
int
-vhpet_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t val, int size,
+vhpet_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t val, int size,
void *arg)
{
struct vhpet *vhpet;
@@ -481,7 +481,7 @@
sbintime_t now, *nowptr;
int i, offset;
- vhpet = vm_hpet(vm);
+ vhpet = vm_hpet(vcpu_vm(vcpu));
offset = gpa - VHPET_BASE;
VHPET_LOCK(vhpet);
@@ -622,14 +622,14 @@
}
int
-vhpet_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval, int size,
+vhpet_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
void *arg)
{
int i, offset;
struct vhpet *vhpet;
uint64_t data;
- vhpet = vm_hpet(vm);
+ vhpet = vm_hpet(vcpu_vm(vcpu));
offset = gpa - VHPET_BASE;
VHPET_LOCK(vhpet);
diff --git a/sys/amd64/vmm/io/vioapic.h b/sys/amd64/vmm/io/vioapic.h
--- a/sys/amd64/vmm/io/vioapic.h
+++ b/sys/amd64/vmm/io/vioapic.h
@@ -45,9 +45,9 @@
int vioapic_deassert_irq(struct vm *vm, int irq);
int vioapic_pulse_irq(struct vm *vm, int irq);
-int vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa,
+int vioapic_mmio_write(struct vcpu *vcpu, uint64_t gpa,
uint64_t wval, int size, void *arg);
-int vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa,
+int vioapic_mmio_read(struct vcpu *vcpu, uint64_t gpa,
uint64_t *rval, int size, void *arg);
int vioapic_pincount(struct vm *vm);
diff --git a/sys/amd64/vmm/io/vioapic.c b/sys/amd64/vmm/io/vioapic.c
--- a/sys/amd64/vmm/io/vioapic.c
+++ b/sys/amd64/vmm/io/vioapic.c
@@ -245,7 +245,7 @@
int delmode, pin, vector;
bool level, phys;
- vlapic = vm_lapic(vm, vcpuid);
+ vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
vioapic = vm_ioapic(vm);
VIOAPIC_LOCK(vioapic);
@@ -277,7 +277,7 @@
}
static uint32_t
-vioapic_read(struct vioapic *vioapic, int vcpuid, uint32_t addr)
+vioapic_read(struct vioapic *vioapic, struct vcpu *vcpu, uint32_t addr)
{
int regnum, pin, rshift;
@@ -312,13 +312,15 @@
}
static void
-vioapic_write(struct vioapic *vioapic, int vcpuid, uint32_t addr, uint32_t data)
+vioapic_write(struct vioapic *vioapic, struct vcpu *vcpu, uint32_t addr,
+ uint32_t data)
{
uint64_t data64, mask64;
uint64_t last, changed;
- int regnum, pin, lshift;
+ int regnum, pin, lshift, vcpuid;
cpuset_t allvcpus;
+ vcpuid = vcpu_vcpuid(vcpu);
regnum = addr & 0xff;
switch (regnum) {
case IOAPIC_ID:
@@ -392,7 +394,7 @@
}
static int
-vioapic_mmio_rw(struct vioapic *vioapic, int vcpuid, uint64_t gpa,
+vioapic_mmio_rw(struct vioapic *vioapic, struct vcpu *vcpu, uint64_t gpa,
uint64_t *data, int size, bool doread)
{
uint64_t offset;
@@ -417,10 +419,10 @@
vioapic->ioregsel = *data;
} else {
if (doread) {
- *data = vioapic_read(vioapic, vcpuid,
+ *data = vioapic_read(vioapic, vcpu,
vioapic->ioregsel);
} else {
- vioapic_write(vioapic, vcpuid, vioapic->ioregsel,
+ vioapic_write(vioapic, vcpu, vioapic->ioregsel,
*data);
}
}
@@ -430,26 +432,26 @@
}
int
-vioapic_mmio_read(void *vm, int vcpuid, uint64_t gpa, uint64_t *rval,
+vioapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval,
int size, void *arg)
{
int error;
struct vioapic *vioapic;
- vioapic = vm_ioapic(vm);
- error = vioapic_mmio_rw(vioapic, vcpuid, gpa, rval, size, true);
+ vioapic = vm_ioapic(vcpu_vm(vcpu));
+ error = vioapic_mmio_rw(vioapic, vcpu, gpa, rval, size, true);
return (error);
}
int
-vioapic_mmio_write(void *vm, int vcpuid, uint64_t gpa, uint64_t wval,
+vioapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval,
int size, void *arg)
{
int error;
struct vioapic *vioapic;
- vioapic = vm_ioapic(vm);
- error = vioapic_mmio_rw(vioapic, vcpuid, gpa, &wval, size, false);
+ vioapic = vm_ioapic(vcpu_vm(vcpu));
+ error = vioapic_mmio_rw(vioapic, vcpu, gpa, &wval, size, false);
return (error);
}
diff --git a/sys/amd64/vmm/io/vlapic.h b/sys/amd64/vmm/io/vlapic.h
--- a/sys/amd64/vmm/io/vlapic.h
+++ b/sys/amd64/vmm/io/vlapic.h
@@ -79,7 +79,7 @@
uint64_t vlapic_get_apicbase(struct vlapic *vlapic);
int vlapic_set_apicbase(struct vlapic *vlapic, uint64_t val);
-void vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state s);
+void vlapic_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state s);
bool vlapic_enabled(struct vlapic *vlapic);
void vlapic_deliver_intr(struct vm *vm, bool level, uint32_t dest, bool phys,
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -867,7 +867,7 @@
CPU_ZERO(dmask);
amask = vm_active_cpus(vm);
CPU_FOREACH_ISSET(vcpuid, &amask) {
- vlapic = vm_lapic(vm, vcpuid);
+ vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
dfr = vlapic->apic_page->dfr;
ldr = vlapic->apic_page->ldr;
@@ -935,7 +935,7 @@
uint8_t tpr;
if (val & ~0xf) {
- vm_inject_gp(vlapic->vm, vlapic->vcpuid);
+ vm_inject_gp(vlapic->vcpu);
return;
}
@@ -1131,7 +1131,7 @@
* requires that the boot state is set to SIPI
* here.
*/
- vlapic2 = vm_lapic(vlapic->vm, i);
+ vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
vlapic2->boot_state = BS_SIPI;
break;
}
@@ -1155,7 +1155,7 @@
/*
* Ignore SIPIs in any state other than wait-for-SIPI
*/
- vlapic2 = vm_lapic(vlapic->vm, i);
+ vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
if (vlapic2->boot_state != BS_SIPI)
break;
vlapic2->boot_state = BS_RUNNING;
@@ -1170,7 +1170,7 @@
}
CPU_FOREACH_ISSET(i, &dmask) {
- vlapic2 = vm_lapic(vlapic->vm, i);
+ vlapic2 = vm_lapic(vm_vcpu(vlapic->vm, i));
/*
* Ignore SIPIs in any state other than wait-for-SIPI
@@ -1202,7 +1202,7 @@
static void
vlapic_handle_init(struct vm *vm, int vcpuid, void *arg)
{
- struct vlapic *vlapic = vm_lapic(vm, vcpuid);
+ struct vlapic *vlapic = vm_lapic(vm_vcpu(vm, vcpuid));
vlapic_reset(vlapic);
@@ -1659,12 +1659,12 @@
}
void
-vlapic_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
+vlapic_set_x2apic_state(struct vcpu *vcpu, enum x2apic_state state)
{
struct vlapic *vlapic;
struct LAPIC *lapic;
- vlapic = vm_lapic(vm, vcpuid);
+ vlapic = vm_lapic(vcpu);
if (state == X2APIC_DISABLED)
vlapic->msr_apicbase &= ~APICBASE_X2APIC;
@@ -1866,7 +1866,7 @@
maxcpus = vm_get_maxcpus(vm);
for (i = 0; i < maxcpus; i++) {
- vlapic = vm_lapic(vm, i);
+ vlapic = vm_lapic(vm_vcpu(vm, i));
/* snapshot the page first; timer period depends on icr_timer */
lapic = vlapic->apic_page;
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -687,7 +687,7 @@
#ifdef INVARIANTS
int hostcpu, state;
- state = vcpu_get_state(vm, vcpuid, &hostcpu);
+ state = vcpu_get_state(vm_vcpu(vm, vcpuid), &hostcpu);
KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
#endif
@@ -1064,7 +1064,7 @@
}
void *
-vm_gpa_hold(struct vm *vm, int vcpuid, vm_paddr_t gpa, size_t len, int reqprot,
+vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
void **cookie)
{
#ifdef INVARIANTS
@@ -1072,11 +1072,11 @@
* The current vcpu should be frozen to ensure 'vm_memmap[]'
* stability.
*/
- int state = vcpu_get_state(vm, vcpuid, NULL);
+ int state = vcpu_get_state(vcpu, NULL);
KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
__func__, state));
#endif
- return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
+ return (_vm_gpa_hold(vcpu->vm, gpa, len, reqprot, cookie));
}
void *
@@ -1091,7 +1091,7 @@
*/
int state;
for (int i = 0; i < vm->maxcpus; i++) {
- state = vcpu_get_state(vm, i, NULL);
+ state = vcpu_get_state(vm_vcpu(vm, i), NULL);
KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
__func__, state));
}
@@ -1108,37 +1108,29 @@
}
int
-vm_get_register(struct vm *vm, int vcpu, int reg, uint64_t *retval)
+vm_get_register(struct vcpu *vcpu, int reg, uint64_t *retval)
{
- if (vcpu < 0 || vcpu >= vm->maxcpus)
- return (EINVAL);
-
if (reg >= VM_REG_LAST)
return (EINVAL);
- return (vmmops_getreg(vcpu_cookie(vm, vcpu), reg, retval));
+ return (vmmops_getreg(vcpu->cookie, reg, retval));
}
int
-vm_set_register(struct vm *vm, int vcpuid, int reg, uint64_t val)
+vm_set_register(struct vcpu *vcpu, int reg, uint64_t val)
{
- struct vcpu *vcpu;
int error;
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- return (EINVAL);
-
if (reg >= VM_REG_LAST)
return (EINVAL);
- vcpu = &vm->vcpu[vcpuid];
error = vmmops_setreg(vcpu->cookie, reg, val);
if (error || reg != VM_REG_GUEST_RIP)
return (error);
/* Set 'nextrip' to match the value of %rip */
- VCPU_CTR1(vm, vcpuid, "Setting nextrip to %#lx", val);
+ VMM_CTR1(vcpu, "Setting nextrip to %#lx", val);
vcpu->nextrip = val;
return (0);
}
@@ -1176,17 +1168,13 @@
}
int
-vm_get_seg_desc(struct vm *vm, int vcpu, int reg,
- struct seg_desc *desc)
+vm_get_seg_desc(struct vcpu *vcpu, int reg, struct seg_desc *desc)
{
- if (vcpu < 0 || vcpu >= vm->maxcpus)
- return (EINVAL);
-
if (!is_segment_register(reg) && !is_descriptor_table(reg))
return (EINVAL);
- return (vmmops_getdesc(vcpu_cookie(vm, vcpu), reg, desc));
+ return (vmmops_getdesc(vcpu->cookie, reg, desc));
}
int
@@ -1566,8 +1554,8 @@
/* Fetch, decode and emulate the faulting instruction */
if (vie->num_valid == 0) {
- error = vmm_fetch_instruction(vm, vcpuid, paging, vme->rip +
- cs_base, VIE_INST_SIZE, vie, &fault);
+ error = vmm_fetch_instruction(vcpu, paging, vme->rip + cs_base,
+ VIE_INST_SIZE, vie, &fault);
} else {
/*
* The instruction bytes have already been copied into 'vie'
@@ -1577,7 +1565,7 @@
if (error || fault)
return (error);
- if (vmm_decode_instruction(vm, vcpuid, gla, cpu_mode, cs_d, vie) != 0) {
+ if (vmm_decode_instruction(vcpu, gla, cpu_mode, cs_d, vie) != 0) {
VCPU_CTR1(vm, vcpuid, "Error decoding instruction at %#lx",
vme->rip + cs_base);
*retu = true; /* dump instruction bytes in userspace */
@@ -1607,8 +1595,8 @@
return (0);
}
- error = vmm_emulate_instruction(vm, vcpuid, gpa, vie, paging,
- mread, mwrite, retu);
+ error = vmm_emulate_instruction(vcpu, gpa, vie, paging, mread, mwrite,
+ retu);
return (error);
}
@@ -1862,7 +1850,7 @@
case VM_EXITCODE_MONITOR:
case VM_EXITCODE_MWAIT:
case VM_EXITCODE_VMINSN:
- vm_inject_ud(vm, vcpuid);
+ vm_inject_ud(vcpu);
break;
default:
retu = true; /* handled in userland */
@@ -1891,20 +1879,13 @@
}
int
-vm_restart_instruction(void *arg, int vcpuid)
+vm_restart_instruction(struct vcpu *vcpu)
{
- struct vm *vm;
- struct vcpu *vcpu;
enum vcpu_state state;
uint64_t rip;
int error __diagused;
- vm = arg;
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- return (EINVAL);
-
- vcpu = &vm->vcpu[vcpuid];
- state = vcpu_get_state(vm, vcpuid, NULL);
+ state = vcpu_get_state(vcpu, NULL);
if (state == VCPU_RUNNING) {
/*
* When a vcpu is "running" the next instruction is determined
@@ -1913,7 +1894,7 @@
* instruction to be restarted.
*/
vcpu->exitinfo.inst_length = 0;
- VCPU_CTR1(vm, vcpuid, "restarting instruction at %#lx by "
+ VMM_CTR1(vcpu, "restarting instruction at %#lx by "
"setting inst_length to zero", vcpu->exitinfo.rip);
} else if (state == VCPU_FROZEN) {
/*
@@ -1922,9 +1903,9 @@
* instruction. Thus instruction restart is achieved by setting
* 'nextrip' to the vcpu's %rip.
*/
- error = vm_get_register(vm, vcpuid, VM_REG_GUEST_RIP, &rip);
+ error = vm_get_register(vcpu, VM_REG_GUEST_RIP, &rip);
KASSERT(!error, ("%s: error %d getting rip", __func__, error));
- VCPU_CTR2(vm, vcpuid, "restarting instruction by updating "
+ VMM_CTR2(vcpu, "restarting instruction by updating "
"nextrip from %#lx to %#lx", vcpu->nextrip, rip);
vcpu->nextrip = rip;
} else {
@@ -2109,7 +2090,7 @@
}
if (valid) {
- VCPU_CTR4(vm, vcpuid, "%s: info1(%#lx), info2(%#lx), "
+ VMM_CTR4(vcpu, "%s: info1(%#lx), info2(%#lx), "
"retinfo(%#lx)", __func__, info1, info2, *retinfo);
}
@@ -2131,16 +2112,12 @@
}
int
-vm_inject_exception(struct vm *vm, int vcpuid, int vector, int errcode_valid,
+vm_inject_exception(struct vcpu *vcpu, int vector, int errcode_valid,
uint32_t errcode, int restart_instruction)
{
- struct vcpu *vcpu;
uint64_t regval;
int error __diagused;
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- return (EINVAL);
-
if (vector < 0 || vector >= 32)
return (EINVAL);
@@ -2152,10 +2129,8 @@
if (vector == IDT_DF)
return (EINVAL);
- vcpu = &vm->vcpu[vcpuid];
-
if (vcpu->exception_pending) {
- VCPU_CTR2(vm, vcpuid, "Unable to inject exception %d due to "
+ VMM_CTR2(vcpu, "Unable to inject exception %d due to "
"pending exception %d", vector, vcpu->exc_vector);
return (EBUSY);
}
@@ -2164,7 +2139,7 @@
/*
* Exceptions don't deliver an error code in real mode.
*/
- error = vm_get_register(vm, vcpuid, VM_REG_GUEST_CR0, &regval);
+ error = vm_get_register(vcpu, VM_REG_GUEST_CR0, &regval);
KASSERT(!error, ("%s: error %d getting CR0", __func__, error));
if (!(regval & CR0_PE))
errcode_valid = 0;
@@ -2176,50 +2151,45 @@
* Event blocking by "STI" or "MOV SS" is cleared after guest executes
* one instruction or incurs an exception.
*/
- error = vm_set_register(vm, vcpuid, VM_REG_GUEST_INTR_SHADOW, 0);
+ error = vm_set_register(vcpu, VM_REG_GUEST_INTR_SHADOW, 0);
KASSERT(error == 0, ("%s: error %d clearing interrupt shadow",
__func__, error));
if (restart_instruction)
- vm_restart_instruction(vm, vcpuid);
+ vm_restart_instruction(vcpu);
vcpu->exception_pending = 1;
vcpu->exc_vector = vector;
vcpu->exc_errcode = errcode;
vcpu->exc_errcode_valid = errcode_valid;
- VCPU_CTR1(vm, vcpuid, "Exception %d pending", vector);
+ VMM_CTR1(vcpu, "Exception %d pending", vector);
return (0);
}
void
-vm_inject_fault(void *vmarg, int vcpuid, int vector, int errcode_valid,
- int errcode)
+vm_inject_fault(struct vcpu *vcpu, int vector, int errcode_valid, int errcode)
{
- struct vm *vm;
int error __diagused, restart_instruction;
- vm = vmarg;
restart_instruction = 1;
- error = vm_inject_exception(vm, vcpuid, vector, errcode_valid,
+ error = vm_inject_exception(vcpu, vector, errcode_valid,
errcode, restart_instruction);
KASSERT(error == 0, ("vm_inject_exception error %d", error));
}
void
-vm_inject_pf(void *vmarg, int vcpuid, int error_code, uint64_t cr2)
+vm_inject_pf(struct vcpu *vcpu, int error_code, uint64_t cr2)
{
- struct vm *vm;
int error __diagused;
- vm = vmarg;
- VCPU_CTR2(vm, vcpuid, "Injecting page fault: error_code %#x, cr2 %#lx",
+ VMM_CTR2(vcpu, "Injecting page fault: error_code %#x, cr2 %#lx",
error_code, cr2);
- error = vm_set_register(vm, vcpuid, VM_REG_GUEST_CR2, cr2);
+ error = vm_set_register(vcpu, VM_REG_GUEST_CR2, cr2);
KASSERT(error == 0, ("vm_set_register(cr2) error %d", error));
- vm_inject_fault(vm, vcpuid, IDT_PF, 1, error_code);
+ vm_inject_fault(vcpu, IDT_PF, 1, error_code);
}
static VMM_STAT(VCPU_NMI_COUNT, "number of NMIs delivered to vcpu");
@@ -2359,9 +2329,9 @@
}
struct vlapic *
-vm_lapic(struct vm *vm, int cpu)
+vm_lapic(struct vcpu *vcpu)
{
- return (vm->vcpu[cpu].vlapic);
+ return (vcpu->vlapic);
}
struct vioapic *
@@ -2447,16 +2417,10 @@
}
enum vcpu_state
-vcpu_get_state(struct vm *vm, int vcpuid, int *hostcpu)
+vcpu_get_state(struct vcpu *vcpu, int *hostcpu)
{
- struct vcpu *vcpu;
enum vcpu_state state;
- if (vcpuid < 0 || vcpuid >= vm->maxcpus)
- panic("vm_get_run_state: invalid vcpuid %d", vcpuid);
-
- vcpu = &vm->vcpu[vcpuid];
-
vcpu_lock(vcpu);
state = vcpu->state;
if (hostcpu != NULL)
@@ -2572,15 +2536,18 @@
int
vm_set_x2apic_state(struct vm *vm, int vcpuid, enum x2apic_state state)
{
+ struct vcpu *vcpu;
+
if (vcpuid < 0 || vcpuid >= vm->maxcpus)
return (EINVAL);
if (state >= X2APIC_STATE_LAST)
return (EINVAL);
- vm->vcpu[vcpuid].x2apic_state = state;
+ vcpu = &vm->vcpu[vcpuid];
+ vcpu->x2apic_state = state;
- vlapic_set_x2apic_state(vm, vcpuid, state);
+ vlapic_set_x2apic_state(vcpu, state);
return (0);
}
@@ -2755,7 +2722,7 @@
}
int
-vm_copy_setup(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vm_copy_setup(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, size_t len, int prot, struct vm_copyinfo *copyinfo,
int num_copyinfo, int *fault)
{
@@ -2770,7 +2737,7 @@
remaining = len;
while (remaining > 0) {
KASSERT(nused < num_copyinfo, ("insufficient vm_copyinfo"));
- error = vm_gla2gpa(vm, vcpuid, paging, gla, prot, &gpa, fault);
+ error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault);
if (error || *fault)
return (error);
off = gpa & PAGE_MASK;
@@ -2783,7 +2750,7 @@
}
for (idx = 0; idx < nused; idx++) {
- hva = vm_gpa_hold(vm, vcpuid, copyinfo[idx].gpa,
+ hva = vm_gpa_hold(vcpu, copyinfo[idx].gpa,
copyinfo[idx].len, prot, &cookie);
if (hva == NULL)
break;
diff --git a/sys/amd64/vmm/vmm_dev.c b/sys/amd64/vmm/vmm_dev.c
--- a/sys/amd64/vmm/vmm_dev.c
+++ b/sys/amd64/vmm/vmm_dev.c
@@ -140,7 +140,7 @@
{
enum vcpu_state state;
- state = vcpu_get_state(sc->vm, vcpu, NULL);
+ state = vcpu_get_state(vm_vcpu(sc->vm, vcpu), NULL);
if (state != VCPU_FROZEN) {
panic("vcpu %s(%d) has invalid state %d", vm_name(sc->vm),
vcpu, state);
@@ -218,6 +218,7 @@
vm_paddr_t gpa, maxaddr;
void *hpa, *cookie;
struct vmmdev_softc *sc;
+ struct vcpu *vcpu;
uint16_t lastcpu;
error = vmm_priv_check(curthread->td_ucred);
@@ -235,6 +236,7 @@
error = vcpu_lock_one(sc, lastcpu);
if (error)
return (error);
+ vcpu = vm_vcpu(sc->vm, lastcpu);
prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
maxaddr = vmm_sysmem_maxaddr(sc->vm);
@@ -251,8 +253,7 @@
* Since this device does not support lseek(2), dd(1) will
* read(2) blocks of data to simulate the lseek(2).
*/
- hpa = vm_gpa_hold(sc->vm, lastcpu, gpa, c,
- prot, &cookie);
+ hpa = vm_gpa_hold(vcpu, gpa, c, prot, &cookie);
if (hpa == NULL) {
if (uio->uio_rw == UIO_READ && gpa < maxaddr)
error = uiomove(__DECONST(void *, zero_region),
@@ -336,14 +337,14 @@
}
static int
-vm_get_register_set(struct vm *vm, int vcpu, unsigned int count, int *regnum,
+vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
uint64_t *regval)
{
int error, i;
error = 0;
for (i = 0; i < count; i++) {
- error = vm_get_register(vm, vcpu, regnum[i], &regval[i]);
+ error = vm_get_register(vcpu, regnum[i], &regval[i]);
if (error)
break;
}
@@ -351,14 +352,14 @@
}
static int
-vm_set_register_set(struct vm *vm, int vcpu, unsigned int count, int *regnum,
+vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
uint64_t *regval)
{
int error, i;
error = 0;
for (i = 0; i < count; i++) {
- error = vm_set_register(vm, vcpu, regnum[i], regval[i]);
+ error = vm_set_register(vcpu, regnum[i], regval[i]);
if (error)
break;
}
@@ -369,9 +370,10 @@
vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
struct thread *td)
{
- int error, vcpu, state_changed, size;
+ int error, vcpuid, state_changed, size;
cpuset_t *cpuset;
struct vmmdev_softc *sc;
+ struct vcpu *vcpu;
struct vm_register *vmreg;
struct vm_seg_desc *vmsegdesc;
struct vm_register_set *vmregset;
@@ -420,7 +422,8 @@
if (sc == NULL)
return (ENXIO);
- vcpu = -1;
+ vcpuid = -1;
+ vcpu = NULL;
state_changed = 0;
/*
@@ -450,11 +453,12 @@
* XXX fragile, handle with care
* Assumes that the first field of the ioctl data is the vcpu.
*/
- vcpu = *(int *)data;
- error = vcpu_lock_one(sc, vcpu);
+ vcpuid = *(int *)data;
+ error = vcpu_lock_one(sc, vcpuid);
if (error)
goto done;
state_changed = 1;
+ vcpu = vm_vcpu(sc->vm, vcpuid);
break;
case VM_MAP_PPTDEV_MMIO:
@@ -487,8 +491,8 @@
* Lock a vcpu to make sure that the memory map cannot be
* modified while it is being inspected.
*/
- vcpu = vm_get_maxcpus(sc->vm) - 1;
- error = vcpu_lock_one(sc, vcpu);
+ vcpuid = vm_get_maxcpus(sc->vm) - 1;
+ error = vcpu_lock_one(sc, vcpuid);
if (error)
goto done;
state_changed = 1;
@@ -577,7 +581,7 @@
break;
case VM_INJECT_EXCEPTION:
vmexc = (struct vm_exception *)data;
- error = vm_inject_exception(sc->vm, vmexc->cpuid,
+ error = vm_inject_exception(vcpu,
vmexc->vector, vmexc->error_code_valid, vmexc->error_code,
vmexc->restart_instruction);
break;
@@ -641,10 +645,10 @@
}
if (cmd == VM_SET_KERNEMU_DEV)
- error = mwrite(sc->vm, kernemu->vcpuid, kernemu->gpa,
+ error = mwrite(vcpu, kernemu->gpa,
kernemu->value, size, &arg);
else
- error = mread(sc->vm, kernemu->vcpuid, kernemu->gpa,
+ error = mread(vcpu, kernemu->gpa,
&kernemu->value, size, &arg);
break;
}
@@ -709,13 +713,11 @@
break;
case VM_GET_REGISTER:
vmreg = (struct vm_register *)data;
- error = vm_get_register(sc->vm, vmreg->cpuid, vmreg->regnum,
- &vmreg->regval);
+ error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval);
break;
case VM_SET_REGISTER:
vmreg = (struct vm_register *)data;
- error = vm_set_register(sc->vm, vmreg->cpuid, vmreg->regnum,
- vmreg->regval);
+ error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval);
break;
case VM_SET_SEGMENT_DESCRIPTOR:
vmsegdesc = (struct vm_seg_desc *)data;
@@ -725,7 +727,7 @@
break;
case VM_GET_SEGMENT_DESCRIPTOR:
vmsegdesc = (struct vm_seg_desc *)data;
- error = vm_get_seg_desc(sc->vm, vmsegdesc->cpuid,
+ error = vm_get_seg_desc(vcpu,
vmsegdesc->regnum,
&vmsegdesc->desc);
break;
@@ -742,7 +744,7 @@
error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
vmregset->count);
if (error == 0)
- error = vm_get_register_set(sc->vm, vmregset->cpuid,
+ error = vm_get_register_set(vcpu,
vmregset->count, regnums, regvals);
if (error == 0)
error = copyout(regvals, vmregset->regvals,
@@ -766,7 +768,7 @@
error = copyin(vmregset->regvals, regvals,
sizeof(regvals[0]) * vmregset->count);
if (error == 0)
- error = vm_set_register_set(sc->vm, vmregset->cpuid,
+ error = vm_set_register_set(vcpu,
vmregset->count, regnums, regvals);
free(regvals, M_VMMDEV);
free(regnums, M_VMMDEV);
@@ -807,7 +809,7 @@
CTASSERT(PROT_WRITE == VM_PROT_WRITE);
CTASSERT(PROT_EXEC == VM_PROT_EXECUTE);
gg = (struct vm_gla2gpa *)data;
- error = vm_gla2gpa(sc->vm, gg->vcpuid, &gg->paging, gg->gla,
+ error = vm_gla2gpa(vcpu, &gg->paging, gg->gla,
gg->prot, &gg->gpa, &gg->fault);
KASSERT(error == 0 || error == EFAULT,
("%s: vm_gla2gpa unknown error %d", __func__, error));
@@ -815,8 +817,8 @@
}
case VM_GLA2GPA_NOFAULT:
gg = (struct vm_gla2gpa *)data;
- error = vm_gla2gpa_nofault(sc->vm, gg->vcpuid, &gg->paging,
- gg->gla, gg->prot, &gg->gpa, &gg->fault);
+ error = vm_gla2gpa_nofault(vcpu, &gg->paging, gg->gla,
+ gg->prot, &gg->gpa, &gg->fault);
KASSERT(error == 0 || error == EFAULT,
("%s: vm_gla2gpa unknown error %d", __func__, error));
break;
@@ -882,7 +884,7 @@
rtctime->secs = vrtc_get_time(sc->vm);
break;
case VM_RESTART_INSTRUCTION:
- error = vm_restart_instruction(sc->vm, vcpu);
+ error = vm_restart_instruction(vcpu);
break;
case VM_SET_TOPOLOGY:
topology = (struct vm_cpu_topology *)data;
@@ -910,7 +912,7 @@
}
if (state_changed == 1)
- vcpu_unlock_one(sc, vcpu);
+ vcpu_unlock_one(sc, vcpuid);
else if (state_changed == 2)
vcpu_unlock_all(sc);
diff --git a/sys/amd64/vmm/vmm_instruction_emul.c b/sys/amd64/vmm/vmm_instruction_emul.c
--- a/sys/amd64/vmm/vmm_instruction_emul.c
+++ b/sys/amd64/vmm/vmm_instruction_emul.c
@@ -290,11 +290,11 @@
};
static int
-vie_read_register(void *vm, int vcpuid, enum vm_reg_name reg, uint64_t *rval)
+vie_read_register(VCPU_DECL, enum vm_reg_name reg, uint64_t *rval)
{
int error;
- error = vm_get_register(vm, vcpuid, reg, rval);
+ error = vm_get_register(VCPU_ARGS, reg, rval);
return (error);
}
@@ -326,14 +326,14 @@
}
static int
-vie_read_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t *rval)
+vie_read_bytereg(VCPU_DECL, struct vie *vie, uint8_t *rval)
{
uint64_t val;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(vm, vcpuid, reg, &val);
+ error = vm_get_register(VCPU_ARGS, reg, &val);
/*
* To obtain the value of a legacy high byte register shift the
@@ -347,14 +347,14 @@
}
static int
-vie_write_bytereg(void *vm, int vcpuid, struct vie *vie, uint8_t byte)
+vie_write_bytereg(VCPU_DECL, struct vie *vie, uint8_t byte)
{
uint64_t origval, val, mask;
int error, lhbr;
enum vm_reg_name reg;
vie_calc_bytereg(vie, &reg, &lhbr);
- error = vm_get_register(vm, vcpuid, reg, &origval);
+ error = vm_get_register(VCPU_ARGS, reg, &origval);
if (error == 0) {
val = byte;
mask = 0xff;
@@ -367,13 +367,13 @@
mask <<= 8;
}
val |= origval & ~mask;
- error = vm_set_register(vm, vcpuid, reg, val);
+ error = vm_set_register(VCPU_ARGS, reg, val);
}
return (error);
}
int
-vie_update_register(void *vm, int vcpuid, enum vm_reg_name reg,
+vie_update_register(VCPU_DECL, enum vm_reg_name reg,
uint64_t val, int size)
{
int error;
@@ -382,7 +382,7 @@
switch (size) {
case 1:
case 2:
- error = vie_read_register(vm, vcpuid, reg, &origval);
+ error = vie_read_register(VCPU_ARGS, reg, &origval);
if (error)
return (error);
val &= size2mask[size];
@@ -397,7 +397,7 @@
return (EINVAL);
}
- error = vm_set_register(vm, vcpuid, reg, val);
+ error = vm_set_register(VCPU_ARGS, reg, val);
return (error);
}
@@ -509,7 +509,7 @@
}
static int
-emulate_mov(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_mov(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -528,9 +528,9 @@
* REX + 88/r: mov r/m8, r8 (%ah, %ch, %dh, %bh not available)
*/
size = 1; /* override for byte operation */
- error = vie_read_bytereg(vm, vcpuid, vie, &byte);
+ error = vie_read_bytereg(VCPU_ARGS, vie, &byte);
if (error == 0)
- error = memwrite(vm, vcpuid, gpa, byte, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, byte, size, arg);
break;
case 0x89:
/*
@@ -540,10 +540,10 @@
* REX.W + 89/r mov r/m64, r64
*/
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &val);
+ error = vie_read_register(VCPU_ARGS, reg, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(vm, vcpuid, gpa, val, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, val, size, arg);
}
break;
case 0x8A:
@@ -553,9 +553,9 @@
* REX + 8A/r: mov r8, r/m8
*/
size = 1; /* override for byte operation */
- error = memread(vm, vcpuid, gpa, &val, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val, size, arg);
if (error == 0)
- error = vie_write_bytereg(vm, vcpuid, vie, val);
+ error = vie_write_bytereg(VCPU_ARGS, vie, val);
break;
case 0x8B:
/*
@@ -564,10 +564,10 @@
* 8B/r: mov r32, r/m32
* REX.W 8B/r: mov r64, r/m64
*/
- error = memread(vm, vcpuid, gpa, &val, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val, size, arg);
if (error == 0) {
reg = gpr_map[vie->reg];
- error = vie_update_register(vm, vcpuid, reg, val, size);
+ error = vie_update_register(VCPU_ARGS, reg, val, size);
}
break;
case 0xA1:
@@ -577,10 +577,10 @@
* A1: mov EAX, moffs32
* REX.W + A1: mov RAX, moffs64
*/
- error = memread(vm, vcpuid, gpa, &val, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val, size, arg);
if (error == 0) {
reg = VM_REG_GUEST_RAX;
- error = vie_update_register(vm, vcpuid, reg, val, size);
+ error = vie_update_register(VCPU_ARGS, reg, val, size);
}
break;
case 0xA3:
@@ -590,10 +590,10 @@
* A3: mov moffs32, EAX
* REX.W + A3: mov moffs64, RAX
*/
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
if (error == 0) {
val &= size2mask[size];
- error = memwrite(vm, vcpuid, gpa, val, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, val, size, arg);
}
break;
case 0xC6:
@@ -603,7 +603,7 @@
* REX + C6/0 mov r/m8, imm8
*/
size = 1; /* override for byte operation */
- error = memwrite(vm, vcpuid, gpa, vie->immediate, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, vie->immediate, size, arg);
break;
case 0xC7:
/*
@@ -613,7 +613,7 @@
* REX.W + C7/0 mov r/m64, imm32 (sign-extended to 64-bits)
*/
val = vie->immediate & size2mask[size];
- error = memwrite(vm, vcpuid, gpa, val, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, val, size, arg);
break;
default:
break;
@@ -623,7 +623,7 @@
}
static int
-emulate_movx(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_movx(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -645,7 +645,7 @@
*/
/* get the first operand */
- error = memread(vm, vcpuid, gpa, &val, 1, arg);
+ error = memread(VCPU_ARGS, gpa, &val, 1, arg);
if (error)
break;
@@ -656,7 +656,7 @@
val = (uint8_t)val;
/* write the result */
- error = vie_update_register(vm, vcpuid, reg, val, size);
+ error = vie_update_register(VCPU_ARGS, reg, val, size);
break;
case 0xB7:
/*
@@ -666,7 +666,7 @@
* 0F B7/r movzx r32, r/m16
* REX.W + 0F B7/r movzx r64, r/m16
*/
- error = memread(vm, vcpuid, gpa, &val, 2, arg);
+ error = memread(VCPU_ARGS, gpa, &val, 2, arg);
if (error)
return (error);
@@ -675,7 +675,7 @@
/* zero-extend word */
val = (uint16_t)val;
- error = vie_update_register(vm, vcpuid, reg, val, size);
+ error = vie_update_register(VCPU_ARGS, reg, val, size);
break;
case 0xBE:
/*
@@ -688,7 +688,7 @@
*/
/* get the first operand */
- error = memread(vm, vcpuid, gpa, &val, 1, arg);
+ error = memread(VCPU_ARGS, gpa, &val, 1, arg);
if (error)
break;
@@ -699,7 +699,7 @@
val = (int8_t)val;
/* write the result */
- error = vie_update_register(vm, vcpuid, reg, val, size);
+ error = vie_update_register(VCPU_ARGS, reg, val, size);
break;
default:
break;
@@ -711,7 +711,7 @@
* Helper function to calculate and validate a linear address.
*/
static int
-get_gla(void *vm, int vcpuid, struct vie *vie __unused,
+get_gla(VCPU_DECL, struct vie *vie __unused,
struct vm_guest_paging *paging, int opsize, int addrsize, int prot,
enum vm_reg_name seg, enum vm_reg_name gpr, uint64_t *gla, int *fault)
{
@@ -719,39 +719,39 @@
uint64_t cr0, val, rflags;
int error __diagused;
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vm_get_seg_desc(vm, vcpuid, seg, &desc);
+ error = vm_get_seg_desc(VCPU_ARGS, seg, &desc);
KASSERT(error == 0, ("%s: error %d getting segment descriptor %d",
__func__, error, seg));
- error = vie_read_register(vm, vcpuid, gpr, &val);
+ error = vie_read_register(VCPU_ARGS, gpr, &val);
KASSERT(error == 0, ("%s: error %d getting register %d", __func__,
error, gpr));
if (vie_calculate_gla(paging->cpu_mode, seg, &desc, val, opsize,
addrsize, prot, gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(vm, vcpuid, 0);
+ vm_inject_ss(VCPU_ARGS, 0);
else
- vm_inject_gp(vm, vcpuid);
+ vm_inject_gp(VCPU_ARGS);
goto guest_fault;
}
if (vie_canonical_check(paging->cpu_mode, *gla)) {
if (seg == VM_REG_GUEST_SS)
- vm_inject_ss(vm, vcpuid, 0);
+ vm_inject_ss(VCPU_ARGS, 0);
else
- vm_inject_gp(vm, vcpuid);
+ vm_inject_gp(VCPU_ARGS);
goto guest_fault;
}
if (vie_alignment_check(paging->cpl, opsize, cr0, rflags, *gla)) {
- vm_inject_ac(vm, vcpuid, 0);
+ vm_inject_ac(VCPU_ARGS, 0);
goto guest_fault;
}
@@ -764,7 +764,7 @@
}
static int
-emulate_movs(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_movs(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -791,7 +791,7 @@
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -821,12 +821,12 @@
*/
seg = vie->segment_override ? vie->segment_register : VM_REG_GUEST_DS;
- error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+ error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
PROT_READ, seg, VM_REG_GUEST_RSI, &srcaddr, &fault);
if (error || fault)
goto done;
- error = vm_copy_setup(vm, vcpuid, paging, srcaddr, opsize, PROT_READ,
+ error = vm_copy_setup(VCPU_ARGS, paging, srcaddr, opsize, PROT_READ,
copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -837,7 +837,7 @@
*/
vm_copyin(copyinfo, &val, opsize);
vm_copy_teardown(copyinfo, nitems(copyinfo));
- error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
+ error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
if (error)
goto done;
} else {
@@ -846,13 +846,13 @@
* if 'srcaddr' is in the mmio space.
*/
- error = get_gla(vm, vcpuid, vie, paging, opsize, vie->addrsize,
+ error = get_gla(VCPU_ARGS, vie, paging, opsize, vie->addrsize,
PROT_WRITE, VM_REG_GUEST_ES, VM_REG_GUEST_RDI, &dstaddr,
&fault);
if (error || fault)
goto done;
- error = vm_copy_setup(vm, vcpuid, paging, dstaddr, opsize,
+ error = vm_copy_setup(VCPU_ARGS, paging, dstaddr, opsize,
PROT_WRITE, copyinfo, nitems(copyinfo), &fault);
if (error == 0) {
if (fault)
@@ -867,7 +867,7 @@
* injected into the guest then it will happen
* before the MMIO read is attempted.
*/
- error = memread(vm, vcpuid, gpa, &val, opsize, arg);
+ error = memread(VCPU_ARGS, gpa, &val, opsize, arg);
if (error)
goto done;
@@ -882,33 +882,33 @@
* instruction is not going to be restarted due
* to address translation faults.
*/
- error = vm_gla2gpa(vm, vcpuid, paging, srcaddr,
+ error = vm_gla2gpa(VCPU_ARGS, paging, srcaddr,
PROT_READ, &srcgpa, &fault);
if (error || fault)
goto done;
- error = vm_gla2gpa(vm, vcpuid, paging, dstaddr,
+ error = vm_gla2gpa(VCPU_ARGS, paging, dstaddr,
PROT_WRITE, &dstgpa, &fault);
if (error || fault)
goto done;
- error = memread(vm, vcpuid, srcgpa, &val, opsize, arg);
+ error = memread(VCPU_ARGS, srcgpa, &val, opsize, arg);
if (error)
goto done;
- error = memwrite(vm, vcpuid, dstgpa, val, opsize, arg);
+ error = memwrite(VCPU_ARGS, dstgpa, val, opsize, arg);
if (error)
goto done;
}
}
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSI, &rsi);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSI, &rsi);
KASSERT(error == 0, ("%s: error %d getting rsi", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D) {
@@ -919,17 +919,17 @@
rdi += opsize;
}
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSI, rsi,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSI, rsi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rsi", __func__, error));
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -937,7 +937,7 @@
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(vm, vcpuid);
+ vm_restart_instruction(VCPU_ARGS);
}
done:
KASSERT(error == 0 || error == EFAULT, ("%s: unexpected error %d",
@@ -946,7 +946,7 @@
}
static int
-emulate_stos(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_stos(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread __unused,
mem_region_write_t memwrite, void *arg)
{
@@ -958,7 +958,7 @@
repeat = vie->repz_present | vie->repnz_present;
if (repeat) {
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RCX, &rcx);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RCX, &rcx);
KASSERT(!error, ("%s: error %d getting rcx", __func__, error));
/*
@@ -969,17 +969,17 @@
return (0);
}
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RAX, &val);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RAX, &val);
KASSERT(!error, ("%s: error %d getting rax", __func__, error));
- error = memwrite(vm, vcpuid, gpa, val, opsize, arg);
+ error = memwrite(VCPU_ARGS, gpa, val, opsize, arg);
if (error)
return (error);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RDI, &rdi);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RDI, &rdi);
KASSERT(error == 0, ("%s: error %d getting rdi", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
if (rflags & PSL_D)
@@ -987,13 +987,13 @@
else
rdi += opsize;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RDI, rdi,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RDI, rdi,
vie->addrsize);
KASSERT(error == 0, ("%s: error %d updating rdi", __func__, error));
if (repeat) {
rcx = rcx - 1;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RCX,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RCX,
rcx, vie->addrsize);
KASSERT(!error, ("%s: error %d updating rcx", __func__, error));
@@ -1001,14 +1001,14 @@
* Repeat the instruction if the count register is not zero.
*/
if ((rcx & vie_size2mask(vie->addrsize)) != 0)
- vm_restart_instruction(vm, vcpuid);
+ vm_restart_instruction(VCPU_ARGS);
}
return (0);
}
static int
-emulate_and(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_and(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1031,18 +1031,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &val1);
+ error = vie_read_register(VCPU_ARGS, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(vm, vcpuid, gpa, &val2, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 & val2;
- error = vie_update_register(vm, vcpuid, reg, result, size);
+ error = vie_update_register(VCPU_ARGS, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1060,7 +1060,7 @@
*/
/* get the first operand */
- error = memread(vm, vcpuid, gpa, &val1, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val1, size, arg);
if (error)
break;
@@ -1069,7 +1069,7 @@
* operand and write the result
*/
result = val1 & vie->immediate;
- error = memwrite(vm, vcpuid, gpa, result, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, result, size, arg);
break;
default:
break;
@@ -1077,7 +1077,7 @@
if (error)
return (error);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1091,12 +1091,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_or(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_or(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite, void *arg)
{
int error, size;
@@ -1119,18 +1119,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &val1);
+ error = vie_read_register(VCPU_ARGS, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(vm, vcpuid, gpa, &val2, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
result = val1 | val2;
- error = vie_update_register(vm, vcpuid, reg, result, size);
+ error = vie_update_register(VCPU_ARGS, reg, result, size);
break;
case 0x81:
case 0x83:
@@ -1148,7 +1148,7 @@
*/
/* get the first operand */
- error = memread(vm, vcpuid, gpa, &val1, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val1, size, arg);
if (error)
break;
@@ -1157,7 +1157,7 @@
* operand and write the result
*/
result = val1 | vie->immediate;
- error = memwrite(vm, vcpuid, gpa, result, size, arg);
+ error = memwrite(VCPU_ARGS, gpa, result, size, arg);
break;
default:
break;
@@ -1165,7 +1165,7 @@
if (error)
return (error);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1179,12 +1179,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_cmp(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_cmp(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1212,12 +1212,12 @@
/* Get the register operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &regop);
+ error = vie_read_register(VCPU_ARGS, reg, &regop);
if (error)
return (error);
/* Get the memory operand */
- error = memread(vm, vcpuid, gpa, &memop, size, arg);
+ error = memread(VCPU_ARGS, gpa, &memop, size, arg);
if (error)
return (error);
@@ -1256,7 +1256,7 @@
size = 1;
/* get the first operand */
- error = memread(vm, vcpuid, gpa, &op1, size, arg);
+ error = memread(VCPU_ARGS, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1265,18 +1265,18 @@
default:
return (EINVAL);
}
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_test(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_test(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1300,7 +1300,7 @@
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = memread(vm, vcpuid, gpa, &op1, size, arg);
+ error = memread(VCPU_ARGS, gpa, &op1, size, arg);
if (error)
return (error);
@@ -1309,7 +1309,7 @@
default:
return (EINVAL);
}
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1320,12 +1320,12 @@
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & (PSL_PF | PSL_Z | PSL_N);
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
return (error);
}
static int
-emulate_bextr(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_bextr(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite __unused, void *arg)
{
@@ -1353,13 +1353,13 @@
* operand) using an index and length specified in the second /source/
* operand (third operand).
*/
- error = memread(vm, vcpuid, gpa, &src1, size, arg);
+ error = memread(VCPU_ARGS, gpa, &src1, size, arg);
if (error)
return (error);
- error = vie_read_register(vm, vcpuid, gpr_map[vie->vex_reg], &src2);
+ error = vie_read_register(VCPU_ARGS, gpr_map[vie->vex_reg], &src2);
if (error)
return (error);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
if (error)
return (error);
@@ -1385,7 +1385,7 @@
dst = src1;
done:
- error = vie_update_register(vm, vcpuid, gpr_map[vie->reg], dst, size);
+ error = vie_update_register(VCPU_ARGS, gpr_map[vie->reg], dst, size);
if (error)
return (error);
@@ -1396,13 +1396,13 @@
rflags &= ~RFLAGS_STATUS_BITS;
if (dst == 0)
rflags |= PSL_Z;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags,
8);
return (error);
}
static int
-emulate_add(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_add(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1424,18 +1424,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &val1);
+ error = vie_read_register(VCPU_ARGS, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(vm, vcpuid, gpa, &val2, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 + val2;
- error = vie_update_register(vm, vcpuid, reg, nval, size);
+ error = vie_update_register(VCPU_ARGS, reg, nval, size);
break;
default:
break;
@@ -1443,14 +1443,14 @@
if (!error) {
rflags2 = getaddflags(size, val1, val2);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1458,7 +1458,7 @@
}
static int
-emulate_sub(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_sub(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused, void *arg)
{
int error, size;
@@ -1480,18 +1480,18 @@
/* get the first operand */
reg = gpr_map[vie->reg];
- error = vie_read_register(vm, vcpuid, reg, &val1);
+ error = vie_read_register(VCPU_ARGS, reg, &val1);
if (error)
break;
/* get the second operand */
- error = memread(vm, vcpuid, gpa, &val2, size, arg);
+ error = memread(VCPU_ARGS, gpa, &val2, size, arg);
if (error)
break;
/* perform the operation and write the result */
nval = val1 - val2;
- error = vie_update_register(vm, vcpuid, reg, nval, size);
+ error = vie_update_register(VCPU_ARGS, reg, nval, size);
break;
default:
break;
@@ -1499,14 +1499,14 @@
if (!error) {
rflags2 = getcc(size, val1, val2);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
&rflags);
if (error)
return (error);
rflags &= ~RFLAGS_STATUS_BITS;
rflags |= rflags2 & RFLAGS_STATUS_BITS;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS,
rflags, 8);
}
@@ -1514,7 +1514,7 @@
}
static int
-emulate_stack_op(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+emulate_stack_op(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1552,7 +1552,7 @@
* stack-segment descriptor determines the size of the
* stack pointer.
*/
- error = vm_get_seg_desc(vm, vcpuid, VM_REG_GUEST_SS, &ss_desc);
+ error = vm_get_seg_desc(VCPU_ARGS, VM_REG_GUEST_SS, &ss_desc);
KASSERT(error == 0, ("%s: error %d getting SS descriptor",
__func__, error));
if (SEG_DESC_DEF32(ss_desc.access))
@@ -1561,13 +1561,13 @@
stackaddrsize = 2;
}
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_CR0, &cr0);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_CR0, &cr0);
KASSERT(error == 0, ("%s: error %d getting cr0", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RSP, &rsp);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RSP, &rsp);
KASSERT(error == 0, ("%s: error %d getting rsp", __func__, error));
if (pushop) {
rsp -= size;
@@ -1576,39 +1576,39 @@
if (vie_calculate_gla(paging->cpu_mode, VM_REG_GUEST_SS, &ss_desc,
rsp, size, stackaddrsize, pushop ? PROT_WRITE : PROT_READ,
&stack_gla)) {
- vm_inject_ss(vm, vcpuid, 0);
+ vm_inject_ss(VCPU_ARGS, 0);
return (0);
}
if (vie_canonical_check(paging->cpu_mode, stack_gla)) {
- vm_inject_ss(vm, vcpuid, 0);
+ vm_inject_ss(VCPU_ARGS, 0);
return (0);
}
if (vie_alignment_check(paging->cpl, size, cr0, rflags, stack_gla)) {
- vm_inject_ac(vm, vcpuid, 0);
+ vm_inject_ac(VCPU_ARGS, 0);
return (0);
}
- error = vm_copy_setup(vm, vcpuid, paging, stack_gla, size,
+ error = vm_copy_setup(VCPU_ARGS, paging, stack_gla, size,
pushop ? PROT_WRITE : PROT_READ, copyinfo, nitems(copyinfo),
&fault);
if (error || fault)
return (error);
if (pushop) {
- error = memread(vm, vcpuid, mmio_gpa, &val, size, arg);
+ error = memread(VCPU_ARGS, mmio_gpa, &val, size, arg);
if (error == 0)
vm_copyout(&val, copyinfo, size);
} else {
vm_copyin(copyinfo, &val, size);
- error = memwrite(vm, vcpuid, mmio_gpa, val, size, arg);
+ error = memwrite(VCPU_ARGS, mmio_gpa, val, size, arg);
rsp += size;
}
vm_copy_teardown(copyinfo, nitems(copyinfo));
if (error == 0) {
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RSP, rsp,
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RSP, rsp,
stackaddrsize);
KASSERT(error == 0, ("error %d updating rsp", error));
}
@@ -1616,7 +1616,7 @@
}
static int
-emulate_push(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+emulate_push(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1631,13 +1631,13 @@
if ((vie->reg & 7) != 6)
return (EINVAL);
- error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_pop(void *vm, int vcpuid, uint64_t mmio_gpa, struct vie *vie,
+emulate_pop(VCPU_DECL, uint64_t mmio_gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *arg)
{
@@ -1652,13 +1652,13 @@
if ((vie->reg & 7) != 0)
return (EINVAL);
- error = emulate_stack_op(vm, vcpuid, mmio_gpa, vie, paging, memread,
+ error = emulate_stack_op(VCPU_ARGS, mmio_gpa, vie, paging, memread,
memwrite, arg);
return (error);
}
static int
-emulate_group1(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_group1(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging __unused, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1666,15 +1666,15 @@
switch (vie->reg & 7) {
case 0x1: /* OR */
- error = emulate_or(vm, vcpuid, gpa, vie,
+ error = emulate_or(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case 0x4: /* AND */
- error = emulate_and(vm, vcpuid, gpa, vie,
+ error = emulate_and(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case 0x7: /* CMP */
- error = emulate_cmp(vm, vcpuid, gpa, vie,
+ error = emulate_cmp(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
default:
@@ -1686,7 +1686,7 @@
}
static int
-emulate_bittest(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_bittest(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1702,10 +1702,10 @@
if ((vie->reg & 7) != 4)
return (EINVAL);
- error = vie_read_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, &rflags);
+ error = vie_read_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, &rflags);
KASSERT(error == 0, ("%s: error %d getting rflags", __func__, error));
- error = memread(vm, vcpuid, gpa, &val, vie->opsize, memarg);
+ error = memread(VCPU_ARGS, gpa, &val, vie->opsize, memarg);
if (error)
return (error);
@@ -1722,14 +1722,14 @@
else
rflags &= ~PSL_C;
- error = vie_update_register(vm, vcpuid, VM_REG_GUEST_RFLAGS, rflags, 8);
+ error = vie_update_register(VCPU_ARGS, VM_REG_GUEST_RFLAGS, rflags, 8);
KASSERT(error == 0, ("%s: error %d updating rflags", __func__, error));
return (0);
}
static int
-emulate_twob_group15(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+emulate_twob_group15(VCPU_DECL, uint64_t gpa, struct vie *vie,
mem_region_read_t memread, mem_region_write_t memwrite __unused,
void *memarg)
{
@@ -1749,7 +1749,7 @@
* CLFLUSH, CLFLUSHOPT. Only check for access
* rights.
*/
- error = memread(vm, vcpuid, gpa, &buf, 1, memarg);
+ error = memread(VCPU_ARGS, gpa, &buf, 1, memarg);
}
break;
default:
@@ -1761,7 +1761,7 @@
}
int
-vmm_emulate_instruction(void *vm, int vcpuid, uint64_t gpa, struct vie *vie,
+vmm_emulate_instruction(VCPU_DECL, uint64_t gpa, struct vie *vie,
struct vm_guest_paging *paging, mem_region_read_t memread,
mem_region_write_t memwrite, void *memarg)
{
@@ -1772,68 +1772,68 @@
switch (vie->op.op_type) {
case VIE_OP_TYPE_GROUP1:
- error = emulate_group1(vm, vcpuid, gpa, vie, paging, memread,
+ error = emulate_group1(VCPU_ARGS, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_POP:
- error = emulate_pop(vm, vcpuid, gpa, vie, paging, memread,
+ error = emulate_pop(VCPU_ARGS, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_PUSH:
- error = emulate_push(vm, vcpuid, gpa, vie, paging, memread,
+ error = emulate_push(VCPU_ARGS, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_CMP:
- error = emulate_cmp(vm, vcpuid, gpa, vie,
+ error = emulate_cmp(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOV:
- error = emulate_mov(vm, vcpuid, gpa, vie,
+ error = emulate_mov(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVSX:
case VIE_OP_TYPE_MOVZX:
- error = emulate_movx(vm, vcpuid, gpa, vie,
+ error = emulate_movx(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_MOVS:
- error = emulate_movs(vm, vcpuid, gpa, vie, paging, memread,
+ error = emulate_movs(VCPU_ARGS, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_STOS:
- error = emulate_stos(vm, vcpuid, gpa, vie, paging, memread,
+ error = emulate_stos(VCPU_ARGS, gpa, vie, paging, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_AND:
- error = emulate_and(vm, vcpuid, gpa, vie,
+ error = emulate_and(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_OR:
- error = emulate_or(vm, vcpuid, gpa, vie,
+ error = emulate_or(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_SUB:
- error = emulate_sub(vm, vcpuid, gpa, vie,
+ error = emulate_sub(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BITTEST:
- error = emulate_bittest(vm, vcpuid, gpa, vie,
+ error = emulate_bittest(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_TWOB_GRP15:
- error = emulate_twob_group15(vm, vcpuid, gpa, vie,
+ error = emulate_twob_group15(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_ADD:
- error = emulate_add(vm, vcpuid, gpa, vie, memread,
+ error = emulate_add(VCPU_ARGS, gpa, vie, memread,
memwrite, memarg);
break;
case VIE_OP_TYPE_TEST:
- error = emulate_test(vm, vcpuid, gpa, vie,
+ error = emulate_test(VCPU_ARGS, gpa, vie,
memread, memwrite, memarg);
break;
case VIE_OP_TYPE_BEXTR:
- error = emulate_bextr(vm, vcpuid, gpa, vie, paging,
+ error = emulate_bextr(VCPU_ARGS, gpa, vie, paging,
memread, memwrite, memarg);
break;
default:
@@ -2056,17 +2056,17 @@
}
static void *
-ptp_hold(struct vm *vm, int vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
+ptp_hold(struct vcpu *vcpu, vm_paddr_t ptpphys, size_t len, void **cookie)
{
void *ptr;
ptp_release(cookie);
- ptr = vm_gpa_hold(vm, vcpu, ptpphys, len, VM_PROT_RW, cookie);
+ ptr = vm_gpa_hold(vcpu, ptpphys, len, VM_PROT_RW, cookie);
return (ptr);
}
static int
-_vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+_vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *guest_fault, bool check_only)
{
int nlevels, pfcode, ptpshift, ptpindex, retval, usermode, writable;
@@ -2094,7 +2094,7 @@
* should be generated.
*/
if (!check_only)
- vm_inject_gp(vm, vcpuid);
+ vm_inject_gp(vcpu);
goto fault;
}
@@ -2109,7 +2109,7 @@
/* Zero out the lower 12 bits. */
ptpphys &= ~0xfff;
- ptpbase32 = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE,
+ ptpbase32 = ptp_hold(vcpu, ptpphys, PAGE_SIZE,
&cookie);
if (ptpbase32 == NULL)
@@ -2127,7 +2127,7 @@
if (!check_only) {
pfcode = pf_error_code(usermode, prot, 0,
pte32);
- vm_inject_pf(vm, vcpuid, pfcode, gla);
+ vm_inject_pf(vcpu, pfcode, gla);
}
goto fault;
}
@@ -2171,7 +2171,7 @@
/* Zero out the lower 5 bits and the upper 32 bits */
ptpphys &= 0xffffffe0UL;
- ptpbase = ptp_hold(vm, vcpuid, ptpphys, sizeof(*ptpbase) * 4,
+ ptpbase = ptp_hold(vcpu, ptpphys, sizeof(*ptpbase) * 4,
&cookie);
if (ptpbase == NULL)
goto error;
@@ -2183,7 +2183,7 @@
if ((pte & PG_V) == 0) {
if (!check_only) {
pfcode = pf_error_code(usermode, prot, 0, pte);
- vm_inject_pf(vm, vcpuid, pfcode, gla);
+ vm_inject_pf(vcpu, pfcode, gla);
}
goto fault;
}
@@ -2201,7 +2201,7 @@
/* Zero out the lower 12 bits and the upper 12 bits */
ptpphys >>= 12; ptpphys <<= 24; ptpphys >>= 12;
- ptpbase = ptp_hold(vm, vcpuid, ptpphys, PAGE_SIZE, &cookie);
+ ptpbase = ptp_hold(vcpu, ptpphys, PAGE_SIZE, &cookie);
if (ptpbase == NULL)
goto error;
@@ -2216,7 +2216,7 @@
(writable && (pte & PG_RW) == 0)) {
if (!check_only) {
pfcode = pf_error_code(usermode, prot, 0, pte);
- vm_inject_pf(vm, vcpuid, pfcode, gla);
+ vm_inject_pf(vcpu, pfcode, gla);
}
goto fault;
}
@@ -2234,7 +2234,7 @@
if (!check_only) {
pfcode = pf_error_code(usermode, prot, 1,
pte);
- vm_inject_pf(vm, vcpuid, pfcode, gla);
+ vm_inject_pf(vcpu, pfcode, gla);
}
goto fault;
}
@@ -2267,25 +2267,25 @@
}
int
-vm_gla2gpa(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vm_gla2gpa(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
{
- return (_vm_gla2gpa(vm, vcpuid, paging, gla, prot, gpa, guest_fault,
+ return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault,
false));
}
int
-vm_gla2gpa_nofault(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t gla, int prot, uint64_t *gpa, int *guest_fault)
{
- return (_vm_gla2gpa(vm, vcpuid, paging, gla, prot, gpa, guest_fault,
+ return (_vm_gla2gpa(vcpu, paging, gla, prot, gpa, guest_fault,
true));
}
int
-vmm_fetch_instruction(struct vm *vm, int vcpuid, struct vm_guest_paging *paging,
+vmm_fetch_instruction(struct vcpu *vcpu, struct vm_guest_paging *paging,
uint64_t rip, int inst_length, struct vie *vie, int *faultptr)
{
struct vm_copyinfo copyinfo[2];
@@ -2295,7 +2295,7 @@
panic("vmm_fetch_instruction: invalid length %d", inst_length);
prot = PROT_READ | PROT_EXEC;
- error = vm_copy_setup(vm, vcpuid, paging, rip, inst_length, prot,
+ error = vm_copy_setup(vcpu, paging, rip, inst_length, prot,
copyinfo, nitems(copyinfo), faultptr);
if (error || *faultptr)
return (error);
@@ -2813,7 +2813,7 @@
* page table fault matches with our instruction decoding.
*/
static int
-verify_gla(struct vm *vm, int cpuid, uint64_t gla, struct vie *vie,
+verify_gla(struct vcpu *vcpu, uint64_t gla, struct vie *vie,
enum vm_cpu_mode cpu_mode)
{
int error;
@@ -2827,7 +2827,7 @@
base = 0;
if (vie->base_register != VM_REG_LAST) {
- error = vm_get_register(vm, cpuid, vie->base_register, &base);
+ error = vm_get_register(vcpu, vie->base_register, &base);
if (error) {
printf("verify_gla: error %d getting base reg %d\n",
error, vie->base_register);
@@ -2844,7 +2844,7 @@
idx = 0;
if (vie->index_register != VM_REG_LAST) {
- error = vm_get_register(vm, cpuid, vie->index_register, &idx);
+ error = vm_get_register(vcpu, vie->index_register, &idx);
if (error) {
printf("verify_gla: error %d getting index reg %d\n",
error, vie->index_register);
@@ -2876,7 +2876,7 @@
seg != VM_REG_GUEST_GS) {
segbase = 0;
} else {
- error = vm_get_seg_desc(vm, cpuid, seg, &desc);
+ error = vm_get_seg_desc(vcpu, seg, &desc);
if (error) {
printf("verify_gla: error %d getting segment"
" descriptor %d", error,
@@ -2903,7 +2903,7 @@
int
#ifdef _KERNEL
-vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
+vmm_decode_instruction(struct vcpu *vcpu, uint64_t gla,
enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
#else
vmm_decode_instruction(enum vm_cpu_mode cpu_mode, int cs_d, struct vie *vie)
@@ -2933,7 +2933,7 @@
#ifdef _KERNEL
if ((vie->op.op_flags & VIE_OP_F_NO_GLA_VERIFICATION) == 0) {
- if (verify_gla(vm, cpuid, gla, vie, cpu_mode))
+ if (verify_gla(vcpu, gla, vie, cpu_mode))
return (-1);
}
#endif
diff --git a/sys/amd64/vmm/vmm_ioport.c b/sys/amd64/vmm/vmm_ioport.c
--- a/sys/amd64/vmm/vmm_ioport.c
+++ b/sys/amd64/vmm/vmm_ioport.c
@@ -138,7 +138,7 @@
if (vmexit->u.inout.in) {
vmexit->u.inout.eax &= ~mask;
vmexit->u.inout.eax |= val & mask;
- error = vm_set_register(vm, vcpuid, VM_REG_GUEST_RAX,
+ error = vm_set_register(vm_vcpu(vm, vcpuid), VM_REG_GUEST_RAX,
vmexit->u.inout.eax);
KASSERT(error == 0, ("emulate_ioport: error %d setting guest "
"rax register", error));
diff --git a/sys/amd64/vmm/vmm_lapic.h b/sys/amd64/vmm/vmm_lapic.h
--- a/sys/amd64/vmm/vmm_lapic.h
+++ b/sys/amd64/vmm/vmm_lapic.h
@@ -39,9 +39,9 @@
int lapic_wrmsr(struct vm *vm, int cpu, u_int msr, uint64_t wval,
bool *retu);
-int lapic_mmio_read(void *vm, int cpu, uint64_t gpa,
+int lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa,
uint64_t *rval, int size, void *arg);
-int lapic_mmio_write(void *vm, int cpu, uint64_t gpa,
+int lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa,
uint64_t wval, int size, void *arg);
/*
diff --git a/sys/amd64/vmm/vmm_lapic.c b/sys/amd64/vmm/vmm_lapic.c
--- a/sys/amd64/vmm/vmm_lapic.c
+++ b/sys/amd64/vmm/vmm_lapic.c
@@ -66,7 +66,7 @@
if (vector < 16 || vector > 255)
return (EINVAL);
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vm_vcpu(vm, cpu));
if (vlapic_set_intr_ready(vlapic, vector, level))
vcpu_notify_event(vm, cpu, true);
return (0);
@@ -88,7 +88,7 @@
CPU_SETOF(cpu, &dmask);
error = 0;
CPU_FOREACH_ISSET(cpu, &dmask) {
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vm_vcpu(vm, cpu));
error = vlapic_trigger_lvt(vlapic, vector);
if (error)
break;
@@ -162,7 +162,7 @@
u_int offset;
struct vlapic *vlapic;
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vm_vcpu(vm, cpu));
if (msr == MSR_APICBASE) {
*rval = vlapic_get_apicbase(vlapic);
@@ -182,7 +182,7 @@
u_int offset;
struct vlapic *vlapic;
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vm_vcpu(vm, cpu));
if (msr == MSR_APICBASE) {
error = vlapic_set_apicbase(vlapic, val);
@@ -195,7 +195,7 @@
}
int
-lapic_mmio_write(void *vm, int cpu, uint64_t gpa, uint64_t wval, int size,
+lapic_mmio_write(struct vcpu *vcpu, uint64_t gpa, uint64_t wval, int size,
void *arg)
{
int error;
@@ -211,13 +211,13 @@
if (size != 4 || off & 0xf)
return (EINVAL);
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vcpu);
error = vlapic_write(vlapic, 1, off, wval, arg);
return (error);
}
int
-lapic_mmio_read(void *vm, int cpu, uint64_t gpa, uint64_t *rval, int size,
+lapic_mmio_read(struct vcpu *vcpu, uint64_t gpa, uint64_t *rval, int size,
void *arg)
{
int error;
@@ -235,7 +235,7 @@
if (off & 0xf)
return (EINVAL);
- vlapic = vm_lapic(vm, cpu);
+ vlapic = vm_lapic(vcpu);
error = vlapic_read(vlapic, 1, off, rval, arg);
return (error);
}
diff --git a/sys/amd64/vmm/x86.c b/sys/amd64/vmm/x86.c
--- a/sys/amd64/vmm/x86.c
+++ b/sys/amd64/vmm/x86.c
@@ -349,7 +349,7 @@
*/
regs[2] &= ~CPUID2_OSXSAVE;
if (regs[2] & CPUID2_XSAVE) {
- error = vm_get_register(vm, vcpu_id,
+ error = vm_get_register(vm_vcpu(vm, vcpu_id),
VM_REG_GUEST_CR4, &cr4);
if (error)
panic("x86_emulate_cpuid: error %d "
File Metadata
Details
Attached
Mime Type
text/plain
Expires
Sun, Nov 17, 1:29 AM (21 h, 30 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
14668732
Default Alt Text
D37161.diff (81 KB)
Attached To
Mode
D37161: vmm: Use struct vcpu in the instruction emulation code.
Attached
Detach File
Event Timeline
Log In to Comment