D37157.diff

diff --git a/sys/amd64/include/vmm.h b/sys/amd64/include/vmm.h
--- a/sys/amd64/include/vmm.h
+++ b/sys/amd64/include/vmm.h
@@ -377,7 +377,7 @@
}
#endif
-void *vcpu_stats(struct vm *vm, int vcpu);
+void *vcpu_stats(struct vcpu *vcpu);
void vcpu_notify_event(struct vm *vm, int vcpuid, bool lapic_intr);
struct vmspace *vm_get_vmspace(struct vm *vm);
struct vatpic *vm_atpic(struct vm *vm);
diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c
--- a/sys/amd64/vmm/amd/svm.c
+++ b/sys/amd64/vmm/amd/svm.c
@@ -1010,7 +1010,7 @@
*/
SVM_CTR2(vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo,
VMCB_EXITINTINFO_VECTOR(intinfo));
- vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_EXITINTINFO, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_EXITINTINFO, 1);
vm_exit_intinfo(svm_sc->vm, vcpuid, intinfo);
}
@@ -1355,7 +1355,7 @@
vmexit->rip = state->rip;
vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0;
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_COUNT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
/*
* #VMEXIT(INVALID) needs to be handled early because the VMCB is
@@ -1387,18 +1387,18 @@
handled = 1;
break;
case VMCB_EXIT_VINTR: /* interrupt window exiting */
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_VINTR, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_VINTR, 1);
handled = 1;
break;
case VMCB_EXIT_INTR: /* external interrupt */
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXTINT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
handled = 1;
break;
case VMCB_EXIT_NMI: /* external NMI */
handled = 1;
break;
case 0x40 ... 0x5F:
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_EXCEPTION, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
reflect = 1;
idtvec = code - 0x40;
switch (idtvec) {
@@ -1473,7 +1473,7 @@
retu = false;
if (info1) {
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_WRMSR, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
val = (uint64_t)edx << 32 | eax;
SVM_CTR2(vcpu, "wrmsr %#x val %#lx", ecx, val);
if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) {
@@ -1488,7 +1488,7 @@
}
} else {
SVM_CTR1(vcpu, "rdmsr %#x", ecx);
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_RDMSR, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) {
vmexit->exitcode = VM_EXITCODE_RDMSR;
vmexit->u.msr.code = ecx;
@@ -1502,21 +1502,21 @@
break;
case VMCB_EXIT_IO:
handled = svm_handle_io(vcpu, vmexit);
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INOUT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
break;
case VMCB_EXIT_CPUID:
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_CPUID, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
handled = x86_emulate_cpuid(svm_sc->vm, vcpuid, &state->rax,
&ctx->sctx_rbx, &ctx->sctx_rcx, &ctx->sctx_rdx);
break;
case VMCB_EXIT_HLT:
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_HLT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
vmexit->exitcode = VM_EXITCODE_HLT;
vmexit->u.hlt.rflags = state->rflags;
break;
case VMCB_EXIT_PAUSE:
vmexit->exitcode = VM_EXITCODE_PAUSE;
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_PAUSE, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
break;
case VMCB_EXIT_NPF:
/* EXITINFO2 contains the faulting guest physical address */
@@ -1528,13 +1528,13 @@
vmexit->exitcode = VM_EXITCODE_PAGING;
vmexit->u.paging.gpa = info2;
vmexit->u.paging.fault_type = npf_fault_type(info1);
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
SVM_CTR3(vcpu, "nested page fault "
"on gpa %#lx/%#lx at rip %#lx",
info2, info1, state->rip);
} else if (svm_npf_emul_fault(info1)) {
svm_handle_inst_emul(vmcb, info2, vmexit);
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_INST_EMUL, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
SVM_CTR3(vcpu, "inst_emul fault "
"for gpa %#lx/%#lx at rip %#lx",
info2, info1, state->rip);
@@ -1565,7 +1565,7 @@
handled = 1;
break;
default:
- vmm_stat_incr(svm_sc->vm, vcpuid, VMEXIT_UNKNOWN, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
break;
}
@@ -1610,7 +1610,7 @@
VMCB_EXITINTINFO_VECTOR(intinfo),
VMCB_EXITINTINFO_EC(intinfo),
VMCB_EXITINTINFO_EC_VALID(intinfo));
- vmm_stat_incr(svm_sc->vm, vcpuid, VCPU_INTINFO_INJECTED, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_INTINFO_INJECTED, 1);
SVM_CTR1(vcpu, "Injected entry intinfo: %#lx", intinfo);
}
@@ -2044,7 +2044,7 @@
* migration should take this case into account.
*/
vcpu->lastcpu = curcpu;
- vmm_stat_incr(vm, vcpuid, VCPU_MIGRATIONS, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
}
svm_msr_guest_enter(svm_sc, vcpu);
diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -1314,7 +1314,7 @@
invvpid_desc.vpid = vmxstate->vpid;
invvpid_desc.linear_addr = 0;
invvpid(INVVPID_TYPE_SINGLE_CONTEXT, invvpid_desc);
- vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_DONE, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_DONE, 1);
} else {
/*
* The invvpid can be skipped if an invept is going to
@@ -1322,7 +1322,7 @@
* will invalidate combined mappings tagged with
* 'vmx->eptp' for all vpids.
*/
- vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_INVVPID_SAVED, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_INVVPID_SAVED, 1);
}
}
@@ -1337,7 +1337,7 @@
vmxstate->lastcpu = curcpu;
- vmm_stat_incr(vmx->vm, vcpu->vcpuid, VCPU_MIGRATIONS, 1);
+ vmm_stat_incr(vcpu->vcpu, VCPU_MIGRATIONS, 1);
vmcs_write(VMCS_HOST_TR_BASE, vmm_get_host_trbase());
vmcs_write(VMCS_HOST_GDTR_BASE, vmm_get_host_gdtrbase());
@@ -2384,7 +2384,7 @@
reason = vmexit->u.vmx.exit_reason;
vmexit->exitcode = VM_EXITCODE_BOGUS;
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_COUNT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_COUNT, 1);
SDT_PROBE3(vmm, vmx, exit, entry, vmx, vcpuid, vmexit);
/*
@@ -2495,7 +2495,7 @@
((uint64_t)ts->errcode << 32) | ts->errcode_valid);
break;
case EXIT_REASON_CR_ACCESS:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CR_ACCESS, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_CR_ACCESS, 1);
SDT_PROBE4(vmm, vmx, exit, craccess, vmx, vcpuid, vmexit, qual);
switch (qual & 0xf) {
case 0:
@@ -2510,7 +2510,7 @@
}
break;
case EXIT_REASON_RDMSR:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_RDMSR, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_RDMSR, 1);
retu = false;
ecx = vmxctx->guest_rcx;
VMX_CTR1(vcpu, "rdmsr 0x%08x", ecx);
@@ -2528,7 +2528,7 @@
}
break;
case EXIT_REASON_WRMSR:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_WRMSR, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_WRMSR, 1);
retu = false;
eax = vmxctx->guest_rax;
ecx = vmxctx->guest_rcx;
@@ -2552,7 +2552,7 @@
}
break;
case EXIT_REASON_HLT:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_HLT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_HLT, 1);
SDT_PROBE3(vmm, vmx, exit, halt, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_HLT;
vmexit->u.hlt.rflags = vmcs_read(VMCS_GUEST_RFLAGS);
@@ -2563,18 +2563,18 @@
vmexit->u.hlt.intr_status = 0;
break;
case EXIT_REASON_MTF:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_MTRAP, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_MTRAP, 1);
SDT_PROBE3(vmm, vmx, exit, mtrap, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_MTRAP;
vmexit->inst_length = 0;
break;
case EXIT_REASON_PAUSE:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_PAUSE, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_PAUSE, 1);
SDT_PROBE3(vmm, vmx, exit, pause, vmx, vcpuid, vmexit);
vmexit->exitcode = VM_EXITCODE_PAUSE;
break;
case EXIT_REASON_INTR_WINDOW:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INTR_WINDOW, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_INTR_WINDOW, 1);
SDT_PROBE3(vmm, vmx, exit, intrwindow, vmx, vcpuid, vmexit);
vmx_clear_int_window_exiting(vcpu);
return (1);
@@ -2607,7 +2607,7 @@
* This is special. We want to treat this as an 'handled'
* VM-exit but not increment the instruction pointer.
*/
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXTINT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXTINT, 1);
return (1);
case EXIT_REASON_NMI_WINDOW:
SDT_PROBE3(vmm, vmx, exit, nmiwindow, vmx, vcpuid, vmexit);
@@ -2615,10 +2615,10 @@
if (vm_nmi_pending(vmx->vm, vcpuid))
vmx_inject_nmi(vmx, vcpu);
vmx_clear_nmi_window_exiting(vcpu);
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NMI_WINDOW, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_NMI_WINDOW, 1);
return (1);
case EXIT_REASON_INOUT:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INOUT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_INOUT, 1);
vmexit->exitcode = VM_EXITCODE_INOUT;
vmexit->u.inout.bytes = (qual & 0x7) + 1;
vmexit->u.inout.in = in = (qual & 0x8) ? 1 : 0;
@@ -2641,12 +2641,12 @@
SDT_PROBE3(vmm, vmx, exit, inout, vmx, vcpuid, vmexit);
break;
case EXIT_REASON_CPUID:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_CPUID, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_CPUID, 1);
SDT_PROBE3(vmm, vmx, exit, cpuid, vmx, vcpuid, vmexit);
handled = vmx_handle_cpuid(vmx->vm, vcpuid, vmxctx);
break;
case EXIT_REASON_EXCEPTION:
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_EXCEPTION, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_EXCEPTION, 1);
intr_info = vmcs_read(VMCS_EXIT_INTR_INFO);
KASSERT((intr_info & VMCS_INTR_VALID) != 0,
("VM exit interruption info invalid: %#x", intr_info));
@@ -2740,12 +2740,12 @@
vmexit->inst_length = 0;
vmexit->u.paging.gpa = gpa;
vmexit->u.paging.fault_type = ept_fault_type(qual);
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_NESTED_FAULT, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_NESTED_FAULT, 1);
SDT_PROBE5(vmm, vmx, exit, nestedfault,
vmx, vcpuid, vmexit, gpa, qual);
} else if (ept_emulation_fault(qual)) {
vmexit_inst_emul(vmexit, gpa, vmcs_gla());
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_INST_EMUL, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_INST_EMUL, 1);
SDT_PROBE4(vmm, vmx, exit, mmiofault,
vmx, vcpuid, vmexit, gpa);
}
@@ -2821,7 +2821,7 @@
default:
SDT_PROBE4(vmm, vmx, exit, unknown,
vmx, vcpuid, vmexit, reason);
- vmm_stat_incr(vmx->vm, vcpuid, VMEXIT_UNKNOWN, 1);
+ vmm_stat_incr(vcpu->vcpu, VMEXIT_UNKNOWN, 1);
break;
}
diff --git a/sys/amd64/vmm/io/vlapic.c b/sys/amd64/vmm/io/vlapic.c
--- a/sys/amd64/vmm/io/vlapic.c
+++ b/sys/amd64/vmm/io/vlapic.c
@@ -600,7 +600,7 @@
}
}
VLAPIC_CTR0(vlapic, "Gratuitous EOI");
- vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_GRATUITOUS_EOI, 1);
+ vmm_stat_incr(vlapic->vcpu, VLAPIC_GRATUITOUS_EOI, 1);
}
static __inline int
@@ -636,7 +636,7 @@
return;
if (vlapic_fire_lvt(vlapic, APIC_LVT_ERROR)) {
- vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_ERROR, 1);
+ vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_ERROR, 1);
}
}
@@ -650,7 +650,7 @@
if (vlapic_fire_lvt(vlapic, APIC_LVT_TIMER)) {
VLAPIC_CTR0(vlapic, "vlapic timer fired");
- vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_TIMER, 1);
+ vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_TIMER, 1);
}
}
@@ -662,7 +662,7 @@
{
if (vlapic_fire_lvt(vlapic, APIC_LVT_CMCI)) {
- vmm_stat_incr(vlapic->vm, vlapic->vcpuid, VLAPIC_INTR_CMC, 1);
+ vmm_stat_incr(vlapic->vcpu, VLAPIC_INTR_CMC, 1);
}
}
@@ -701,8 +701,8 @@
case APIC_LVT_THERMAL:
case APIC_LVT_CMCI:
if (vlapic_fire_lvt(vlapic, vector)) {
- vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
- LVTS_TRIGGERRED, vector, 1);
+ vmm_stat_array_incr(vlapic->vcpu, LVTS_TRIGGERRED,
+ vector, 1);
}
break;
default:
@@ -1102,8 +1102,7 @@
CPU_FOREACH_ISSET(i, &dmask) {
lapic_intr_edge(vlapic->vm, i, vec);
- vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid,
- IPIS_SENT, i, 1);
+ vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, i, 1);
VLAPIC_CTR2(vlapic,
"vlapic sending ipi %d to vcpuid %d", vec, i);
}
@@ -1238,8 +1237,7 @@
vec = val & 0xff;
lapic_intr_edge(vlapic->vm, vlapic->vcpuid, vec);
- vmm_stat_array_incr(vlapic->vm, vlapic->vcpuid, IPIS_SENT,
- vlapic->vcpuid, 1);
+ vmm_stat_array_incr(vlapic->vcpu, IPIS_SENT, vlapic->vcpuid, 1);
VLAPIC_CTR1(vlapic, "vlapic self-ipi %d", vec);
}
diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -1447,7 +1447,7 @@
*/
msleep_spin(vcpu, &vcpu->mtx, wmesg, hz);
vcpu_require_state_locked(vm, vcpuid, VCPU_FROZEN);
- vmm_stat_incr(vm, vcpuid, VCPU_IDLE_TICKS, ticks - t);
+ vmm_stat_incr(vcpu, VCPU_IDLE_TICKS, ticks - t);
if (td_ast_pending(td, TDA_SUSPEND)) {
vcpu_unlock(vcpu);
error = thread_check_susp(td, false);
@@ -1729,7 +1729,7 @@
vmexit->rip = rip;
vmexit->inst_length = 0;
vmexit->exitcode = VM_EXITCODE_RENDEZVOUS;
- vmm_stat_incr(vm, vcpuid, VMEXIT_RENDEZVOUS, 1);
+ vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_RENDEZVOUS, 1);
}
void
@@ -1741,7 +1741,7 @@
vmexit->rip = rip;
vmexit->inst_length = 0;
vmexit->exitcode = VM_EXITCODE_REQIDLE;
- vmm_stat_incr(vm, vcpuid, VMEXIT_REQIDLE, 1);
+ vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_REQIDLE, 1);
}
void
@@ -1753,7 +1753,7 @@
vmexit->rip = rip;
vmexit->inst_length = 0;
vmexit->exitcode = VM_EXITCODE_BOGUS;
- vmm_stat_incr(vm, vcpuid, VMEXIT_ASTPENDING, 1);
+ vmm_stat_incr(vm_vcpu(vm, vcpuid), VMEXIT_ASTPENDING, 1);
}
int
@@ -1804,7 +1804,7 @@
save_guest_fpustate(vcpu);
- vmm_stat_incr(vm, vcpuid, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
+ vmm_stat_incr(vcpu, VCPU_TOTAL_RUNTIME, rdtsc() - tscval);
critical_exit();
@@ -1862,7 +1862,7 @@
if (error == 0 && retu == false)
goto restart;
- vmm_stat_incr(vm, vcpuid, VMEXIT_USERSPACE, 1);
+ vmm_stat_incr(vcpu, VMEXIT_USERSPACE, 1);
VCPU_CTR2(vm, vcpuid, "retu %d/%d", error, vme->exitcode);
/* copy the exit information */
@@ -2246,7 +2246,7 @@
panic("vm_nmi_clear: inconsistent nmi_pending state");
vcpu->nmi_pending = 0;
- vmm_stat_incr(vm, vcpuid, VCPU_NMI_COUNT, 1);
+ vmm_stat_incr(vcpu, VCPU_NMI_COUNT, 1);
}
static VMM_STAT(VCPU_EXTINT_COUNT, "number of ExtINTs delivered to vcpu");
@@ -2293,7 +2293,7 @@
panic("vm_extint_clear: inconsistent extint_pending state");
vcpu->extint_pending = 0;
- vmm_stat_incr(vm, vcpuid, VCPU_EXTINT_COUNT, 1);
+ vmm_stat_incr(vcpu, VCPU_EXTINT_COUNT, 1);
}
int
@@ -2532,10 +2532,10 @@
}
void *
-vcpu_stats(struct vm *vm, int vcpuid)
+vcpu_stats(struct vcpu *vcpu)
{
- return (vm->vcpu[vcpuid].stats);
+ return (vcpu->stats);
}
int
@@ -2827,7 +2827,7 @@
{
if (vcpu == 0) {
- vmm_stat_set(vm, vcpu, VMM_MEM_RESIDENT,
+ vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_RESIDENT,
PAGE_SIZE * vmspace_resident_count(vm->vmspace));
}
}
@@ -2837,7 +2837,7 @@
{
if (vcpu == 0) {
- vmm_stat_set(vm, vcpu, VMM_MEM_WIRED,
+ vmm_stat_set(vm_vcpu(vm, vcpu), VMM_MEM_WIRED,
PAGE_SIZE * pmap_wired_count(vmspace_pmap(vm->vmspace)));
}
}
diff --git a/sys/amd64/vmm/vmm_stat.h b/sys/amd64/vmm/vmm_stat.h
--- a/sys/amd64/vmm/vmm_stat.h
+++ b/sys/amd64/vmm/vmm_stat.h
@@ -92,13 +92,13 @@
int vmm_stat_desc_copy(int index, char *buf, int buflen);
static void __inline
-vmm_stat_array_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
- int statidx, uint64_t x)
+vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+ uint64_t x)
{
#ifdef VMM_KEEP_STATS
uint64_t *stats;
- stats = vcpu_stats(vm, vcpu);
+ stats = vcpu_stats(vcpu);
if (vst->index >= 0 && statidx < vst->nelems)
stats[vst->index + statidx] += x;
@@ -106,13 +106,13 @@
}
static void __inline
-vmm_stat_array_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst,
- int statidx, uint64_t val)
+vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+ uint64_t val)
{
#ifdef VMM_KEEP_STATS
uint64_t *stats;
- stats = vcpu_stats(vm, vcpu);
+ stats = vcpu_stats(vcpu);
if (vst->index >= 0 && statidx < vst->nelems)
stats[vst->index + statidx] = val;
@@ -120,20 +120,20 @@
}
static void __inline
-vmm_stat_incr(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t x)
+vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
{
#ifdef VMM_KEEP_STATS
- vmm_stat_array_incr(vm, vcpu, vst, 0, x);
+ vmm_stat_array_incr(vcpu, vst, 0, x);
#endif
}
static void __inline
-vmm_stat_set(struct vm *vm, int vcpu, struct vmm_stat_type *vst, uint64_t val)
+vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
{
#ifdef VMM_KEEP_STATS
- vmm_stat_array_set(vm, vcpu, vst, 0, val);
+ vmm_stat_array_set(vcpu, vst, 0, val);
#endif
}
diff --git a/sys/amd64/vmm/vmm_stat.c b/sys/amd64/vmm/vmm_stat.c
--- a/sys/amd64/vmm/vmm_stat.c
+++ b/sys/amd64/vmm/vmm_stat.c
@@ -113,7 +113,7 @@
}
/* Copy over the stats */
- stats = vcpu_stats(vm, vcpu);
+ stats = vcpu_stats(vm_vcpu(vm, vcpu));
memcpy(buf, stats + index, tocopy * sizeof(stats[0]));
*num_stats = tocopy;
return (0);
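
For context, a minimal standalone sketch of the pattern this diff converges on: per-vcpu statistics are reached through the vcpu object itself rather than through a (vm, vcpuid) pair. The names below (stat_incr, NSTATS, the local struct vcpu) are hypothetical illustrations, not the vmm(4) code; in the patch the real equivalents are vmm_stat_incr(), vmm_stat_array_incr() and vcpu_stats().

/*
 * Standalone sketch, hypothetical names only: counters hang off the
 * vcpu object and the helper takes the vcpu pointer directly.
 */
#include <stdint.h>
#include <stdio.h>

#define	NSTATS	4

struct vcpu {
	uint64_t stats[NSTATS];		/* per-vcpu counters, like vcpu->stats */
};

/* New-style helper: caller passes the vcpu, no (vm, vcpuid) lookup. */
static inline void
stat_incr(struct vcpu *vcpu, int statidx, uint64_t x)
{
	if (statidx >= 0 && statidx < NSTATS)
		vcpu->stats[statidx] += x;
}

int
main(void)
{
	struct vcpu vcpu = { { 0 } };

	stat_incr(&vcpu, 0, 1);		/* e.g. bump a VM-exit counter */
	printf("stat[0] = %llu\n", (unsigned long long)vcpu.stats[0]);
	return (0);
}

As in the patch itself, callers that only hold a (vm, vcpuid) pair first translate with vm_vcpu(vm, vcpuid), e.g. the vm_exit_rendezvous() and vmm_stat_copy() hunks above.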
