Skip to content

Commit

Permalink
implement RDMSRLIST/WRMSRLIST instructions (+related VMX extensions)
Browse files Browse the repository at this point in the history
  • Loading branch information
Shwartsman committed Dec 16, 2023
1 parent accb975 commit 52d09a4
Show file tree
Hide file tree
Showing 12 changed files with 213 additions and 57 deletions.
4 changes: 2 additions & 2 deletions bochs/CHANGES
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ Brief summary :
! Implemented Linear Address Separation (LASS) extension
! Implemented User-Level Interrupt (UINTR) extension
! Implemented recently published Intel instruction sets:
- MOVDIRI/MOVDIR64B, AVX512 BF16, AVX IFMA52, VNNI-INT8/VNNI-INT16, AVX-NE-CONVERT, CMPCCXADD, SM3, SM4, SHA512, WRMSRNS, WAITPKG, SERIALIZE
- MOVDIRI/MOVDIR64B, AVX512 BF16, AVX IFMA52, VNNI-INT8/VNNI-INT16, AVX-NE-CONVERT, CMPCCXADD, SM3, SM4, SHA512, WRMSRNS, MSRLIST, WAITPKG, SERIALIZE
- Improved 64-bit guest support in Bochs internal debugger, added new internal debugger commands
- Bochs debugger enhanced with new commands (setpmem, loadmem, deref, ...)
Enhanced magic breakpoint capabilities. Refer to user documentation for more details.
Expand All @@ -37,7 +37,7 @@ Detailed change log :
- Implemented Linear Address Separation (LASS) extension
- Implemented User-Level Interrupt (UINTR) extension
- Implemented recently published Intel instruction sets:
- MOVDIRI/MOVDIR64B, AVX512 BF16, AVX IFMA52, VNNI-INT8/VNNI-INT16, AVX-NE-CONVERT, CMPCCXADD, SM3, SM4, SHA512, WRMSRNS, WAITPKG, SERIALIZE
- MOVDIRI/MOVDIR64B, AVX512 BF16, AVX IFMA52, VNNI-INT8/VNNI-INT16, AVX-NE-CONVERT, CMPCCXADD, SM3, SM4, SHA512, WRMSRNS, MSRLIST, WAITPKG, SERIALIZE

- Bochs Debugger and Instrumentation
- Updated Bochs instrumentation examples for new disassembler introduced in Bochs 2.7 release.
Expand Down
5 changes: 5 additions & 0 deletions bochs/cpu/cpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -4087,6 +4087,11 @@ class BOCHSAPI BX_CPU_C : public logfunctions {
BX_SMF void UMONITOR_Eq(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
BX_SMF void UMWAIT_Ed(bxInstruction_c *) BX_CPP_AttrRegparmN(1);

#if BX_SUPPORT_X86_64
BX_SMF void WRMSRLIST(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
BX_SMF void RDMSRLIST(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
#endif

#if BX_SUPPORT_PKEYS
BX_SMF void RDPKRU(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
BX_SMF void WRPKRU(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
Expand Down
4 changes: 4 additions & 0 deletions bochs/cpu/cpuid.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1190,7 +1190,11 @@ Bit32u bx_cpuid_t::get_std_cpuid_leaf_7_subleaf_1_eax(Bit32u extra) const

// [25:24] reserved
// [26:26] LAM: Linear Address Masking

// [27:27] MSRLIST: RDMSRLIST/WRMSRLIST instructions and the IA32_BARRIER MSR
if (is_cpu_extension_supported(BX_ISA_MSRLIST))
eax |= BX_CPUID_STD7_SUBLEAF1_EAX_MSRLIST;

// [31:28] reserved

return eax;
Expand Down
1 change: 1 addition & 0 deletions bochs/cpu/decoder/features.h
Original file line number Diff line number Diff line change
Expand Up @@ -143,3 +143,4 @@ x86_feature(BX_ISA_LASS, "lass") /* Linea
x86_feature(BX_ISA_UINTR, "uintr") /* User Level Interrupts support */
x86_feature(BX_ISA_MOVDIRI, "movdiri") /* MOVDIRI instruction support */
x86_feature(BX_ISA_MOVDIR64B, "movdir64b") /* MOVDIR64B instruction support */
x86_feature(BX_ISA_MSRLIST, "msrlist") /* RDMSRLIST/WRMSRLIST instructions support */
4 changes: 4 additions & 0 deletions bochs/cpu/decoder/fetchdecode_opmap.h
Original file line number Diff line number Diff line change
Expand Up @@ -1519,6 +1519,10 @@ static const Bit64u BxOpcodeTable0F01[] = {
form_opcode(ATTR_NNN0 | ATTR_RRR3 | ATTR_MODC0 | ATTR_SSE_NO_PREFIX, BX_IA_VMRESUME), // 0F 01 C3
form_opcode(ATTR_NNN0 | ATTR_RRR4 | ATTR_MODC0 | ATTR_SSE_NO_PREFIX, BX_IA_VMXOFF), // 0F 01 C4
form_opcode(ATTR_NNN0 | ATTR_RRR6 | ATTR_MODC0 | ATTR_SSE_NO_PREFIX, BX_IA_WRMSRNS), // 0F 01 C6
#if BX_SUPPORT_X86_64
form_opcode(ATTR_NNN0 | ATTR_RRR6 | ATTR_MODC0 | ATTR_SSE_PREFIX_F2 | ATTR_IS64, BX_IA_RDMSRLIST),
form_opcode(ATTR_NNN0 | ATTR_RRR6 | ATTR_MODC0 | ATTR_SSE_PREFIX_F3 | ATTR_IS64, BX_IA_WRMSRLIST),
#endif

form_opcode(ATTR_NNN1 | ATTR_RRR0 | ATTR_MODC0 | ATTR_SSE_NO_PREFIX, BX_IA_MONITOR), // 0F 01 C8
form_opcode(ATTR_NNN1 | ATTR_RRR1 | ATTR_MODC0 | ATTR_SSE_NO_PREFIX, BX_IA_MWAIT), // 0F 01 C9
Expand Down
6 changes: 5 additions & 1 deletion bochs/cpu/decoder/ia_opcodes.def
Original file line number Diff line number Diff line change
Expand Up @@ -1801,8 +1801,12 @@ bx_define_opcode(BX_IA_SENDUIPI_Gq, "senduipi", "senduipi", NULL, &BX_CPU_C::SEN

bx_define_opcode(BX_IA_RDPID_Ed, "rdpid", "rdpid", NULL, &BX_CPU_C::RDPID_Ed, BX_ISA_RDPID, OP_Ed, OP_NONE, OP_NONE, OP_NONE, 0)

bx_define_opcode(BX_IA_WRMSRNS, "wrmsrns", "wrmsrns", NULL, &BX_CPU_C::WRMSR, BX_ISA_WRMSRNS, OP_NONE, OP_NONE, OP_NONE, OP_NONE, BX_TRACE_END)
bx_define_opcode(BX_IA_SERIALIZE, "serialize", "serialize", &BX_CPU_C::BxError, &BX_CPU_C::NOP, BX_ISA_SERIALIZE, OP_NONE, OP_NONE, OP_NONE, OP_NONE, 0)
bx_define_opcode(BX_IA_WRMSRNS, "wrmsrns", "wrmsrns", NULL, &BX_CPU_C::WRMSR, BX_ISA_WRMSRNS, OP_NONE, OP_NONE, OP_NONE, OP_NONE, BX_TRACE_END)
#if BX_SUPPORT_X86_64
bx_define_opcode(BX_IA_RDMSRLIST, "rdmsrlist", "rdmsrlist", NULL, &BX_CPU_C::RDMSRLIST, BX_ISA_MSRLIST, OP_NONE, OP_NONE, OP_NONE, OP_NONE, BX_TRACE_END)
bx_define_opcode(BX_IA_WRMSRLIST, "wrmsrlist", "wrmsrlist", NULL, &BX_CPU_C::WRMSRLIST, BX_ISA_MSRLIST, OP_NONE, OP_NONE, OP_NONE, OP_NONE, BX_TRACE_END)
#endif

#if BX_SUPPORT_AVX && BX_CPU_LEVEL >= 6
// AVX1/AVX2
Expand Down
153 changes: 133 additions & 20 deletions bochs/cpu/msr.cc
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,22 @@ bool BX_CPP_AttrRegparmN(2) BX_CPU_C::rdmsr(Bit32u index, Bit64u *msr)
{
Bit64u val64 = 0;

#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL2_VIRTUALIZE_X2APIC_MODE)) {
if (index >= 0x800 && index <= 0x8FF) {
if (index == 0x808 || SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL2_VIRTUALIZE_APIC_REGISTERS)) {
unsigned vapic_offset = (index & 0xff) << 4;
Bit32u msr_lo = VMX_Read_Virtual_APIC(vapic_offset);
Bit32u msr_hi = VMX_Read_Virtual_APIC(vapic_offset + 4);
*msr = GET64_FROM_HI32_LO32(msr_hi, msr_lo);
return true;
}
}
}
}
#endif

#if BX_CPU_LEVEL >= 6
if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
if (is_x2apic_msr_range(index)) {
Expand Down Expand Up @@ -556,21 +572,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDMSR(bxInstruction_c *i)
VMexit_MSR(VMX_VMEXIT_RDMSR, index);
#endif

#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL2_VIRTUALIZE_X2APIC_MODE)) {
if (index >= 0x800 && index <= 0x8FF) {
if (index == 0x808 || SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL2_VIRTUALIZE_APIC_REGISTERS)) {
unsigned vapic_offset = (index & 0xff) << 4;
RAX = VMX_Read_Virtual_APIC(vapic_offset);
RDX = VMX_Read_Virtual_APIC(vapic_offset + 4);
BX_NEXT_INSTR(i);
}
}
}
}
#endif

if (!rdmsr(index, &val64))
exception(BX_GP_EXCEPTION, 0);

Expand Down Expand Up @@ -632,6 +633,15 @@ bool BX_CPP_AttrRegparmN(2) BX_CPU_C::wrmsr(Bit32u index, Bit64u val_64)

BX_DEBUG(("WRMSR: write %08x:%08x to MSR %x", val32_hi, val32_lo, index));

#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL2_VIRTUALIZE_X2APIC_MODE)) {
if (Virtualize_X2APIC_Write(index, val_64))
return true;
}
}
#endif

#if BX_CPU_LEVEL >= 6
if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
if (is_x2apic_msr_range(index)) {
Expand Down Expand Up @@ -1337,22 +1347,125 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSR(bxInstruction_c *i)
VMexit_MSR(VMX_VMEXIT_WRMSR, index);
#endif

#if BX_SUPPORT_VMX >= 2
if (! wrmsr(index, val_64))
exception(BX_GP_EXCEPTION, 0);
#endif

BX_NEXT_TRACE(i);
}

#if BX_SUPPORT_X86_64

#include "scalar_arith.h"

// RDMSRLIST: read a list of MSRs.
//   RCX - bitmask of pending table entries (bit k set => process entry k)
//   RSI - linear address of the 64-entry-max table of MSR addresses (8 bytes each)
//   RDI - linear address of the table receiving the MSR values (8 bytes each)
// Requires 64-bit mode and CPL=0; RSI/RDI must be 8-byte aligned.
// The loop is restartable: each completed entry clears its RCX bit, and on a
// pending async event RIP is rewound so the instruction resumes where it left off.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDMSRLIST(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  // In a VMX guest the instruction #UDs unless the "enable MSRLIST"
  // tertiary processor-based execution control is set.
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! TERTIARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_ENABLE_MSRLIST))
      exception(BX_UD_EXCEPTION, 0);
  }
#endif

  if (!long64_mode() || CPL!=0) {
    BX_ERROR(("RDMSRLIST: CPL != 0 cause #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (((ESI | EDI) & 0x7) != 0) {
    BX_ERROR(("RDMSRLIST: RSI and RDI must be 8-byte aligned"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit64u val64;

  while (RCX != 0) {
    unsigned MSR_index = tzcntq(RCX); // position of least significant bit set in RCX
    Bit64u MSR_mask = (BX_CONST64(1) << MSR_index);
    Bit64u MSR_address = read_linear_qword(BX_SEG_REG_DS, RSI + MSR_index*8);
    // Bits [63:32] of a table entry are reserved and must be zero.
    if (GET32H(MSR_address)) {
      BX_ERROR(("RDMSRLIST index=%d #GP(0): reserved bits are set in MSR address table entry", MSR_index));
      exception(BX_GP_EXCEPTION, 0);
    }

#if BX_SUPPORT_VMX >= 2
    // Consult the MSR bitmaps / controls for a per-MSR VM exit.
    if (BX_CPU_THIS_PTR in_vmx_guest)
      VMexit_MSR(VMX_VMEXIT_RDMSRLIST, (Bit32u) MSR_address);
#endif

    if (!rdmsr((Bit32u) MSR_address, &val64))
      exception(BX_GP_EXCEPTION, 0);

    write_linear_qword(BX_SEG_REG_DS, RDI + MSR_index*8, val64);

    RCX &= ~MSR_mask; // mark this entry as done before a possible interruption

    // allow delivery of any pending interrupts or traps
    if (BX_CPU_THIS_PTR async_event) {
      RIP = BX_CPU_THIS_PTR prev_rip; // loop not done, restore RIP
      break;
    }
  }

  BX_NEXT_TRACE(i);
}

// WRMSRLIST: write a list of MSRs.
//   RCX - bitmask of pending table entries (bit k set => process entry k)
//   RSI - linear address of the table of MSR addresses (8 bytes each)
//   RDI - linear address of the table of values to write (8 bytes each)
// Requires 64-bit mode and CPL=0; RSI/RDI must be 8-byte aligned.
// The loop is restartable: each completed entry clears its RCX bit, and on a
// pending async event RIP is rewound so the instruction resumes where it left off.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSRLIST(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX
  // In a VMX guest the instruction #UDs unless the "enable MSRLIST"
  // tertiary processor-based execution control is set.
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! TERTIARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_ENABLE_MSRLIST))
      exception(BX_UD_EXCEPTION, 0);
  }
#endif

  if (!long64_mode() || CPL!=0) {
    BX_ERROR(("WRMSRLIST: CPL != 0 cause #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (((ESI | EDI) & 0x7) != 0) {
    BX_ERROR(("WRMSRLIST: RSI and RDI must be 8-byte aligned"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // MSR writes can change code-affecting state; drop prefetched instructions.
  invalidate_prefetch_q();

  while (RCX != 0) {
    unsigned MSR_index = tzcntq(RCX); // position of least significant bit set in RCX
    Bit64u MSR_mask = (BX_CONST64(1) << MSR_index);
    Bit64u MSR_address = read_linear_qword(BX_SEG_REG_DS, RSI + MSR_index*8);
    Bit64u MSR_data = read_linear_qword(BX_SEG_REG_DS, RDI + MSR_index*8);
    // Bits [63:32] of an address table entry are reserved and must be zero.
    if (GET32H(MSR_address)) {
      BX_ERROR(("WRMSRLIST index=%d #GP(0): reserved bits are set in MSR address table entry", MSR_index));
      exception(BX_GP_EXCEPTION, 0);
    }

#if BX_SUPPORT_VMX >= 2
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      // Stash the value being written so a VM exit can report it in the
      // MSR-data VMCS field, then consult the MSR bitmaps / controls.
      VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
      vm->msr_data = MSR_data;
      VMexit_MSR(VMX_VMEXIT_WRMSRLIST, (Bit32u) MSR_address);
    }
#endif

    if (! wrmsr((Bit32u) MSR_address, MSR_data))
      exception(BX_GP_EXCEPTION, 0);

    RCX &= ~MSR_mask; // mark this entry as done before a possible interruption

    // allow delivery of any pending interrupts or traps
    if (BX_CPU_THIS_PTR async_event) {
      RIP = BX_CPU_THIS_PTR prev_rip; // loop not done, restore RIP
      break;
    }
  }

  BX_NEXT_TRACE(i);
}

#endif

#if BX_CONFIGURE_MSRS

int BX_CPU_C::load_MSRs(const char *file)
Expand Down
2 changes: 2 additions & 0 deletions bochs/cpu/msr.h
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,8 @@ enum MSR_Register {
BX_MSR_TSC_ADJUST = 0x03b,
BX_MSR_TSC_DEADLINE = 0x6e0,

BX_MSR_IA32_BARRIER = 0x02f,

BX_MSR_IA32_SPEC_CTRL = 0x048,
BX_MSR_IA32_PRED_CMD = 0x049,
BX_MSR_IA32_UMWAIT_CONTROL = 0x0e1,
Expand Down
10 changes: 10 additions & 0 deletions bochs/cpu/vmcs.cc
Original file line number Diff line number Diff line change
Expand Up @@ -402,6 +402,10 @@ bool BX_CPU_C::vmcs_field_supported(Bit32u encoding)
case VMCS_64BIT_GUEST_PHYSICAL_ADDR:
case VMCS_64BIT_GUEST_PHYSICAL_ADDR_HI:
return BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT);

case VMCS_64BIT_MSR_DATA:
case VMCS_64BIT_MSR_DATA_HI:
return is_cpu_extension_supported(BX_ISA_MSRLIST);
#endif

/* VMCS 64-bit guest state fields */
Expand Down Expand Up @@ -854,9 +858,15 @@ void BX_CPU_C::init_tertiary_proc_based_vmexec_ctrls(void)
// [03] Guest Paging verification
// [04] IPI Virtualization
// ...
// [06] Enable MSRLIST instructions
// [07] Virtualize IA32_SPEC_CTRL
// ...

cap->vmx_vmexec_ctrl3_supported_bits = 0;

if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_MSRLIST)) {
cap->vmx_vmexec_ctrl3_supported_bits |= VMX_VM_EXEC_CTRL3_ENABLE_MSRLIST;
}
}

void BX_CPU_C::init_vmexit_ctrls(void)
Expand Down
50 changes: 27 additions & 23 deletions bochs/cpu/vmexit.cc
Original file line number Diff line number Diff line change
Expand Up @@ -327,36 +327,40 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_MSR(unsigned op, Bit32u msr)
{
BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

bool vmexit = false;
if (! VMEXIT(VMX_VM_EXEC_CTRL1_MSR_BITMAPS)) vmexit = true;
else {
VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
bool readmsr = (op == VMX_VMEXIT_RDMSR || op == VMX_VMEXIT_RDMSRLIST);

if (msr >= BX_VMX_HI_MSR_START) {
if (msr > BX_VMX_HI_MSR_END) vmexit = true;
else {
// check MSR-HI bitmaps
bx_phy_address pAddr = vm->msr_bitmap_addr + ((msr - BX_VMX_HI_MSR_START) >> 3) + 1024 + ((op == VMX_VMEXIT_RDMSR) ? 0 : 2048);
Bit8u field = read_physical_byte(pAddr, MEMTYPE(resolve_memtype(pAddr)), BX_MSR_BITMAP_ACCESS);
if (field & (1 << (msr & 7)))
vmexit = true;
}
if (! VMEXIT(VMX_VM_EXEC_CTRL1_MSR_BITMAPS)) {
BX_DEBUG(("VMEXIT: %sMSR 0x%08x", (readmsr) ? "RD" : "WR", msr));
VMexit(op, 0);
}

VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

bool vmexit = false;
if (msr >= BX_VMX_HI_MSR_START) {
if (msr > BX_VMX_HI_MSR_END) vmexit = true;
else {
// check MSR-HI bitmaps
bx_phy_address pAddr = vm->msr_bitmap_addr + ((msr - BX_VMX_HI_MSR_START) >> 3) + 1024 + (readmsr ? 0 : 2048);
Bit8u field = read_physical_byte(pAddr, MEMTYPE(resolve_memtype(pAddr)), BX_MSR_BITMAP_ACCESS);
if (field & (1 << (msr & 7)))
vmexit = true;
}
}
else {
if (msr > BX_VMX_LO_MSR_END) vmexit = true;
else {
if (msr > BX_VMX_LO_MSR_END) vmexit = true;
else {
// check MSR-LO bitmaps
bx_phy_address pAddr = vm->msr_bitmap_addr + (msr >> 3) + ((op == VMX_VMEXIT_RDMSR) ? 0 : 2048);
Bit8u field = read_physical_byte(pAddr, MEMTYPE(resolve_memtype(pAddr)), BX_MSR_BITMAP_ACCESS);
if (field & (1 << (msr & 7)))
vmexit = true;
}
// check MSR-LO bitmaps
bx_phy_address pAddr = vm->msr_bitmap_addr + (msr >> 3) + (readmsr ? 0 : 2048);
Bit8u field = read_physical_byte(pAddr, MEMTYPE(resolve_memtype(pAddr)), BX_MSR_BITMAP_ACCESS);
if (field & (1 << (msr & 7)))
vmexit = true;
}
}

if (vmexit) {
BX_DEBUG(("VMEXIT: %sMSR 0x%08x", (op == VMX_VMEXIT_RDMSR) ? "RD" : "WR", msr));
VMexit(op, 0);
BX_DEBUG(("VMEXIT: %sMSR 0x%08x", (readmsr) ? "RD" : "WR", msr));
VMexit(op, (op >= VMX_VMEXIT_RDMSRLIST) ? msr : 0);
}
}

Expand Down
Loading

0 comments on commit 52d09a4

Please sign in to comment.