diff --git a/bochs/CHANGES b/bochs/CHANGES index 65672fb5c3..a1c77b1405 100644 --- a/bochs/CHANGES +++ b/bochs/CHANGES @@ -4,7 +4,7 @@ The Bochs source tree is transitioning from SVN to GIT hosted on github (https:/ We welcome every new contributor ! Brief summary : - - Bugfixes for CPU emulation correctness (MONITOR/MWAIT, VMX/SVM, AVX-512, SHA fixes) + - Bugfixes for CPU emulation correctness (MONITOR/MWAIT, VMX/SVM, AVX-512, CET, SHA fixes) ! Implemented VMX MBE (Mode Based Execution Control) emulation required for Windows 11 guest ! Implemented Linear Address Separation (LASS) extension ! Implemented recently published Intel instruction sets: @@ -23,7 +23,7 @@ Brief summary : Detailed change log : - CPU/CPUDB - - Bugfixes for CPU emulation correctness (MONITOR/MWAIT, VMX/SVM, AVX-512, SHA fixes) + - Bugfixes for CPU emulation correctness (MONITOR/MWAIT, VMX/SVM, AVX-512, CET, SHA fixes) - Implemented VMX MBE (Mode Based Execution Control) emulation required for Windows 11 guest - Implemented Linear Address Separation (LASS) extension - Implemented recently published Intel instruction sets: diff --git a/bochs/config.h.in b/bochs/config.h.in index c1d40b5893..b7aa8a9be6 100644 --- a/bochs/config.h.in +++ b/bochs/config.h.in @@ -465,6 +465,8 @@ #define GET32L(val64) ((Bit32u)(((Bit64u)(val64)) & 0xFFFFFFFF)) #define GET32H(val64) ((Bit32u)(((Bit64u)(val64)) >> 32)) +#define GET64_FROM_HI32_LO32(hi, lo) (Bit64u(lo) | (Bit64u(hi) << 32)) + // now that Bit32u and Bit64u exist, defined bx_address #if BX_SUPPORT_X86_64 typedef Bit64u bx_address; diff --git a/bochs/cpu/access.cc b/bochs/cpu/access.cc index b58ba6eb64..cef747cf4f 100644 --- a/bochs/cpu/access.cc +++ b/bochs/cpu/access.cc @@ -299,9 +299,8 @@ BX_CPU_C::system_read_byte(bx_address laddr) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us read access - // from this CPL. 
- if (tlbEntry->accessBits & 0x01) { + // See if the TLB entry privilege level allows us read access from CPL=0 + if (isReadOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset); @@ -325,9 +324,8 @@ BX_CPU_C::system_read_word(bx_address laddr) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us read access - // from this CPL. - if (tlbEntry->accessBits & 0x01) { + // See if the TLB entry privilege level allows us read access from CPL=0 + if (isReadOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset); @@ -351,9 +349,8 @@ BX_CPU_C::system_read_dword(bx_address laddr) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us read access - // from this CPL. - if (tlbEntry->accessBits & 0x01) { + // See if the TLB entry privilege level allows us read access from CPL=0 + if (isReadOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset); @@ -377,9 +374,8 @@ BX_CPU_C::system_read_qword(bx_address laddr) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us read access - // from this CPL. 
- if (tlbEntry->accessBits & 0x01) { + // See if the TLB entry privilege level allows us read access from CPL=0 + if (isReadOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset); @@ -401,8 +397,7 @@ BX_CPU_C::system_write_byte(bx_address laddr, Bit8u data) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us write access - // from this CPL. + // See if the TLB entry privilege level allows us write access from CPL=0 if (isWriteOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); @@ -425,8 +420,7 @@ BX_CPU_C::system_write_word(bx_address laddr, Bit16u data) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us write access - // from this CPL. + // See if the TLB entry privilege level allows us write access from CPL=0 if (isWriteOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); @@ -449,8 +443,7 @@ BX_CPU_C::system_write_dword(bx_address laddr, Bit32u data) bx_address lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3); if (tlbEntry->lpf == lpf) { - // See if the TLB entry privilege level allows us write access - // from this CPL. 
+ // See if the TLB entry privilege level allows us write access from CPL=0 if (isWriteOK(tlbEntry, 0)) { bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; Bit32u pageOffset = PAGE_OFFSET(laddr); @@ -467,6 +460,29 @@ BX_CPU_C::system_write_dword(bx_address laddr, Bit32u data) exception(BX_GP_EXCEPTION, 0); } + void BX_CPP_AttrRegparmN(2) +BX_CPU_C::system_write_qword(bx_address laddr, Bit64u data) +{ + bx_address lpf = LPFOf(laddr); + bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7); + if (tlbEntry->lpf == lpf) { + // See if the TLB entry privilege level allows us write access from CPL=0 + if (isWriteOK(tlbEntry, 0)) { + bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr; + Bit32u pageOffset = PAGE_OFFSET(laddr); + bx_phy_address pAddr = tlbEntry->ppf | pageOffset; + BX_NOTIFY_LIN_MEMORY_ACCESS(laddr, pAddr, 8, tlbEntry->get_memtype(), BX_WRITE, (Bit8u*) &data); + Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset); + pageWriteStampTable.decWriteStamp(pAddr, 8); + WriteHostQWordToLittleEndian(hostAddr, data); + return; + } + } + + if (access_write_linear(laddr, 8, 0, BX_WRITE, 0x0, (void *) &data) < 0) + exception(BX_GP_EXCEPTION, 0); +} + Bit8u* BX_CPP_AttrRegparmN(2) BX_CPU_C::v2h_read_byte(bx_address laddr, bool user) { diff --git a/bochs/cpu/access2.cc b/bochs/cpu/access2.cc index 449c0eb904..574f840678 100644 --- a/bochs/cpu/access2.cc +++ b/bochs/cpu/access2.cc @@ -1232,7 +1232,7 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::shadow_stack_write_qword(bx_address offset bool BX_CPP_AttrRegparmN(4) BX_CPU_C::shadow_stack_lock_cmpxchg8b(bx_address offset, unsigned curr_pl, Bit64u data, Bit64u expected_data) { - Bit64u val64 = shadow_stack_read_qword(offset, curr_pl); + Bit64u val64 = shadow_stack_read_qword(offset, curr_pl); // should be locked and RMW if (val64 == expected_data) { shadow_stack_write_qword(offset, curr_pl, data); return true; diff --git a/bochs/cpu/apic.cc b/bochs/cpu/apic.cc index 67468cbd6e..f97def7404 100644 --- 
a/bochs/cpu/apic.cc +++ b/bochs/cpu/apic.cc @@ -1239,7 +1239,7 @@ bool bx_local_apic_c::read_x2apic(unsigned index, Bit64u *val_64) break; // full 64-bit access to ICR case BX_LAPIC_ICR_LO: - *val_64 = ((Bit64u) icr_lo) | (((Bit64u) icr_hi) << 32); + *val_64 = GET64_FROM_HI32_LO32(icr_hi, icr_lo); break; // not supported/not readable in x2apic mode case BX_LAPIC_ARBITRATION_PRIORITY: diff --git a/bochs/cpu/arith32.cc b/bochs/cpu/arith32.cc index 7a83c3507f..2a3f85943e 100644 --- a/bochs/cpu/arith32.cc +++ b/bochs/cpu/arith32.cc @@ -582,11 +582,11 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPXCHG8B(bxInstruction_c *i) // check write permission for following write Bit64u op1_64 = read_RMW_virtual_qword(i->seg(), eaddr); - Bit64u op2_64 = ((Bit64u) EDX << 32) | EAX; + Bit64u op2_64 = GET64_FROM_HI32_LO32(EDX, EAX); if (op1_64 == op2_64) { // if accumulator == dest // dest <-- src (ECX:EBX) - op2_64 = ((Bit64u) ECX << 32) | EBX; + op2_64 = GET64_FROM_HI32_LO32(ECX, EBX); write_RMW_linear_qword(op2_64); assert_ZF(); } diff --git a/bochs/cpu/avx/avx512_helpers.cc b/bochs/cpu/avx/avx512_helpers.cc index 8297169d13..e9a85b030a 100644 --- a/bochs/cpu/avx/avx512_helpers.cc +++ b/bochs/cpu/avx/avx512_helpers.cc @@ -162,7 +162,7 @@ void BX_CPU_C::avx_masked_store8(bxInstruction_c *i, bx_address eaddr, const BxP // see if you can successfully write all the elements first for (int n=BYTE_ELEMENTS(len)-1; n >= 0; n--) { if (mask & (BX_CONST64(1)<seg(), eaddr + n); + read_RMW_virtual_byte(i->seg(), eaddr + n); // no lock } for (unsigned n=0; n < BYTE_ELEMENTS(len); n++) { @@ -195,7 +195,7 @@ void BX_CPU_C::avx_masked_store16(bxInstruction_c *i, bx_address eaddr, const Bx // see if you can successfully write all the elements first for (int n=WORD_ELEMENTS(len)-1; n >= 0; n--) { if (mask & (1<seg(), eaddr + 2*n); + read_RMW_virtual_word(i->seg(), eaddr + 2*n); // no lock } for (unsigned n=0; n < WORD_ELEMENTS(len); n++) { @@ -232,7 +232,7 @@ void 
BX_CPU_C::avx_masked_store32(bxInstruction_c *i, bx_address eaddr, const Bx // see if you can successfully write all the elements first for (int n=DWORD_ELEMENTS(len)-1; n >= 0; n--) { if (mask & (1<seg(), eaddr + 4*n); + read_RMW_virtual_dword(i->seg(), eaddr + 4*n); // no lock } for (unsigned n=0; n < DWORD_ELEMENTS(len); n++) { @@ -269,7 +269,7 @@ void BX_CPU_C::avx_masked_store64(bxInstruction_c *i, bx_address eaddr, const Bx // see if you can successfully write all the elements first for (int n=QWORD_ELEMENTS(len)-1; n >= 0; n--) { if (mask & (1<seg(), eaddr + 8*n); + read_RMW_virtual_qword(i->seg(), eaddr + 8*n); // no lock } for (unsigned n=0; n < QWORD_ELEMENTS(len); n++) { diff --git a/bochs/cpu/cet.cc b/bochs/cpu/cet.cc index 8b70a9f63a..4a839997ff 100644 --- a/bochs/cpu/cet.cc +++ b/bochs/cpu/cet.cc @@ -238,7 +238,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSTORSSP(bxInstruction_c *i) Bit64u previous_ssp_token = SSP | long64_mode() | 0x02; -// should be done atomically +// should be done atomically using RMW Bit64u SSP_tmp = shadow_stack_read_qword(laddr, CPL); // should be LWSI if ((SSP_tmp & 0x03) != long64_mode()) { BX_ERROR(("%s: CS.L of shadow stack token doesn't match or bit1 is not 0", i->getIaOpcodeNameShort())); @@ -256,7 +256,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::RSTORSSP(bxInstruction_c *i) exception(BX_CP_EXCEPTION, BX_CP_RSTORSSP); } shadow_stack_write_qword(laddr, CPL, previous_ssp_token); -// should be done atomically +// should be done atomically using RMW SSP = laddr; diff --git a/bochs/cpu/cmpccxadd32.cc b/bochs/cpu/cmpccxadd32.cc index a8cb279f09..5ea8ffd8ce 100644 --- a/bochs/cpu/cmpccxadd32.cc +++ b/bochs/cpu/cmpccxadd32.cc @@ -38,7 +38,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPBEXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; 
SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((get_CF() || get_ZF()) ? op1_32 + op3_32 : op1_32); @@ -59,7 +59,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPBXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(get_CF() ? op1_32 + op3_32 : op1_32); @@ -80,7 +80,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPLEXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((get_ZF() || getB_SF() != getB_OF()) ? op1_32 + op3_32 : op1_32); @@ -101,7 +101,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPLXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((getB_SF() != getB_OF()) ? op1_32 + op3_32 : op1_32); @@ -122,7 +122,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNBEXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((!get_CF() && !get_ZF()) ? 
op1_32 + op3_32 : op1_32); @@ -143,7 +143,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNBXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(!get_CF() ? op1_32 + op3_32 : op1_32); @@ -164,7 +164,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNLEXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((!get_ZF() && getB_SF() == getB_OF()) ? op1_32 + op3_32 : op1_32); @@ -185,7 +185,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNLXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((getB_SF() == getB_OF()) ? op1_32 + op3_32 : op1_32); @@ -206,7 +206,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNOXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(!get_OF() ? 
op1_32 + op3_32 : op1_32); @@ -227,7 +227,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNPXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(!get_PF() ? op1_32 + op3_32 : op1_32); @@ -248,7 +248,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNSXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(!get_SF() ? op1_32 + op3_32 : op1_32); @@ -269,7 +269,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNZXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword((!get_ZF()) ? op1_32 + op3_32 : op1_32); @@ -290,7 +290,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPOXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(get_OF() ? 
op1_32 + op3_32 : op1_32); @@ -311,7 +311,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPPXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(get_PF() ? op1_32 + op3_32 : op1_32); @@ -332,7 +332,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(get_SF() ? op1_32 + op3_32 : op1_32); @@ -353,7 +353,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPZXADD_EdGdBd(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); + Bit32u op1_32 = read_RMW_linear_dword(i->seg(), laddr); // implicit lock Bit32u diff_32 = op1_32 - op2_32; SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32); write_RMW_linear_dword(get_ZF() ? op1_32 + op3_32 : op1_32); diff --git a/bochs/cpu/cmpccxadd64.cc b/bochs/cpu/cmpccxadd64.cc index d4430d22f9..f1b8f42f74 100644 --- a/bochs/cpu/cmpccxadd64.cc +++ b/bochs/cpu/cmpccxadd64.cc @@ -38,7 +38,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPBEXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((get_CF() || get_ZF()) ? 
op1_64 + op3_64 : op1_64); @@ -59,7 +59,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPBXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(get_CF() ? op1_64 + op3_64 : op1_64); @@ -80,7 +80,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPLEXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((get_ZF() || getB_SF() != getB_OF()) ? op1_64 + op3_64 : op1_64); @@ -101,7 +101,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPLXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((getB_SF() != getB_OF()) ? op1_64 + op3_64 : op1_64); @@ -122,7 +122,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNBEXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((!get_CF() && !get_ZF()) ? 
op1_64 + op3_64 : op1_64); @@ -143,7 +143,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNBXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(!get_CF() ? op1_64 + op3_64 : op1_64); @@ -164,7 +164,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNLEXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((!get_ZF() && getB_SF() == getB_OF()) ? op1_64 + op3_64 : op1_64); @@ -185,7 +185,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNLXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((getB_SF() == getB_OF()) ? op1_64 + op3_64 : op1_64); @@ -206,7 +206,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNOXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(!get_OF() ? 
op1_64 + op3_64 : op1_64); @@ -227,7 +227,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNPXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(!get_PF() ? op1_64 + op3_64 : op1_64); @@ -248,7 +248,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNSXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(!get_SF() ? op1_64 + op3_64 : op1_64); @@ -269,7 +269,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPNZXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword((!get_ZF()) ? op1_64 + op3_64 : op1_64); @@ -290,7 +290,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPOXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(get_OF() ? 
op1_64 + op3_64 : op1_64); @@ -311,7 +311,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPPXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(get_PF() ? op1_64 + op3_64 : op1_64); @@ -332,7 +332,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(get_SF() ? op1_64 + op3_64 : op1_64); @@ -353,7 +353,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPZXADD_EqGqBq(bxInstruction_c *i) exception(BX_GP_EXCEPTION, 0); } - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), laddr); // implicit lock Bit64u diff_64 = op1_64 - op2_64; SET_FLAGS_OSZAPC_SUB_64(op1_64, op2_64, diff_64); write_RMW_linear_qword(get_ZF() ? 
op1_64 + op3_64 : op1_64); diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h index a468e3330f..0db6337cbb 100644 --- a/bochs/cpu/cpu.h +++ b/bochs/cpu/cpu.h @@ -1054,12 +1054,11 @@ class BOCHSAPI BX_CPU_C : public logfunctions { BX_ACTIVITY_STATE_HLT, BX_ACTIVITY_STATE_SHUTDOWN, BX_ACTIVITY_STATE_WAIT_FOR_SIPI, + BX_VMX_LAST_ACTIVITY_STATE = BX_ACTIVITY_STATE_WAIT_FOR_SIPI, BX_ACTIVITY_STATE_MWAIT, BX_ACTIVITY_STATE_MWAIT_IF }; -#define BX_VMX_LAST_ACTIVITY_STATE (BX_ACTIVITY_STATE_WAIT_FOR_SIPI) - unsigned activity_state; #define BX_EVENT_NMI (1 << 0) @@ -4280,6 +4279,7 @@ class BOCHSAPI BX_CPU_C : public logfunctions { BX_SMF void system_write_byte(bx_address laddr, Bit8u data) BX_CPP_AttrRegparmN(2); BX_SMF void system_write_word(bx_address laddr, Bit16u data) BX_CPP_AttrRegparmN(2); BX_SMF void system_write_dword(bx_address laddr, Bit32u data) BX_CPP_AttrRegparmN(2); + BX_SMF void system_write_qword(bx_address laddr, Bit64u data) BX_CPP_AttrRegparmN(2); BX_SMF Bit8u* v2h_read_byte(bx_address laddr, bool user) BX_CPP_AttrRegparmN(2); BX_SMF Bit8u* v2h_write_byte(bx_address laddr, bool user) BX_CPP_AttrRegparmN(2); @@ -4388,6 +4388,7 @@ class BOCHSAPI BX_CPU_C : public logfunctions { #endif #if BX_CPU_LEVEL >= 5 BX_SMF bool SetEFER(bx_address val) BX_CPP_AttrRegparmN(1); + BX_SMF Bit32u get_efer_allow_mask(void); #endif BX_SMF bx_address read_CR0(void); diff --git a/bochs/cpu/crregs.cc b/bochs/cpu/crregs.cc index 08bac49daa..2436a5b1d7 100644 --- a/bochs/cpu/crregs.cc +++ b/bochs/cpu/crregs.cc @@ -1774,6 +1774,33 @@ void BX_CPU_C::xsave_xrestor_init(void) #endif } +#if BX_CPU_LEVEL >= 5 + +Bit32u BX_CPU_C::get_efer_allow_mask(void) +{ + Bit32u efer_allowed_mask = 0; + + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_NX)) + efer_allowed_mask |= BX_EFER_NXE_MASK; + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SYSCALL_SYSRET_LEGACY)) + efer_allowed_mask |= BX_EFER_SCE_MASK; +#if BX_SUPPORT_X86_64 + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_LONG_MODE)) { + efer_allowed_mask |= 
(BX_EFER_SCE_MASK | BX_EFER_LME_MASK | BX_EFER_LMA_MASK); + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_FFXSR)) + efer_allowed_mask |= BX_EFER_FFXSR_MASK; + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SVM)) + efer_allowed_mask |= BX_EFER_SVME_MASK; + if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_TCE)) + efer_allowed_mask |= BX_EFER_TCE_MASK; + } +#endif + + return efer_allowed_mask; +} + +#endif + Bit32u BX_CPU_C::get_xcr0_allow_mask(void) { Bit32u allowMask = 0x3; diff --git a/bochs/cpu/crregs.h b/bochs/cpu/crregs.h index ac48dde815..c846c57618 100644 --- a/bochs/cpu/crregs.h +++ b/bochs/cpu/crregs.h @@ -257,6 +257,7 @@ const unsigned XSAVE_HI_ZMM_STATE_LEN = 1024; const unsigned XSAVE_PKRU_STATE_LEN = 64; const unsigned XSAVE_CET_U_STATE_LEN = 16; const unsigned XSAVE_CET_S_STATE_LEN = 24; +const unsigned XSAVE_UINTR_STATE_LEN = 48; const unsigned XSAVE_SSE_STATE_OFFSET = 160; const unsigned XSAVE_YMM_STATE_OFFSET = 576; @@ -272,18 +273,22 @@ struct xcr0_t { BX_XCR0_FPU_BIT = 0, BX_XCR0_SSE_BIT = 1, BX_XCR0_YMM_BIT = 2, - BX_XCR0_BNDREGS_BIT = 3, - BX_XCR0_BNDCFG_BIT = 4, + BX_XCR0_BNDREGS_BIT = 3, // not implemented, deprecated + BX_XCR0_BNDCFG_BIT = 4, // not implemented, deprecated BX_XCR0_OPMASK_BIT = 5, BX_XCR0_ZMM_HI256_BIT = 6, BX_XCR0_HI_ZMM_BIT = 7, - BX_XCR0_PT_BIT = 8, + BX_XCR0_PT_BIT = 8, // not implemented yet BX_XCR0_PKRU_BIT = 9, + BX_XCR0_PASID_BIT = 10, // not implemented yet BX_XCR0_CET_U_BIT = 11, BX_XCR0_CET_S_BIT = 12, + BX_XCR0_HDC_BIT = 13, // not implemented yet BX_XCR0_UINTR_BIT = 14, - BX_XCR0_XTILECFG_BIT = 17, - BX_XCR0_XTILEDATA_BIT = 18, + BX_XCR0_LBR_BIT = 15, // not implemented yet + BX_XCR0_HWP_BIT = 16, // not implemented yet + BX_XCR0_XTILECFG_BIT = 17, // not implemented yet + BX_XCR0_XTILEDATA_BIT = 18, // not implemented yet BX_XCR0_LAST }; @@ -297,9 +302,13 @@ struct xcr0_t { #define BX_XCR0_HI_ZMM_MASK (1 << xcr0_t::BX_XCR0_HI_ZMM_BIT) #define BX_XCR0_PT_MASK (1 << xcr0_t::BX_XCR0_PT_BIT) #define BX_XCR0_PKRU_MASK (1 << 
xcr0_t::BX_XCR0_PKRU_BIT) +#define BX_XCR0_PASID_MASK (1 << xcr0_t::BX_XCR0_PASID_BIT) #define BX_XCR0_CET_U_MASK (1 << xcr0_t::BX_XCR0_CET_U_BIT) #define BX_XCR0_CET_S_MASK (1 << xcr0_t::BX_XCR0_CET_S_BIT) +#define BX_XCR0_HDC_MASK (1 << xcr0_t::BX_XCR0_HDC_BIT) #define BX_XCR0_UINTR_MASK (1 << xcr0_t::BX_XCR0_UINTR_BIT) +#define BX_XCR0_LBR_MASK (1 << xcr0_t::BX_XCR0_LBR_BIT) +#define BX_XCR0_HWP_MASK (1 << xcr0_t::BX_XCR0_HWP_BIT) #define BX_XCR0_XTILECFG_MASK (1 << xcr0_t::BX_XCR0_XTILECFG_BIT) #define BX_XCR0_XTILEDATA_MASK (1 << xcr0_t::BX_XCR0_XTILEDATA_BIT) @@ -313,8 +322,12 @@ struct xcr0_t { IMPLEMENT_CRREG_ACCESSORS(HI_ZMM, BX_XCR0_HI_ZMM_BIT); IMPLEMENT_CRREG_ACCESSORS(PT, BX_XCR0_PT_BIT); IMPLEMENT_CRREG_ACCESSORS(PKRU, BX_XCR0_PKRU_BIT); + IMPLEMENT_CRREG_ACCESSORS(PASID, BX_XCR0_PASID_BIT); IMPLEMENT_CRREG_ACCESSORS(CET_U, BX_XCR0_CET_U_BIT); IMPLEMENT_CRREG_ACCESSORS(CET_S, BX_XCR0_CET_S_BIT); + IMPLEMENT_CRREG_ACCESSORS(UINTR, BX_XCR0_UINTR_BIT); + IMPLEMENT_CRREG_ACCESSORS(LBR, BX_XCR0_LBR_BIT); + IMPLEMENT_CRREG_ACCESSORS(HWP, BX_XCR0_HWP_BIT); IMPLEMENT_CRREG_ACCESSORS(XTILECFG, BX_XCR0_XTILECFG_BIT); IMPLEMENT_CRREG_ACCESSORS(XTILEDATA, BX_XCR0_XTILEDATA_BIT); diff --git a/bochs/cpu/data_xfer16.cc b/bochs/cpu/data_xfer16.cc index d7bd71c401..2dd74229bc 100644 --- a/bochs/cpu/data_xfer16.cc +++ b/bochs/cpu/data_xfer16.cc @@ -201,14 +201,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSX_GwEbR(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::XCHG_EwGwM(bxInstruction_c *i) { - Bit16u op1_16, op2_16; - bx_address eaddr = BX_CPU_RESOLVE_ADDR(i); - - op1_16 = read_RMW_virtual_word(i->seg(), eaddr); - op2_16 = BX_READ_16BIT_REG(i->src()); - - write_RMW_linear_word(op2_16); + Bit16u op1_16 = read_RMW_virtual_word(i->seg(), eaddr); // always locked + write_RMW_linear_word(BX_READ_16BIT_REG(i->src())); BX_WRITE_16BIT_REG(i->src(), op1_16); BX_NEXT_INSTR(i); diff --git a/bochs/cpu/data_xfer32.cc b/bochs/cpu/data_xfer32.cc index 
3d843550bc..bec074d9dd 100644 --- a/bochs/cpu/data_xfer32.cc +++ b/bochs/cpu/data_xfer32.cc @@ -198,10 +198,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSX_GdEwR(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::XCHG_EdGdM(bxInstruction_c *i) { bx_address eaddr = BX_CPU_RESOLVE_ADDR(i); - - Bit32u op1_32 = read_RMW_virtual_dword(i->seg(), eaddr); - Bit32u op2_32 = BX_READ_32BIT_REG(i->src()); - write_RMW_linear_dword(op2_32); + Bit32u op1_32 = read_RMW_virtual_dword(i->seg(), eaddr); // always locked + write_RMW_linear_dword(BX_READ_32BIT_REG(i->src())); BX_WRITE_32BIT_REGZ(i->src(), op1_32); BX_NEXT_INSTR(i); diff --git a/bochs/cpu/data_xfer64.cc b/bochs/cpu/data_xfer64.cc index e842ef2342..be4f73a511 100644 --- a/bochs/cpu/data_xfer64.cc +++ b/bochs/cpu/data_xfer64.cc @@ -292,11 +292,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSX_GqEdR(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::XCHG_EqGqM(bxInstruction_c *i) { bx_address eaddr = BX_CPU_RESOLVE_ADDR_64(i); - - Bit64u op1_64 = read_RMW_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr)); - Bit64u op2_64 = BX_READ_64BIT_REG(i->src()); - - write_RMW_linear_qword(op2_64); + Bit64u op1_64 = read_RMW_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr)); // always locked + write_RMW_linear_qword(BX_READ_64BIT_REG(i->src())); BX_WRITE_64BIT_REG(i->src(), op1_64); BX_NEXT_INSTR(i); diff --git a/bochs/cpu/data_xfer8.cc b/bochs/cpu/data_xfer8.cc index 353f08816e..df0ecef6e6 100644 --- a/bochs/cpu/data_xfer8.cc +++ b/bochs/cpu/data_xfer8.cc @@ -100,7 +100,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::XCHG_EbGbM(bxInstruction_c *i) { bx_address eaddr = BX_CPU_RESOLVE_ADDR(i); - Bit8u op1 = read_RMW_virtual_byte(i->seg(), eaddr); + Bit8u op1 = read_RMW_virtual_byte(i->seg(), eaddr); // always locked Bit8u op2 = BX_READ_8BIT_REGx(i->src(), i->extend8bitL()); write_RMW_linear_byte(op2); diff --git a/bochs/cpu/init.cc b/bochs/cpu/init.cc index 2974705dfa..8637cc15ed 100644 --- a/bochs/cpu/init.cc +++ 
b/bochs/cpu/init.cc @@ -651,8 +651,6 @@ void BX_CPU_C::param_restore(bx_param_c *param, Bit64s val) void BX_CPU_C::after_restore_state(void) { - handleCpuContextChange(); - BX_CPU_THIS_PTR prev_rip = RIP; if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_IA32_REAL) CPL = 0; @@ -672,6 +670,8 @@ void BX_CPU_C::after_restore_state(void) set_PKeys(BX_CPU_THIS_PTR pkru, BX_CPU_THIS_PTR pkrs); #endif + handleCpuContextChange(); + assert_checks(); debug(RIP); } @@ -912,22 +912,7 @@ void BX_CPU_C::reset(unsigned source) #endif BX_CPU_THIS_PTR efer.set32(0); - BX_CPU_THIS_PTR efer_suppmask = 0; - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_NX)) - BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_NXE_MASK; - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SYSCALL_SYSRET_LEGACY)) - BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_SCE_MASK; -#if BX_SUPPORT_X86_64 - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_LONG_MODE)) { - BX_CPU_THIS_PTR efer_suppmask |= (BX_EFER_SCE_MASK | BX_EFER_LME_MASK | BX_EFER_LMA_MASK); - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_FFXSR)) - BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_FFXSR_MASK; - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_SVM)) - BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_SVME_MASK; - if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_TCE)) - BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_TCE_MASK; - } -#endif + BX_CPU_THIS_PTR efer_suppmask = get_efer_allow_mask(); BX_CPU_THIS_PTR msr.star = 0; #if BX_SUPPORT_X86_64 diff --git a/bochs/cpu/io.cc b/bochs/cpu/io.cc index eff1038954..c1507afd32 100644 --- a/bochs/cpu/io.cc +++ b/bochs/cpu/io.cc @@ -226,7 +226,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_INSB_YbDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB16_YbDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit8u value8 = read_RMW_virtual_byte_32(BX_SEG_REG_ES, DI); + Bit8u value8 = read_RMW_virtual_byte_32(BX_SEG_REG_ES, DI); // no lock value8 = BX_INP(DX, 1); @@ -242,7 +242,7 @@ void BX_CPP_AttrRegparmN(1) 
BX_CPU_C::INSB16_YbDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB32_YbDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit8u value8 = read_RMW_virtual_byte(BX_SEG_REG_ES, EDI); + Bit8u value8 = read_RMW_virtual_byte(BX_SEG_REG_ES, EDI); // no lock value8 = BX_INP(DX, 1); @@ -263,7 +263,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB32_YbDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB64_YbDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit8u value8 = read_RMW_linear_byte(BX_SEG_REG_ES, RDI); + Bit8u value8 = read_RMW_linear_byte(BX_SEG_REG_ES, RDI); // no lock value8 = BX_INP(DX, 1); @@ -305,7 +305,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_INSW_YwDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW16_YwDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit16u value16 = read_RMW_virtual_word_32(BX_SEG_REG_ES, DI); + Bit16u value16 = read_RMW_virtual_word_32(BX_SEG_REG_ES, DI); // no lock value16 = BX_INP(DX, 2); @@ -344,7 +344,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW32_YwDX(bxInstruction_c *i) } else { // trigger any segment or page faults before reading from IO port - value16 = read_RMW_virtual_word(BX_SEG_REG_ES, edi); + value16 = read_RMW_virtual_word(BX_SEG_REG_ES, edi); // no lock value16 = BX_INP(DX, 2); @@ -355,7 +355,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW32_YwDX(bxInstruction_c *i) #endif { // trigger any segment or page faults before reading from IO port - value16 = read_RMW_virtual_word_32(BX_SEG_REG_ES, edi); + value16 = read_RMW_virtual_word_32(BX_SEG_REG_ES, edi); // no lock value16 = BX_INP(DX, 2); @@ -374,7 +374,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW32_YwDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW64_YwDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit16u value16 = 
read_RMW_linear_word(BX_SEG_REG_ES, RDI); + Bit16u value16 = read_RMW_linear_word(BX_SEG_REG_ES, RDI); // no lock value16 = BX_INP(DX, 2); @@ -416,7 +416,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::REP_INSD_YdDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD16_YdDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit32u value32 = read_RMW_virtual_dword_32(BX_SEG_REG_ES, DI); + Bit32u value32 = read_RMW_virtual_dword_32(BX_SEG_REG_ES, DI); // no lock value32 = BX_INP(DX, 4); @@ -432,7 +432,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD16_YdDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD32_YdDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit32u value32 = read_RMW_virtual_dword(BX_SEG_REG_ES, EDI); + Bit32u value32 = read_RMW_virtual_dword(BX_SEG_REG_ES, EDI); // no lock value32 = BX_INP(DX, 4); @@ -450,7 +450,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD32_YdDX(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD64_YdDX(bxInstruction_c *i) { // trigger any segment or page faults before reading from IO port - Bit32u value32 = read_RMW_linear_dword(BX_SEG_REG_ES, RDI); + Bit32u value32 = read_RMW_linear_dword(BX_SEG_REG_ES, RDI); // no lock value32 = BX_INP(DX, 4); diff --git a/bochs/cpu/mmx.cc b/bochs/cpu/mmx.cc index f3dcadf7cc..df26517ade 100644 --- a/bochs/cpu/mmx.cc +++ b/bochs/cpu/mmx.cc @@ -2374,7 +2374,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MASKMOVQ_PqNq(bxInstruction_c *i) mask = BX_READ_MMX_REG(i->src2()); /* do read-modify-write for efficiency */ - MMXUQ(tmp) = read_RMW_virtual_qword(i->seg(), rdi); + MMXUQ(tmp) = read_RMW_virtual_qword(i->seg(), rdi); // no lock if(!MMXUQ(mask)) { BX_NEXT_INSTR(i); diff --git a/bochs/cpu/msr.cc b/bochs/cpu/msr.cc index 067be94029..c3de378213 100644 --- a/bochs/cpu/msr.cc +++ b/bochs/cpu/msr.cc @@ -1172,7 +1172,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSR(bxInstruction_c *i) 
invalidate_prefetch_q(); - Bit64u val_64 = ((Bit64u) EDX << 32) | EAX; + Bit64u val_64 = GET64_FROM_HI32_LO32(EDX, EAX); Bit32u index = ECX; #if BX_SUPPORT_SVM @@ -1251,9 +1251,9 @@ int BX_CPU_C::load_MSRs(const char *file) reset_hi, reset_lo, rsrv_hi, rsrv_lo, ignr_hi, ignr_lo)); BX_CPU_THIS_PTR msrs[index] = new MSR(index, type, - ((Bit64u)(reset_hi) << 32) | reset_lo, - ((Bit64u) (rsrv_hi) << 32) | rsrv_lo, - ((Bit64u) (ignr_hi) << 32) | ignr_lo); + GET64_FROM_HI32_LO32(reset_hi, reset_lo), + GET64_FROM_HI32_LO32(rsrv_hi, rsrv_lo), + GET64_FROM_HI32_LO32(ignr_hi, ignr_lo)); } } while (!feof(fd)); diff --git a/bochs/cpu/mult32.cc b/bochs/cpu/mult32.cc index 4742961cd0..1ba553e7a1 100644 --- a/bochs/cpu/mult32.cc +++ b/bochs/cpu/mult32.cc @@ -80,7 +80,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::DIV_EAXEdR(bxInstruction_c *i) exception(BX_DE_EXCEPTION, 0); } - Bit64u op1_64 = (((Bit64u) EDX) << 32) + ((Bit64u) EAX); + Bit64u op1_64 = GET64_FROM_HI32_LO32(EDX, EAX); Bit64u quotient_64 = op1_64 / op2_32; Bit32u remainder_32 = (Bit32u) (op1_64 % op2_32); @@ -104,7 +104,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::DIV_EAXEdR(bxInstruction_c *i) void BX_CPP_AttrRegparmN(1) BX_CPU_C::IDIV_EAXEdR(bxInstruction_c *i) { - Bit64s op1_64 = (((Bit64u) EDX) << 32) | ((Bit64u) EAX); + Bit64s op1_64 = GET64_FROM_HI32_LO32(EDX, EAX); /* check MIN_INT case */ if (op1_64 == ((Bit64s)BX_CONST64(0x8000000000000000))) diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc index e4ec41d19b..39c05bd607 100644 --- a/bochs/cpu/paging.cc +++ b/bochs/cpu/paging.cc @@ -864,7 +864,7 @@ void BX_CPU_C::update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry for (unsigned level=max_level; level > leaf; level--) { if (!(entry[level] & 0x20)) { entry[level] |= 0x20; - access_write_physical(entry_addr[level], 8, &entry[level]); + access_write_physical(entry_addr[level], 8, &entry[level]); // should be done with locked RMW BX_NOTIFY_PHY_MEMORY_ACCESS(entry_addr[level], 8, entry_memtype[level], 
BX_WRITE, (BX_PTE_ACCESS + level), (Bit8u*)(&entry[level])); } @@ -873,7 +873,7 @@ void BX_CPU_C::update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry // Update A/D bits if needed if (!(entry[leaf] & 0x20) || (write && !(entry[leaf] & 0x40))) { entry[leaf] |= (0x20 | (write<<6)); // Update A and possibly D bits - access_write_physical(entry_addr[leaf], 8, &entry[leaf]); + access_write_physical(entry_addr[leaf], 8, &entry[leaf]); // should be done with locked RMW BX_NOTIFY_PHY_MEMORY_ACCESS(entry_addr[leaf], 8, entry_memtype[leaf], BX_WRITE, (BX_PTE_ACCESS + leaf), (Bit8u*)(&entry[leaf])); } diff --git a/bochs/cpu/segment_ctrl_pro.cc b/bochs/cpu/segment_ctrl_pro.cc index c573933613..2c51975474 100644 --- a/bochs/cpu/segment_ctrl_pro.cc +++ b/bochs/cpu/segment_ctrl_pro.cc @@ -511,6 +511,7 @@ BX_CPU_C::touch_segment(bx_selector_t *selector, bx_descriptor_t *descriptor) AR_byte |= 1; descriptor->type |= 1; + // should be done with locked RMW if (selector->ti == 0) { /* GDT */ system_write_byte(BX_CPU_THIS_PTR gdtr.base + selector->index*8 + 5, AR_byte); } diff --git a/bochs/cpu/sse_move.cc b/bochs/cpu/sse_move.cc index c64e2d336e..8b93127182 100644 --- a/bochs/cpu/sse_move.cc +++ b/bochs/cpu/sse_move.cc @@ -616,8 +616,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MASKMOVDQU_VdqUdq(bxInstruction_c *i) mask = BX_READ_XMM_REG(i->src2()), temp; // check for write permissions before writing even if mask is all 0s - temp.xmm64u(0) = read_RMW_virtual_qword(i->seg(), rdi); - temp.xmm64u(1) = read_RMW_virtual_qword(i->seg(), (rdi + 8) & i->asize_mask()); + temp.xmm64u(0) = read_RMW_virtual_qword(i->seg(), rdi); // no lock + temp.xmm64u(1) = read_RMW_virtual_qword(i->seg(), (rdi + 8) & i->asize_mask()); // no lock /* no data will be written to memory if mask is all 0s */ if ((mask.xmm64u(0) | mask.xmm64u(1)) == 0) { diff --git a/bochs/cpu/stack16.cc b/bochs/cpu/stack16.cc index 68cace89ce..cc22f9a8b2 100644 --- a/bochs/cpu/stack16.cc +++ b/bochs/cpu/stack16.cc @@ 
-206,7 +206,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER16_IwIb(bxInstruction_c *i) // ENTER finishes with memory write check on the final stack pointer // the memory is touched but no write actually occurs // emulate it by doing RMW read access from SS:ESP - read_RMW_virtual_word_32(BX_SEG_REG_SS, ESP); + read_RMW_virtual_word_32(BX_SEG_REG_SS, ESP); // no lock, should be touch only BP = frame_ptr16; } @@ -230,7 +230,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER16_IwIb(bxInstruction_c *i) // ENTER finishes with memory write check on the final stack pointer // the memory is touched but no write actually occurs // emulate it by doing RMW read access from SS:SP - read_RMW_virtual_word_32(BX_SEG_REG_SS, SP); + read_RMW_virtual_word_32(BX_SEG_REG_SS, SP); // no lock, should be touch only } BP = frame_ptr16; diff --git a/bochs/cpu/stack32.cc b/bochs/cpu/stack32.cc index 9d33880755..cf33fbb264 100644 --- a/bochs/cpu/stack32.cc +++ b/bochs/cpu/stack32.cc @@ -223,7 +223,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER32_IwIb(bxInstruction_c *i) // ENTER finishes with memory write check on the final stack pointer // the memory is touched but no write actually occurs // emulate it by doing RMW read access from SS:ESP - read_RMW_virtual_dword_32(BX_SEG_REG_SS, ESP); + read_RMW_virtual_dword_32(BX_SEG_REG_SS, ESP); // no lock, should be touch only } else { Bit16u bp = BP; @@ -245,7 +245,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER32_IwIb(bxInstruction_c *i) // ENTER finishes with memory write check on the final stack pointer // the memory is touched but no write actually occurs // emulate it by doing RMW read access from SS:SP - read_RMW_virtual_dword_32(BX_SEG_REG_SS, SP); + read_RMW_virtual_dword_32(BX_SEG_REG_SS, SP); // no lock, should be touch only } EBP = frame_ptr32; diff --git a/bochs/cpu/stack64.cc b/bochs/cpu/stack64.cc index ba8a8893aa..e56c9c2370 100644 --- a/bochs/cpu/stack64.cc +++ b/bochs/cpu/stack64.cc @@ -124,7 +124,7 @@ void BX_CPP_AttrRegparmN(1) 
BX_CPU_C::ENTER64_IwIb(bxInstruction_c *i) // ENTER finishes with memory write check on the final stack pointer // the memory is touched but no write actually occurs // emulate it by doing RMW read access from SS:RSP - read_RMW_linear_qword(BX_SEG_REG_SS, temp_RSP); + read_RMW_linear_qword(BX_SEG_REG_SS, temp_RSP); // no lock, should be touch only RBP = frame_ptr64; RSP = temp_RSP; diff --git a/bochs/cpu/vmx.h b/bochs/cpu/vmx.h index 3e98becd36..f6466dfd4b 100644 --- a/bochs/cpu/vmx.h +++ b/bochs/cpu/vmx.h @@ -961,7 +961,7 @@ const Bit32u BX_VMCS_SHADOW_BIT_MASK = 0x80000000; (is_cpu_extension_supported(BX_ISA_CET) ? (1<<24) : 0) #define VMX_MSR_VMX_BASIC \ - ((((Bit64u) VMX_MSR_VMX_BASIC_HI) << 32) | VMX_MSR_VMX_BASIC_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_BASIC_HI, VMX_MSR_VMX_BASIC_LO) // ------------------------------------------------------------------------ @@ -992,7 +992,7 @@ const Bit32u VMX_MSR_VMX_PINBASED_CTRLS_LO = 0x00000016; (VMX_VM_EXEC_CTRL1_SUPPORTED_BITS | VMX_MSR_VMX_PINBASED_CTRLS_LO) #define VMX_MSR_VMX_PINBASED_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_PINBASED_CTRLS_HI) << 32) | VMX_MSR_VMX_PINBASED_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_PINBASED_CTRLS_HI, VMX_MSR_VMX_PINBASED_CTRLS_LO) // IA32_MSR_VMX_TRUE_PINBASED_CTRLS MSR (0x48d) // -------------------------------- @@ -1002,7 +1002,7 @@ const Bit32u VMX_MSR_VMX_PINBASED_CTRLS_LO = 0x00000016; #define VMX_MSR_VMX_TRUE_PINBASED_CTRLS_HI (VMX_MSR_VMX_PINBASED_CTRLS_HI) #define VMX_MSR_VMX_TRUE_PINBASED_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_TRUE_PINBASED_CTRLS_HI) << 32) | VMX_MSR_VMX_TRUE_PINBASED_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_TRUE_PINBASED_CTRLS_HI, VMX_MSR_VMX_TRUE_PINBASED_CTRLS_LO) // IA32_MSR_VMX_PROCBASED_CTRLS MSR (0x482) @@ -1019,7 +1019,7 @@ const Bit32u VMX_MSR_VMX_PROCBASED_CTRLS_LO = 0x0401E172; (VMX_VM_EXEC_CTRL2_SUPPORTED_BITS | VMX_MSR_VMX_PROCBASED_CTRLS_LO) #define VMX_MSR_VMX_PROCBASED_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_PROCBASED_CTRLS_HI) << 32) | 
VMX_MSR_VMX_PROCBASED_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_PROCBASED_CTRLS_HI, VMX_MSR_VMX_PROCBASED_CTRLS_LO) // IA32_MSR_VMX_TRUE_PROCBASED_CTRLS MSR (0x48e) // --------------------------------- @@ -1029,7 +1029,7 @@ const Bit32u VMX_MSR_VMX_PROCBASED_CTRLS_LO = 0x0401E172; #define VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_HI (VMX_MSR_VMX_PROCBASED_CTRLS_HI) #define VMX_MSR_VMX_TRUE_PROCBASED_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_HI) << 32) | VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_HI, VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_LO) // IA32_MSR_VMX_VMEXIT_CTRLS MSR (0x483) @@ -1044,7 +1044,7 @@ const Bit32u VMX_MSR_VMX_VMEXIT_CTRLS_LO = 0x00036DFF; (VMX_VMEXIT_CTRL1_SUPPORTED_BITS | VMX_MSR_VMX_VMEXIT_CTRLS_LO) #define VMX_MSR_VMX_VMEXIT_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_VMEXIT_CTRLS_HI) << 32) | VMX_MSR_VMX_VMEXIT_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_VMEXIT_CTRLS_HI, VMX_MSR_VMX_VMEXIT_CTRLS_LO) // IA32_MSR_VMX_TRUE_VMEXIT_CTRLS MSR (0x48f) // ------------------------------ @@ -1054,7 +1054,7 @@ const Bit32u VMX_MSR_VMX_VMEXIT_CTRLS_LO = 0x00036DFF; #define VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_HI (VMX_MSR_VMX_VMEXIT_CTRLS_HI) #define VMX_MSR_VMX_TRUE_VMEXIT_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_HI) << 32) | VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_HI, VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_LO) // IA32_MSR_VMX_VMENTRY_CTRLS MSR (0x484) @@ -1069,7 +1069,7 @@ const Bit32u VMX_MSR_VMX_VMENTRY_CTRLS_LO = 0x000011FF; (VMX_VMENTRY_CTRL1_SUPPORTED_BITS | VMX_MSR_VMX_VMENTRY_CTRLS_LO) #define VMX_MSR_VMX_VMENTRY_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_VMENTRY_CTRLS_HI) << 32) | VMX_MSR_VMX_VMENTRY_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_VMENTRY_CTRLS_HI, VMX_MSR_VMX_VMENTRY_CTRLS_LO) // IA32_MSR_VMX_TRUE_VMENTRY_CTRLS MSR (0x490) // ------------------------------- @@ -1079,7 +1079,7 @@ const Bit32u VMX_MSR_VMX_VMENTRY_CTRLS_LO = 0x000011FF; #define 
VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_HI (VMX_MSR_VMX_VMENTRY_CTRLS_HI) #define VMX_MSR_VMX_TRUE_VMENTRY_CTRLS \ - ((((Bit64u) VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_HI) << 32) | VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_HI, VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_LO) // IA32_MSR_VMX_MISC MSR (0x485) @@ -1135,14 +1135,14 @@ const Bit32u VMX_MSR_CR0_FIXED0_LO = 0x80000021; const Bit32u VMX_MSR_CR0_FIXED0_HI = 0x00000000; const Bit64u VMX_MSR_CR0_FIXED0 = - ((((Bit64u) VMX_MSR_CR0_FIXED0_HI) << 32) | VMX_MSR_CR0_FIXED0_LO); + GET64_FROM_HI32_LO32(VMX_MSR_CR0_FIXED0_HI, VMX_MSR_CR0_FIXED0_LO); // allowed 1-setting in CR0 in VMX mode const Bit32u VMX_MSR_CR0_FIXED1_LO = 0xFFFFFFFF; const Bit32u VMX_MSR_CR0_FIXED1_HI = 0x00000000; const Bit64u VMX_MSR_CR0_FIXED1 = - ((((Bit64u) VMX_MSR_CR0_FIXED1_HI) << 32) | VMX_MSR_CR0_FIXED1_LO); + GET64_FROM_HI32_LO32(VMX_MSR_CR0_FIXED1_HI, VMX_MSR_CR0_FIXED1_LO); // // IA32_VMX_CR4_FIXED0 MSR (0x488) IA32_VMX_CR4_FIXED1 MSR (0x489) @@ -1154,14 +1154,14 @@ const Bit32u VMX_MSR_CR4_FIXED0_LO = 0x00002000; const Bit32u VMX_MSR_CR4_FIXED0_HI = 0x00000000; const Bit64u VMX_MSR_CR4_FIXED0 = - ((((Bit64u) VMX_MSR_CR4_FIXED0_HI) << 32) | VMX_MSR_CR4_FIXED0_LO); + GET64_FROM_HI32_LO32(VMX_MSR_CR4_FIXED0_HI, VMX_MSR_CR4_FIXED0_LO); // allowed 1-setting in CR0 in VMX mode #define VMX_MSR_CR4_FIXED1_LO (BX_CPU_THIS_PTR cr4_suppmask) #define VMX_MSR_CR4_FIXED1_HI (0) #define VMX_MSR_CR4_FIXED1 \ - ((((Bit64u) VMX_MSR_CR4_FIXED1_HI) << 32) | VMX_MSR_CR4_FIXED1_LO) + GET64_FROM_HI32_LO32(VMX_MSR_CR4_FIXED1_HI, VMX_MSR_CR4_FIXED1_LO) // @@ -1177,7 +1177,7 @@ const Bit32u VMX_MSR_VMCS_ENUM_LO = VMX_HIGHEST_VMCS_ENCODING; const Bit32u VMX_MSR_VMCS_ENUM_HI = 0x00000000; const Bit64u VMX_MSR_VMCS_ENUM = - ((((Bit64u) VMX_MSR_VMCS_ENUM_HI) << 32) | VMX_MSR_VMCS_ENUM_LO); + GET64_FROM_HI32_LO32(VMX_MSR_VMCS_ENUM_HI, VMX_MSR_VMCS_ENUM_LO); // IA32_VMX_MSR_PROCBASED_CTRLS2 MSR (0x48b) @@ -1190,7 +1190,7 @@ const Bit32u 
VMX_MSR_VMX_PROCBASED_CTRLS2_LO = 0x00000000; (VMX_VM_EXEC_CTRL3_SUPPORTED_BITS | VMX_MSR_VMX_PROCBASED_CTRLS2_LO) #define VMX_MSR_VMX_PROCBASED_CTRLS2 \ - ((((Bit64u) VMX_MSR_VMX_PROCBASED_CTRLS2_HI) << 32) | VMX_MSR_VMX_PROCBASED_CTRLS2_LO) + GET64_FROM_HI32_LO32(VMX_MSR_VMX_PROCBASED_CTRLS2_HI, VMX_MSR_VMX_PROCBASED_CTRLS2_LO) #if BX_SUPPORT_VMX >= 2 diff --git a/bochs/cpu/xsave.cc b/bochs/cpu/xsave.cc index 5be580ee7b..0913264965 100644 --- a/bochs/cpu/xsave.cc +++ b/bochs/cpu/xsave.cc @@ -147,7 +147,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::XSAVEC(bxInstruction_c *i) } VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs; - Bit64u requested_features = (((Bit64u) EDX) << 32) | EAX; + Bit64u requested_features = GET64_FROM_HI32_LO32(EDX, EAX); if (requested_features & BX_CPU_THIS_PTR msr.ia32_xss & vm->xss_exiting_bitmap) VMexit_Instruction(i, VMX_VMEXIT_XSAVES); } @@ -250,7 +250,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::XRSTOR(bxInstruction_c *i) } VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs; - Bit64u requested_features = (((Bit64u) EDX) << 32) | EAX; + Bit64u requested_features = GET64_FROM_HI32_LO32(EDX, EAX); if (requested_features & BX_CPU_THIS_PTR msr.ia32_xss & vm->xss_exiting_bitmap) VMexit_Instruction(i, VMX_VMEXIT_XRSTORS); } @@ -923,8 +923,8 @@ void BX_CPU_C::xrstor_init_cet_u_state(void) bool BX_CPU_C::xsave_cet_u_state_xinuse(void) { - return BX_CPU_THIS_PTR msr.ia32_cet_control[1] == 0 && - BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] == 0; + return BX_CPU_THIS_PTR msr.ia32_cet_control[1] != 0 || + BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] != 0; } // CET S state management // diff --git a/bochs/iodev/network/e1000.cc b/bochs/iodev/network/e1000.cc index 0e59fd8549..6cee856284 100644 --- a/bochs/iodev/network/e1000.cc +++ b/bochs/iodev/network/e1000.cc @@ -12,7 +12,7 @@ // Copyright (c) 2007 Dan Aloni // Copyright (c) 2004 Antony T Curtis // -// Copyright (C) 2011-2021 The Bochs Project +// Copyright (C) 2011-2023 The Bochs Project // // This library is free software; you can 
redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -459,7 +459,7 @@ void bx_e1000_main_c::init(void) bx_list_c *base = (bx_list_c*) SIM->get_param(pname); if (SIM->get_param_bool("enabled", base)->get()) { theE1000Dev[card] = new bx_e1000_c(); - theE1000Dev[card]->init(card); + theE1000Dev[card]->init_card(card); count++; } } @@ -527,7 +527,7 @@ bx_e1000_c::~bx_e1000_c() BX_DEBUG(("Exit")); } -void bx_e1000_c::init(Bit8u card) +void bx_e1000_c::init_card(Bit8u card) { char pname[20]; Bit8u macaddr[6]; diff --git a/bochs/iodev/network/e1000.h b/bochs/iodev/network/e1000.h index 80203d31ee..2ba2de69f0 100644 --- a/bochs/iodev/network/e1000.h +++ b/bochs/iodev/network/e1000.h @@ -12,7 +12,7 @@ // Copyright (c) 2007 Dan Aloni // Copyright (c) 2004 Antony T Curtis // -// Copyright (C) 2011-2021 The Bochs Project +// Copyright (C) 2011-2023 The Bochs Project // // This library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public @@ -114,7 +114,7 @@ class bx_e1000_c : public bx_pci_device_c { public: bx_e1000_c(); virtual ~bx_e1000_c(); - virtual void init(Bit8u card); + virtual void init_card(Bit8u card); virtual void reset(unsigned type); void e1000_register_state(bx_list_c *parent, Bit8u card); virtual void after_restore_state(void);