This code was written a while back, and since I was still used to C at the time, a lot of things, including the helper routines, ended up crammed together in one main file; what it actually implements is a simple read/write versus execute split. I also borrowed plenty of other people's code (no, it's mine: once you copy-paste it, it becomes yours).
First, the main function. I write in Sublime, so the Chinese comments come out garbled. The real core of VT is nesting (which I can't do yet), and then there is compatibility across the various versions.
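Before the listing, a rough reading guide distilled from the code itself (no extra functionality is implied):
// DriverEntry -> Initialize_EPTP()   : check EPT features, build the MTRR map, build the identity-mapped EPT
//             -> EptPageHook(...)    : split the target 2MB page and make it execute-only
//             -> Initiate_VMX()      : per core, VMXON region, VMCS region, Setup_VMCS, AsmVmxSaveState + vmlaunch
// DrvUnload   -> Terminate_VMX()     : per core, VMCALL back into the exit handler, vmxoff, free everything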
#include "CPU.h"
#include "MSR.h"
#include "comm.h"
#include "EPT.h"
#include "WMX.h"
extern void inline Breakpoint(void);
extern void inline Enable_VMX_Operation(void);
extern void inline Disable_VMX_Operation(void);
extern ULONG64 inline Get_GDT_Base(void);
extern ULONG64 inline Get_IDT_Base(void);
extern VOID GetCpuIdInfo( __in ULONG32 fn, __out PULONG32 ret_eax, __out PULONG32 ret_ebx,__out PULONG32 ret_ecx,__out PULONG32 ret_edx);
extern USHORT GetCs(VOID);
extern USHORT GetDs(VOID);
extern USHORT GetEs(VOID);
extern USHORT GetSs(VOID);
extern USHORT GetFs(VOID);
extern USHORT GetGs(VOID);
extern USHORT GetLdtr(VOID);
extern USHORT GetTr(VOID);
extern USHORT Get_IDT_Limit(VOID);
extern USHORT Get_GDT_Limit(VOID);
extern ULONG64 Get_RFLAGS(VOID);
extern void inline AsmVmxSaveState(VOID);
extern void inline retfc(VOID);
extern void AsmVmxRestoreState(VOID);
extern void AsmVmexitHandler(VOID);
extern void Wmcall(VOID);
extern void inline AsmVmxRestore(VOID);
extern unsigned short AsmGetGdtLimit();
extern unsigned short AsmGetIdtLimit();
extern unsigned long long inline AsmGetGdtBase();
extern unsigned short AsmGetRflags();
UINT64 g_StackPointerForReturning;
UINT64 g_BasePointerForReturning;
extern unsigned char inline AsmInvvpid(unsigned long Type, void* Descriptors);
extern unsigned char inline INVEPT_Instruction(_In_ unsigned long type, _In_ void* descriptor);
extern unsigned char inline AsmInvept(unsigned long Type, void* Descriptors);
extern void MSRWrite(ULONG32 reg, ULONG64 MsrValue);
extern ULONG64 MSRRead(ULONG32 reg);
EPT_STATE* EptState;
// Check whether VMX (VT-x) is supported
BOOLEAN Is_VMX_Supported(){
CPUID data = { 0 };
// The VMX bit (bit 5) must be 1; see Intel SDM chapter 23.6
__cpuid((int*)&data, 1);
if ((data.ecx & (1 << 5)) == 0)
return FALSE;
IA32_FEATURE_CONTROL_MSR Control = { 0 };// corresponds to the BIOS setting
Control.All = __readmsr(MSR_IA32_FEATURE_CONTROL);
// BIOS lock check, Intel SDM chapter 23.7
if (Control.Fields.Lock == 0){// the lock bit must be 1 before VMX can actually be enabled
Control.Fields.Lock = TRUE;
Control.Fields.EnableVmxon = TRUE;
__writemsr(MSR_IA32_FEATURE_CONTROL, Control.All);
}else if (Control.Fields.EnableVmxon == FALSE){
DbgPrint("VMX locked off in BIOS");
return FALSE;
}
return TRUE;
}
// Set up VMXON
#define ALIGNMENT_PAGE_SIZE 4096
#define MAXIMUM_ADDRESS 0xffffffffffffffff
#define VMCS_SIZE 4096
#define VMXON_SIZE 4096
UINT64 VirtualAddress_to_PhysicallAddress(void* va){
return MmGetPhysicalAddress(va).QuadPart;// convert a virtual address to a physical address
}
BOOLEAN Allocate_VMXON_Region(IN PVirtualMachineState vmState){
// at IRQL > DISPATCH_LEVEL memory allocation routines don't work
if (KeGetCurrentIrql() > DISPATCH_LEVEL)
KeRaiseIrqlToDpcLevel();
PHYSICAL_ADDRESS PhysicalMax = { 0 };
PhysicalMax.QuadPart = MAXULONG64;
int VMXONSize = 2 * VMXON_SIZE;
PUCHAR Buffer = MmAllocateContiguousMemory(VMXONSize + ALIGNMENT_PAGE_SIZE, PhysicalMax); // Allocating a 4-KByte Contigous Memory region
PHYSICAL_ADDRESS Highest = { 0 }, Lowest = { 0 };
Highest.QuadPart = ~0;
//BYTE* Buffer = MmAllocateContiguousMemorySpecifyCache(VMXONSize + ALIGNMENT_PAGE_SIZE, Lowest, Highest, Lowest, MmNonCached);
if (Buffer == NULL) {
DbgPrint("Error : Couldn't Allocate Buffer for VMXON Region.\n");
return FALSE;// ntStatus = STATUS_INSUFFICIENT_RESOURCES;
}
UINT64 PhysicalBuffer = VirtualAddress_to_PhysicallAddress(Buffer);
// zero-out memory
RtlSecureZeroMemory(Buffer, VMXONSize + ALIGNMENT_PAGE_SIZE);
UINT64 alignedPhysicalBuffer = (PUCHAR)((ULONG_PTR)(PhysicalBuffer + ALIGNMENT_PAGE_SIZE - 1) &~(ALIGNMENT_PAGE_SIZE - 1));
UINT64 alignedVirtualBuffer = (PUCHAR)((ULONG_PTR)(Buffer + ALIGNMENT_PAGE_SIZE - 1) &~(ALIGNMENT_PAGE_SIZE - 1));
DbgPrint("Virtual allocated buffer for VMXON at %llx\n", Buffer);
DbgPrint("Virtual aligned allocated buffer for VMXON at %llx\n", alignedVirtualBuffer);
DbgPrint("Aligned physical buffer allocated for VMXON at %llx\n", alignedPhysicalBuffer);
// get IA32_VMX_BASIC_MSR RevisionId
IA32_VMX_BASIC_MSR basic = { 0 };
basic.All = __readmsr(MSR_IA32_VMX_BASIC);
DbgPrint("MSR_IA32_VMX_BASIC (MSR 0x480) Revision Identifier %llx\n", basic.Fields.RevisionIdentifier);
//Changing Revision Identifier
*(UINT64 *)alignedVirtualBuffer = basic.Fields.RevisionIdentifier;
int status = __vmx_on(&alignedPhysicalBuffer);
if (status){
DbgPrint("VMXON failed with status %d\n", status);
return FALSE;
}
vmState->VMXON_REGION = alignedPhysicalBuffer;
return TRUE;
}
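/* Illustrative only (made-up address): the 4KB alignment trick above works like this.
   If MmAllocateContiguousMemory returned physical address 0x1234A30, then
   (0x1234A30 + 0xFFF) & ~0xFFF == 0x1235000, the first 4KB boundary inside the
   (2 * VMXON_SIZE + 4KB) allocation; the same rounding is applied to the virtual
   address before the revision identifier is written into it. */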
BOOLEAN Allocate_VMCS_Region(IN PVirtualMachineState vmState){
// at IRQL > DISPATCH_LEVEL memory allocation routines don't work
if (KeGetCurrentIrql() > DISPATCH_LEVEL)
KeRaiseIrqlToDpcLevel();
PHYSICAL_ADDRESS PhysicalMax = { 0 };
PhysicalMax.QuadPart = MAXULONG64;
int VMCSSize = 2 * VMCS_SIZE;
PUCHAR Buffer = MmAllocateContiguousMemory(VMCSSize + ALIGNMENT_PAGE_SIZE, PhysicalMax); // Allocating a 4-KByte Contigous Memory region
PHYSICAL_ADDRESS Highest = { 0 }, Lowest = { 0 };
Highest.QuadPart = ~0;
//BYTE* Buffer = MmAllocateContiguousMemorySpecifyCache(VMXONSize + ALIGNMENT_PAGE_SIZE, Lowest, Highest, Lowest, MmNonCached);
UINT64 PhysicalBuffer = VirtualAddress_to_PhysicallAddress(Buffer);
if (Buffer == NULL) {
DbgPrint("Error : Couldn't Allocate Buffer for VMCS Region.\n");
return FALSE;// ntStatus = STATUS_INSUFFICIENT_RESOURCES;
}
// zero-out memory
RtlSecureZeroMemory(Buffer, VMCSSize + ALIGNMENT_PAGE_SIZE);
UINT64 alignedPhysicalBuffer = (PUCHAR)((ULONG_PTR)(PhysicalBuffer + ALIGNMENT_PAGE_SIZE - 1) &~(ALIGNMENT_PAGE_SIZE - 1));
UINT64 alignedVirtualBuffer = (PUCHAR)((ULONG_PTR)(Buffer + ALIGNMENT_PAGE_SIZE - 1) &~(ALIGNMENT_PAGE_SIZE - 1));
DbgPrint("Virtual allocated buffer for VMCS at %llx\n", Buffer);
DbgPrint("Virtual aligned allocated buffer for VMCS at %llx\n", alignedVirtualBuffer);
DbgPrint("Aligned physical buffer allocated for VMCS at %llx\n", alignedPhysicalBuffer);
// get IA32_VMX_BASIC_MSR RevisionId
IA32_VMX_BASIC_MSR basic = { 0 };
basic.All = __readmsr(MSR_IA32_VMX_BASIC);
DbgPrint("MSR_IA32_VMX_BASIC (MSR 0x480) Revision Identifier %llx\n", basic.Fields.RevisionIdentifier);
//Changing Revision Identifier
*(UINT64 *)alignedVirtualBuffer = basic.Fields.RevisionIdentifier;
vmState->VMCS_REGION = alignedPhysicalBuffer;
return TRUE;
}
// Set up the VMCS here
BOOLEAN Clear_VMCS_State(IN PVirtualMachineState vmState) {
// Clear the state of the VMCS to inactive
int status = __vmx_vmclear(&vmState->VMCS_REGION);
DbgPrint("VMCS VMCLAEAR Status is : %d\n", status);
if (status){
// Otherwise terminate the VMX
DbgPrint("VMCS failed to clear with status %d\n", status);
__vmx_off();
return FALSE;
}
return TRUE;
}
BOOLEAN Load_VMCS(IN PVirtualMachineState vmState){
int status = __vmx_vmptrld(&vmState->VMCS_REGION);
if (status){
DbgPrint("VMCS failed with status %d\n", status);
return FALSE;
}
return TRUE;
}
BOOLEAN GetSegmentDescriptor(IN PSEGMENT_SELECTOR SegmentSelector, IN USHORT Selector, IN PUCHAR GdtBase){
PSEGMENT_DESCRIPTOR SegDesc;
if (!SegmentSelector)
return FALSE;
if (Selector & 0x4) {
return FALSE;
}
SegDesc = (PSEGMENT_DESCRIPTOR)((PUCHAR)GdtBase + (Selector & ~0x7));
SegmentSelector->SEL = Selector;
SegmentSelector->BASE = SegDesc->BASE0 | SegDesc->BASE1 << 16 | SegDesc->BASE2 << 24;
SegmentSelector->LIMIT = SegDesc->LIMIT0 | (SegDesc->LIMIT1ATTR1 & 0xf) << 16;
SegmentSelector->ATTRIBUTES.UCHARs = SegDesc->ATTR0 | (SegDesc->LIMIT1ATTR1 & 0xf0) << 4;
if (!(SegDesc->ATTR0 & 0x10)) { // LA_ACCESSED
ULONG64 tmp;
// this is a TSS or callgate etc, save the base high part
tmp = (*(PULONG64)((PUCHAR)SegDesc + 8));
SegmentSelector->BASE = (SegmentSelector->BASE & 0xffffffff) | (tmp << 32);
}
if (SegmentSelector->ATTRIBUTES.Fields.G) {
// 4-KByte granularity is enabled for this segment, scale the limit
SegmentSelector->LIMIT = (SegmentSelector->LIMIT << 12) + 0xfff;
}
return TRUE;
}
BOOLEAN SetGuestSelector(IN PVOID GDT_Base, IN ULONG Segment_Register, IN USHORT Selector){
SEGMENT_SELECTOR SegmentSelector = { 0 };
ULONG uAccessRights;
GetSegmentDescriptor(&SegmentSelector, Selector, GDT_Base);
uAccessRights = ((PUCHAR)& SegmentSelector.ATTRIBUTES)[0] + (((PUCHAR)& SegmentSelector.ATTRIBUTES)[1] << 12);
if (!Selector)
uAccessRights |= 0x10000;
__vmx_vmwrite(GUEST_ES_SELECTOR + Segment_Register * 2, Selector);
__vmx_vmwrite(GUEST_ES_LIMIT + Segment_Register * 2, SegmentSelector.LIMIT);
__vmx_vmwrite(GUEST_ES_AR_BYTES + Segment_Register * 2, uAccessRights);
__vmx_vmwrite(GUEST_ES_BASE + Segment_Register * 2, SegmentSelector.BASE);
return TRUE;
}
ULONG AdjustControls(IN ULONG Ctl, IN ULONG Msr){
MSR MsrValue = { 0 };
MsrValue.Content = __readmsr(Msr);
Ctl &= MsrValue.High; /* bit == 0 in high word ==> must be zero */
Ctl |= MsrValue.Low; /* bit == 1 in low word ==> must be one */
return Ctl;
}
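/* Hypothetical example of the allowed-0/allowed-1 semantics above: if
   __readmsr(MSR_IA32_VMX_PINBASED_CTLS) happened to return 0x0000003F00000016, then
   MsrValue.Low  = 0x00000016 (bits that must be 1, ORed in) and
   MsrValue.High = 0x0000003F (bits allowed to be 1, ANDed), so
   AdjustControls(0, MSR_IA32_VMX_PINBASED_CTLS) == 0x16, and any requested bit
   outside 0x3F would be silently cleared. */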
void FillGuestSelectorData(__in PVOID GdtBase,__in ULONG Segreg,__in USHORT Selector){
SEGMENT_SELECTOR SegmentSelector = { 0 };
ULONG uAccessRights;
GetSegmentDescriptor(&SegmentSelector, Selector, GdtBase);
uAccessRights = ((PUCHAR)& SegmentSelector.ATTRIBUTES)[0] + (((PUCHAR)& SegmentSelector.ATTRIBUTES)[1] << 12);
if (!Selector)
uAccessRights |= 0x10000;
__vmx_vmwrite(GUEST_ES_SELECTOR + Segreg * 2, Selector);
__vmx_vmwrite(GUEST_ES_LIMIT + Segreg * 2, SegmentSelector.LIMIT);
__vmx_vmwrite(GUEST_ES_AR_BYTES + Segreg * 2, uAccessRights);
__vmx_vmwrite(GUEST_ES_BASE + Segreg * 2, SegmentSelector.BASE);
}
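/* Assumption about the headers included here: the "+ Segreg * 2" indexing in
   SetGuestSelector/FillGuestSelectorData only works if the segment constants are
   defined in order ES=0, CS=1, SS=2, DS=3, FS=4, GS=5, LDTR=6, TR=7, because the
   VMCS places the selector/limit/access-rights/base fields of consecutive segments
   two encodings apart. */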
/* Handles Guest Access to control registers */
VOID HvHandleControlRegisterAccess(PGUEST_REGS GuestState){
ULONG ExitQualification = 0;
__vmx_vmread(EXIT_QUALIFICATION, &ExitQualification);
PMOV_CR_QUALIFICATION data = (PMOV_CR_QUALIFICATION)&ExitQualification;
PULONG64 regPtr = (PULONG64)&GuestState->rax + data->Fields.Register;
/* Because this is RSP, and we did not save RSP correctly (it was clobbered by the pushes), we make the register pointer refer to GUEST_RSP instead */
if (data->Fields.Register == 4){
INT64 RSP = 0;
__vmx_vmread(GUEST_RSP, &RSP);
*regPtr = RSP;
}
switch (data->Fields.AccessType){
case TYPE_MOV_TO_CR:{
switch (data->Fields.ControlRegister){
case 0:
__vmx_vmwrite(GUEST_CR0, *regPtr);
__vmx_vmwrite(CR0_READ_SHADOW, *regPtr);
break;
case 3:
__vmx_vmwrite(GUEST_CR3, (*regPtr & ~(1ULL << 63)));
break;
case 4:
__vmx_vmwrite(GUEST_CR4, *regPtr);
__vmx_vmwrite(CR4_READ_SHADOW, *regPtr);
break;
default:
DbgPrint("Unsupported register %d\n", data->Fields.ControlRegister);
break;
}
}
break;
case TYPE_MOV_FROM_CR:{
switch (data->Fields.ControlRegister){
case 0:
__vmx_vmread(GUEST_CR0, regPtr);
break;
case 3:
__vmx_vmread(GUEST_CR3, regPtr);
break;
case 4:
__vmx_vmread(GUEST_CR4, regPtr);
break;
default:
DbgPrint("Unsupported register %d\n", data->Fields.ControlRegister);
break;
}
}
break;
default:
DbgPrint("Unsupported operation %d\n", data->Fields.AccessType);
break;
}
}
/* Handles the case where RDMSR causes a VM exit */
// Hypervisor reserved range for RDMSR and WRMSR
#define RESERVED_MSR_RANGE_LOW 0x40000000
#define RESERVED_MSR_RANGE_HI 0x400000F0
VOID HvHandleMsrRead(PGUEST_REGS GuestRegs){
MSR msr = { 0 };
// RDMSR. The RDMSR instruction causes a VM exit if any of the following are true:
//
// The "use MSR bitmaps" VM-execution control is 0.
// The value of ECX is not in the ranges 00000000H - 00001FFFH and C0000000H - C0001FFFH
// The value of ECX is in the range 00000000H - 00001FFFH and bit n in read bitmap for low MSRs is 1,
// where n is the value of ECX.
// The value of ECX is in the range C0000000H - C0001FFFH and bit n in read bitmap for high MSRs is 1,
// where n is the value of ECX & 00001FFFH.
/*if (((GuestRegs->rcx <= 0x00001FFF)) || ((0xC0000000 <= GuestRegs->rcx) && (GuestRegs->rcx <= 0xC0001FFF)))
{*/
msr.Content = MSRRead((ULONG)GuestRegs->rcx);
/*}else{
msr.Content = 0;
}*/
GuestRegs->rax = msr.Low;
GuestRegs->rdx = msr.High;
}
/* Handles the case where WRMSR causes a VM exit */
VOID HvHandleMsrWrite(PGUEST_REGS GuestRegs){
MSR msr = { 0 };
// Check for sanity of MSR
/*if ((GuestRegs->rcx <= 0x00001FFF) || ((0xC0000000 <= GuestRegs->rcx) && (GuestRegs->rcx <= 0xC0001FFF)))
{*/
msr.Low = (ULONG)GuestRegs->rax;
msr.High = (ULONG)GuestRegs->rdx;
MSRWrite((ULONG)GuestRegs->rcx, msr.Content);
/*}*/
}
ULONG64 gust_rip;
ULONG64 retrip;
/* Handle Cpuid Vmexits*/
#define DPL_SYSTEM 0
BOOLEAN HvHandleCpuid(PGUEST_REGS RegistersState){
INT32 cpu_info[4];
// Check for the magic CPUID sequence, and check that it is coming from
// Ring 0. Technically we could also check the RIP and see if this falls
// in the expected function, but we may want to allow a separate "unload"
// driver or code at some point.
ULONG Mode = 0;
__vmx_vmread(GUEST_CS_SELECTOR, &Mode);
Mode = Mode & RPL_MASK;
if ((RegistersState->rax == 0x41414141) && (RegistersState->rcx == 0x42424242) && Mode == DPL_SYSTEM){
return TRUE; // Indicates we have to turn off VMX
}
// Otherwise, issue the CPUID to the logical processor based on the indexes
// on the VP's GPRs.
__cpuidex(cpu_info, (INT32)RegistersState->rax, (INT32)RegistersState->rcx);
// Check if this was CPUID 1h, which is the features request.
if (RegistersState->rax == 1){
// Set the Hypervisor Present-bit in RCX, which Intel and AMD have both
// reserved for this indication.
cpu_info[2] |= HYPERV_HYPERVISOR_PRESENT_BIT;
}else if (RegistersState->rax == HYPERV_CPUID_INTERFACE){
// Return our interface identifier
cpu_info[0] = 'HVFS'; // [H]yper[v]isor [F]rom [S]cratch
}
// Copy the values from the logical processor registers into the VP GPRs.
RegistersState->rax = cpu_info[0];
RegistersState->rbx = cpu_info[1];
RegistersState->rcx = cpu_info[2];
RegistersState->rdx = cpu_info[3];
return FALSE; // Indicates we don't have to turn off VMX
}
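/* Hypothetical, unused helper (not part of the original code) showing how ring-0
   code could exercise the magic-CPUID unload check above; this driver actually
   tears down through VMCALL in Terminate_VMX instead. */
VOID AskHypervisorToStop(VOID){
INT32 Info[4];
// eax = 0x41414141, ecx = 0x42424242 matches the check in HvHandleCpuid
__cpuidex(Info, 0x41414141, 0x42424242);
}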
/* Check if this exit is due to a violation caused by a currently hooked page. Returns FALSE
* if the violation was not due to a page hook.
*
* If the memory access attempt was RW and the page was marked executable, the page is swapped with
* the original page.
*
* If the memory access attempt was execute and the page was marked not executable, the page is swapped with
* the hooked page.
*/
EPT_PML1_ENTRY OrigAddress;
EPTE fakepagephy;// the non-executable fake page
BOOLEAN EptHandlePageHookExit(VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, UINT64 GuestPhysicalAddr){
SIZE_T PhysicalAddress;
PVOID VirtualTarget;
PEPT_PML1_ENTRY TargetPage;
/* Translate the page from a physical address to virtual so we can read its memory.
This function will return NULL if the physical address was not already mapped in
virtual memory.
*/
PhysicalAddress = PAGE_ALIGN(GuestPhysicalAddr);
if (!PhysicalAddress){
DbgPrint("Target address could not be mapped to physical memory");
return FALSE;
}
TargetPage = EptGetPml1Entry(EptState->EptPageTable, PhysicalAddress);
// Ensure the target is valid.
if (!TargetPage){
DbgPrint("Failed to get PML1 entry for target address");
return FALSE;
}
// If the violation was due to trying to execute a non-executable page, that means that the currently
// swapped in page is our original RW page. We need to swap in the hooked executable page (fake page)
/*if (!ViolationQualification.EptExecutable && ViolationQualification.ExecuteAccess){
TargetPage->ExecuteAccess = 1;
// InveptAllContexts();
INVEPT_DESCRIPTOR Descriptor;
Descriptor.EptPointer = EptState->EptPointer.Flags;
Descriptor.Reserved = 0;
AsmInvept(1, &Descriptor);
// Redo the instruction
DbgPrint("Set the Execute Access of a page (PFN = 0x%llx) to 1", TargetPage->PageFrameNumber);
return TRUE;
}*/
// Here we handle read and execute accesses
if (!ViolationQualification.EptExecutable && ViolationQualification.ExecuteAccess) {// the page is currently not executable
//OrigAddress.ExecuteAccess = 1;
TargetPage->Flags = OrigAddress.Flags;
//*TargetPage = OrigAddress;// this page is executable and writable
}else {// the page is currently not readable/writable
//*TargetPage = fakepagephy;// this one only has the read attribute
//KdBreakPoint();
TargetPage->Flags = fakepagephy.Flags;
}
// invalidate the TLB/EPT cache
INVEPT_DESCRIPTOR Descriptor;
Descriptor.EptPointer = EptState->EptPointer.Flags;
Descriptor.Reserved = 0;
AsmInvept(1, &Descriptor);
DbgPrint("Invalid page swapping logic in hooked page");
return TRUE;
}
/*
Handle VM exits for EPT violations. Violations are thrown whenever an operation is performed
on an EPT entry that does not provide permissions to access that page.
*/
BOOLEAN EptHandleEptViolation(ULONG ExitQualification, UINT64 GuestPhysicalAddr){
VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification;
//DbgBreakPoint();
ViolationQualification.Flags = ExitQualification;
if (EptHandlePageHookExit(ViolationQualification, GuestPhysicalAddr)){
// Handled by page hook code.
return TRUE;
}
DbgPrint("Unexpected EPT violation \n");
DbgBreakPoint();
// Redo the instruction that caused the exception.
return FALSE;
}
VOID ResumeToNextInstruction(VOID){
ULONG64 ResumeRIP = NULL;
ULONG64 CurrentRIP = NULL;
ULONG ExitInstructionLength = 0;
__vmx_vmread(GUEST_RIP, &CurrentRIP);
__vmx_vmread(VM_EXIT_INSTRUCTION_LEN, &ExitInstructionLength);
ResumeRIP = CurrentRIP + ExitInstructionLength;
__vmx_vmwrite(GUEST_RIP, ResumeRIP);
}
// This hooks IDT vector 3, i.e. int3
typedef enum _EXCEPTION_VECTORS{
EXCEPTION_VECTOR_DIVIDE_ERROR,
EXCEPTION_VECTOR_DEBUG_BREAKPOINT,
EXCEPTION_VECTOR_NMI,
EXCEPTION_VECTOR_BREAKPOINT,
EXCEPTION_VECTOR_OVERFLOW,
EXCEPTION_VECTOR_BOUND_RANGE_EXCEEDED,
EXCEPTION_VECTOR_UNDEFINED_OPCODE,
EXCEPTION_VECTOR_NO_MATH_COPROCESSOR,
EXCEPTION_VECTOR_DOUBLE_FAULT,
EXCEPTION_VECTOR_RESERVED0,
EXCEPTION_VECTOR_INVALID_TASK_SEGMENT_SELECTOR,
EXCEPTION_VECTOR_SEGMENT_NOT_PRESENT,
EXCEPTION_VECTOR_STACK_SEGMENT_FAULT,
EXCEPTION_VECTOR_GENERAL_PROTECTION_FAULT,
EXCEPTION_VECTOR_PAGE_FAULT,
EXCEPTION_VECTOR_RESERVED1,
EXCEPTION_VECTOR_MATH_FAULT,
EXCEPTION_VECTOR_ALIGNMENT_CHECK,
EXCEPTION_VECTOR_MACHINE_CHECK,
EXCEPTION_VECTOR_SIMD_FLOATING_POINT_NUMERIC_ERROR,
EXCEPTION_VECTOR_VIRTUAL_EXCEPTION,
EXCEPTION_VECTOR_RESERVED2,
EXCEPTION_VECTOR_RESERVED3,
EXCEPTION_VECTOR_RESERVED4,
EXCEPTION_VECTOR_RESERVED5,
EXCEPTION_VECTOR_RESERVED6,
EXCEPTION_VECTOR_RESERVED7,
EXCEPTION_VECTOR_RESERVED8,
EXCEPTION_VECTOR_RESERVED9,
EXCEPTION_VECTOR_RESERVED10,
EXCEPTION_VECTOR_RESERVED11,
EXCEPTION_VECTOR_RESERVED12
}EXCEPTION_VECTORS;
typedef enum _INTERRUPT_TYPE{
INTERRUPT_TYPE_EXTERNAL_INTERRUPT = 0,
INTERRUPT_TYPE_RESERVED = 1,
INTERRUPT_TYPE_NMI = 2,
INTERRUPT_TYPE_HARDWARE_EXCEPTION = 3,
INTERRUPT_TYPE_SOFTWARE_INTERRUPT = 4,
INTERRUPT_TYPE_PRIVILEGED_SOFTWARE_INTERRUPT = 5,
INTERRUPT_TYPE_SOFTWARE_EXCEPTION = 6,
INTERRUPT_TYPE_OTHER_EVENT = 7
}INTERRUPT_TYPE;
// Injects interruption to a guest
typedef union _INTERRUPT_INFO {
struct {
UINT32 Vector : 8;
/* 0=Ext Int, 1=Rsvd, 2=NMI, 3=Exception, 4=Soft INT,
* 5=Priv Soft Trap, 6=Unpriv Soft Trap, 7=Other */
UINT32 InterruptType : 3;
UINT32 DeliverCode : 1; /* 0=Do not deliver, 1=Deliver */
UINT32 Reserved : 19;
UINT32 Valid : 1; /* 0=Not valid, 1=Valid. Must be checked first */
};
UINT32 Flags;
} INTERRUPT_INFO, *PINTERRUPT_INFO;
typedef union _VMEXIT_INTERRUPT_INFO {
struct {
UINT32 Vector : 8;
UINT32 InterruptionType : 3;
UINT32 ErrorCodeValid : 1;
UINT32 NmiUnblocking : 1;
UINT32 Reserved : 18;
UINT32 Valid : 1;
};
UINT32 Flags;
}VMEXIT_INTERRUPT_INFO, *PVMEXIT_INTERRUPT_INFO;
VOID EventInjectInterruption(INTERRUPT_TYPE InterruptionType, EXCEPTION_VECTORS Vector, BOOLEAN DeliverErrorCode, ULONG32 ErrorCode){
INTERRUPT_INFO Inject = { 0 };// the meaning of these fields can be found in Table 21-14
Inject.Valid = TRUE;
Inject.InterruptType = InterruptionType;
Inject.Vector = Vector;
Inject.DeliverCode = DeliverErrorCode;
__vmx_vmwrite(VM_ENTRY_INTR_INFO, Inject.Flags);
if (DeliverErrorCode) {// whether an error code needs to be pushed for this event
__vmx_vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, ErrorCode);
}
}
// If we intercepted int3 but don't want to handle it ourselves, we have to construct an int3 and inject it back
VOID EventInjectBreakpoint(){
EventInjectInterruption(INTERRUPT_TYPE_SOFTWARE_EXCEPTION, EXCEPTION_VECTOR_BREAKPOINT, FALSE, 0);
UINT32 ExitInstrLength;
__vmx_vmread(VM_EXIT_INSTRUCTION_LEN, &ExitInstrLength);
__vmx_vmwrite(VM_ENTRY_INSTRUCTION_LEN, ExitInstrLength);
}
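/* Illustrative counterpart (not used by this driver): a fault that carries an error
   code, e.g. #GP(0), would be injected as a hardware exception; in that case
   VM_ENTRY_INSTRUCTION_LEN is not needed, since it only matters for software
   interrupts/exceptions. */
VOID EventInjectGeneralProtection(){
EventInjectInterruption(INTERRUPT_TYPE_HARDWARE_EXCEPTION, EXCEPTION_VECTOR_GENERAL_PROTECTION_FAULT, TRUE, 0);
}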
ULONG flagModify = 0;
void VmxVmexitHandler(PGUEST_REGS GuestState){//
ULONG32 fEpt = 0;
ULONG64 ResumeRIP = NULL;
ULONG64 EXITReason;
ULONG ExitQualification;
UINT64 GuestPhysicalAddr;
__vmx_vmread(VM_EXIT_REASON, &EXITReason);
__vmx_vmread(GUEST_RIP, &ResumeRIP);
__vmx_vmread(EXIT_QUALIFICATION, &ExitQualification);
switch (EXITReason){
case EXIT_REASON_VMCALL: {
//Breakpoint();
retrip += 2;// the instruction length has to be added here
if (flagModify == 1)
retfc();
break;
}case EXIT_REASON_MSR_READ:{
HvHandleMsrRead(GuestState);
break;
}case EXIT_REASON_CPUID:{
HvHandleCpuid(GuestState);
break;
}case EXIT_REASON_MSR_WRITE:{
HvHandleMsrWrite(GuestState);
break;
}case EXIT_REASON_CR_ACCESS:{
HvHandleControlRegisterAccess(GuestState);
break;
}case EXIT_REASON_EPT_VIOLATION: {
fEpt = 1;
// Reading guest physical address
GuestPhysicalAddr = 0;
__vmx_vmread(GUEST_PHYSICAL_ADDRESS, &GuestPhysicalAddr);
DbgPrint("Guest Physical Address : 0x%llX\n", GuestPhysicalAddr);
DbgPrint("Guest Rip : 0x%llX\n", ResumeRIP);
if (!EptHandleEptViolation(ExitQualification, GuestPhysicalAddr)){
DbgPrint("There were errors in handling Ept Violation\n");
}
break;
}case EXIT_REASON_EXCEPTION_NMI:{
VMEXIT_INTERRUPT_INFO InterruptExit;
/*
Exception or non-maskable interrupt (NMI). Either:
1: Guest software caused an exception and the bit in the exception bitmap associated with that exception's vector was set to 1
2: An NMI was delivered to the logical processor and the "NMI exiting" VM-execution control was 1.
VM_EXIT_INTR_INFO holds the exit information about the event that occurred and caused this exit.
Don't forget to read VM_EXIT_INTR_ERROR_CODE when re-injecting the event.
*/
// read the exit reason
__vmx_vmread(VM_EXIT_INTR_INFO, &InterruptExit);
//DbgBreakPoint();
if (InterruptExit.InterruptionType == INTERRUPT_TYPE_SOFTWARE_EXCEPTION && InterruptExit.Vector == EXCEPTION_VECTOR_BREAKPOINT){
ULONG64 GuestRip;
// Reading guest's RIP
__vmx_vmread(GUEST_RIP, &GuestRip);
// Send the user
DbgPrint("Breakpoint Hit (Process Id : 0x%x) at : %llx \n", PsGetCurrentProcessId(), GuestRip);
// re-inject #BP back to the guest
fEpt = 1;// RIP must not be advanced here
EventInjectBreakpoint();
}else{
DbgPrint("Not expected event occured %llX \n", InterruptExit.Vector);
}
break;
}default:{
DbgPrint("Unkown Vmexit, reason : 0x%llx rip :%llX \n", EXITReason, ResumeRIP);
Breakpoint();
break;
}
}
if(fEpt==0){
/*ULONG64 ResumeRIP = NULL;
//ULONG64 gust_rip = 0;
ULONG ExitInstructionLength = 0;
__vmx_vmread(GUEST_RIP, &gust_rip);
__vmx_vmread(VM_EXIT_INSTRUCTION_LEN, &ExitInstructionLength);
ResumeRIP = gust_rip + ExitInstructionLength;
__vmx_vmwrite(GUEST_RIP, ResumeRIP);
ULONG64 ResumeRIP = NULL;
ULONG64 CurrentRIP = NULL;
ULONG ExitInstructionLength = 0;
__vmx_vmread(GUEST_RIP, &CurrentRIP);
__vmx_vmread(VM_EXIT_INSTRUCTION_LEN, &ExitInstructionLength);
ResumeRIP = CurrentRIP + ExitInstructionLength;
__vmx_vmwrite(GUEST_RIP, ResumeRIP);*/
ResumeToNextInstruction();
}
}
void GuestHandler(PGUEST_REGS GuestState) {
}
typedef struct _NT_KPROCESS{
DISPATCHER_HEADER Header;
LIST_ENTRY ProfileListHead;
ULONG_PTR DirectoryTableBase;
UCHAR Data[1];
}NT_KPROCESS, *PNT_KPROCESS;
UINT64 FindSystemDirectoryTableBase(){
// Return CR3 of the system process.
NT_KPROCESS* SystemProcess = (NT_KPROCESS*)(PsInitialSystemProcess);
return SystemProcess->DirectoryTableBase;
}
ULONG32 TSC_OFFSET_HIGH = 0x00002011;
ULONG32 VIRTUAL_APIC_PAGE_ADDR_HIGH = 0x00002013;
ULONG32 VM_ENTRY_INTR_INFO_FIELD = 0x00004016;
void Setup_VMCS(IN PVirtualMachineState vmState, IN PEPTP EPTP) {
///1 Host state ============================= a mistake here shows up as VM-instruction error 8
// mainly SDM 24.5
//1.1 the CR registers
__vmx_vmwrite(HOST_CR0, __readcr0());
__vmx_vmwrite(HOST_CR3, __readcr3());
__vmx_vmwrite(HOST_CR4, __readcr4());
//1.2 RSP and RIP
__vmx_vmwrite(HOST_RSP, ((ULONG64)vmState->VMM_Stack + VMM_STACK_SIZE*2));
__vmx_vmwrite(HOST_RIP, (ULONG64)AsmVmexitHandler);
//1.3 segment selectors
__vmx_vmwrite(HOST_ES_SELECTOR, GetEs() & 0xF8);
__vmx_vmwrite(HOST_CS_SELECTOR, GetCs() & 0xF8);
__vmx_vmwrite(HOST_SS_SELECTOR, GetSs() & 0xF8);
__vmx_vmwrite(HOST_DS_SELECTOR, GetDs() & 0xF8);
__vmx_vmwrite(HOST_FS_SELECTOR, GetFs() & 0xF8);
__vmx_vmwrite(HOST_GS_SELECTOR, GetGs() & 0xF8);
__vmx_vmwrite(HOST_TR_SELECTOR, GetTr() & 0xF8);
//1.4 segment base addresses
SEGMENT_SELECTOR SegmentSelector = { 0 };
GetSegmentDescriptor(&SegmentSelector, GetTr(), (PUCHAR)Get_GDT_Base());
__vmx_vmwrite(HOST_TR_BASE, SegmentSelector.BASE);
__vmx_vmwrite(HOST_FS_BASE, __readmsr(MSR_FS_BASE));
__vmx_vmwrite(HOST_GS_BASE, __readmsr(MSR_GS_BASE));
__vmx_vmwrite(HOST_GDTR_BASE, Get_GDT_Base());
__vmx_vmwrite(HOST_IDTR_BASE, Get_IDT_Base());
//1.5sysenter
__vmx_vmwrite(HOST_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
__vmx_vmwrite(HOST_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));
__vmx_vmwrite(HOST_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
///2 Guest state, Intel SDM 24.4.1 ============================= a mistake here shows up as error 0x80000021
//2.1 CR registers
__vmx_vmwrite(GUEST_CR0, __readcr0());
__vmx_vmwrite(GUEST_CR3, __readcr3());
__vmx_vmwrite(GUEST_CR4, __readcr4());
//2.2 dr7
__vmx_vmwrite(GUEST_DR7, 0x400);
//2.3 rflg rsp rip
__vmx_vmwrite(GUEST_RFLAGS, AsmGetRflags());
//setup guest rsp (the stack is split in half between host and guest)
__vmx_vmwrite(GUEST_RSP, ((ULONG64)vmState->VMM_Stack + VMM_STACK_SIZE));
//setup guest rip
__vmx_vmwrite(GUEST_RIP, (ULONG64)AsmVmxRestoreState);
//2.4 segment selectors and segment bases
ULONG64 GdtBase = 0;
GdtBase = AsmGetGdtBase();
FillGuestSelectorData((PVOID)GdtBase, ES, GetEs());
FillGuestSelectorData((PVOID)GdtBase, CS, GetCs());
FillGuestSelectorData((PVOID)GdtBase, SS, GetSs());
FillGuestSelectorData((PVOID)GdtBase, DS, GetDs());
FillGuestSelectorData((PVOID)GdtBase, FS, GetFs());
FillGuestSelectorData((PVOID)GdtBase, GS, GetGs());
FillGuestSelectorData((PVOID)GdtBase, LDTR, GetLdtr());
FillGuestSelectorData((PVOID)GdtBase, TR, GetTr());
__vmx_vmwrite(GUEST_FS_BASE, __readmsr(MSR_FS_BASE));
__vmx_vmwrite(GUEST_GS_BASE, __readmsr(MSR_GS_BASE));
//2.5 GDT and IDT
__vmx_vmwrite(GUEST_GDTR_BASE, Get_GDT_Base());
__vmx_vmwrite(GUEST_IDTR_BASE, Get_IDT_Base());
__vmx_vmwrite(GUEST_GDTR_LIMIT, AsmGetGdtLimit());
__vmx_vmwrite(GUEST_IDTR_LIMIT, AsmGetIdtLimit());
// Setting the link pointer to the required value for 4KB VMCS.
__vmx_vmwrite(VMCS_LINK_POINTER, ~0ULL);// this field is covered in section 31.6
//2.6 MSR registers
__vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
__vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));
__vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
__vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(MSR_IA32_DEBUGCTL) & 0xFFFFFFFF);
__vmx_vmwrite(GUEST_IA32_DEBUGCTL_HIGH, __readmsr(MSR_IA32_DEBUGCTL) >> 32);
// it isn't strictly necessary to set this
__vmx_vmwrite(HOST_CR3, FindSystemDirectoryTableBase());
///3 Control fields ============================= a mistake here shows up as error 7
//3.1 execution controls, SDM 24.6.1: the high 32 bits are ANDed, the low 32 bits must be ORed in
__vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, AdjustControls(0, MSR_IA32_VMX_PINBASED_CTLS));
//3.1 execution controls, SDM 24.6.2
__vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, AdjustControls(CPU_BASED_ACTIVATE_MSR_BITMAP | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, MSR_IA32_VMX_PROCBASED_CTLS));//CPU_BASED_ACTIVATE_SECONDARY_CONTROLS: enable bits for EPT, virtual APIC, etc., plus whether instructions such as invpcid cause a vm-exit; if this bit is set, SECONDARY_VM_EXEC_CONTROL must be configured as well
//Windows internally uses INVPCID and XSAVE (on processors that support them), so if they are not enabled before virtualizing the kernel, errors can result
__vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, AdjustControls(CPU_BASED_CTL2_ENABLE_EPT | CPU_BASED_CTL2_RDTSCP | CPU_BASED_CTL2_ENABLE_INVPCID | CPU_BASED_CTL2_ENABLE_XSAVE_XRSTORS, MSR_IA32_VMX_PROCBASED_CTLS2));//IA32_VMX_TRUE_PINBASED_CTLS: using the TRUE capability MSRs avoids some settings that are otherwise forced unnecessarily
/*
INVEPT  - invalidates the cached extended page table (EPT) mappings in the processor so that guest address translation stays consistent with the in-memory EPT pages.
INVVPID - invalidates cached address-translation mappings based on the virtual processor ID (VPID).
*/ //3.2 VM-exit controls, SDM 24.7
__vmx_vmwrite(VM_EXIT_CONTROLS, AdjustControls(VM_EXIT_IA32E_MODE, MSR_IA32_VMX_EXIT_CTLS));// the first flag is bit 9; roughly, it means the host runs in 64-bit mode after a VM exit
//3.3 VM-entry controls, SDM 24.8
__vmx_vmwrite(VM_ENTRY_CONTROLS, AdjustControls(VM_ENTRY_IA32E_MODE, MSR_IA32_VMX_ENTRY_CTLS));
// Set MSR Bitmaps
__vmx_vmwrite(MSR_BITMAP, vmState->MSRBitMapPhysical);
/* For CPU_BASED_VM_EXEC_CONTROL we set CPU_BASED_ACTIVATE_MSR_BITMAP, which enables the MSR bitmap filter. Setting this is effectively mandatory: Windows accesses many MSRs during ordinary kernel execution, so without this bit every MSR access would cause a VM exit and slow the system dramatically. */
// Set up EPT
__vmx_vmwrite(EPT_POINTER, EptState->EptPointer.Flags);
// this would intercept int3
//__vmx_vmwrite(EXCEPTION_BITMAP, 0x9); // breakpoint (bit 3); 0x9 also sets exception 0 (#DE) along the way
}
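/* Sketch only (assumes the MSR bitmap layout from the SDM: bytes 0-1023 = read bitmap
   for low MSRs 0x0-0x1FFF, bytes 1024-2047 = read bitmap for high MSRs
   0xC0000000-0xC0001FFF, then the two write bitmaps): with the bitmap zeroed as in
   LaunchVM there are no MSR-access exits at all; to intercept reads of one low MSR,
   e.g. IA32_SYSENTER_EIP (0x176), one would set its bit before vmlaunch:
   PUCHAR Bitmap = (PUCHAR)vmState->MSRBitMap;
   Bitmap[0x176 / 8] |= (1 << (0x176 % 8));
*/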
ULONG64 ddd = 0;
void LaunchVM(int ProcessorID, PEPTP EPTP) {
DbgPrint("Current thread is executing in %d th logical processor.\n", ProcessorID);
// Allocate stack for the VM Exit Handler.
UINT64 VMM_STACK_VA = ExAllocatePoolWithTag(NonPagedPool, VMM_STACK_SIZE*2, POOLTAG);
vmState[ProcessorID].VMM_Stack = VMM_STACK_VA;
if (vmState[ProcessorID].VMM_Stack == NULL){
//DbgPrint("Error in allocating VMM Stack.\n");
return;
}
RtlZeroMemory(vmState[ProcessorID].VMM_Stack, VMM_STACK_SIZE);// zero it out
// Allocate memory for the MSR bitmap
vmState[ProcessorID].MSRBitMap = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, POOLTAG+1);// should be aligned
if (vmState[ProcessorID].MSRBitMap == NULL){
//DbgPrint("Error in allocating MSRBitMap.\n");
return;
}
RtlZeroMemory(vmState[ProcessorID].MSRBitMap, PAGE_SIZE);
vmState[ProcessorID].MSRBitMapPhysical = VirtualAddress_to_PhysicallAddress(vmState[ProcessorID].MSRBitMap);
// Clear the VMCS State
if (!Clear_VMCS_State(&vmState[ProcessorID])) {
//DbgPrint("Error in Clear the VMCS State.\n");
}
// Load VMCS (Set the Current VMCS)
if (!Load_VMCS(&vmState[ProcessorID])){
//DbgPrint("Error in Set the Current VMCS.\n");
}
//DbgPrint("Setting up VMCS.\n");
Setup_VMCS(&vmState[ProcessorID], EPTP);
// save the registers
//Breakpoint();
AsmVmxSaveState();
__vmx_vmlaunch();// if vmlaunch succeeds, execution never falls through to here
// if it fails, SDM 30.4 explains the error reason
// after the guest runs, control jumps back here
AsmVmxRestore();
DbgPrint("I Just Test\n");
//ddd = 1;//fffff800`61394010
}
// Initialize VMX
ULONG64 retrsp;
ULONG64 retrip22;
PVirtualMachineState vmState;
int ProcessorCounts;
PVirtualMachineState Initiate_VMX(void) {
if (!Is_VMX_Supported()) {
//DbgPrint("VMX is not supported in this machine !\n");
return NULL;
}
ProcessorCounts = KeQueryActiveProcessorCount(0);
vmState = ExAllocatePoolWithTag(NonPagedPool, sizeof(VirtualMachineState)* ProcessorCounts, POOLTAG);// multi-core case: one state per logical processor
KAFFINITY kAffinityMask;
for (size_t i = 0; i < ProcessorCounts; i++) {
kAffinityMask = ipow(2, i);
KeSetSystemAffinityThread(kAffinityMask);
// do st here !
//DbgPrint("\t\tCurrent thread is executing in %d th logical processor. \n", i);
//Intel SDM chapter 23.8
// strictly speaking, we should also check that CR0 has protected mode enabled
Enable_VMX_Operation(); // Enabling VMX operation; this has to be done inside the per-processor loop
//Intel SDM 31.5, vmxon
Allocate_VMXON_Region(&vmState[i]);// this one is for the host
//Intel SDM 31.6, VMCS
Allocate_VMCS_Region(&vmState[i]);// this one is for the guest
KIRQL OldIrql;
OldIrql = KeRaiseIrqlToDpcLevel();
// now the VMCS has to be set up, which is the fiddly part
LaunchVM(i, NULL);
KeLowerIrql(OldIrql);
//ddd = 2; // when execution stops here, go inspect the VMCS
DbgPrint("VMCS Region is allocated at ===============> %llx\n", vmState[i].VMCS_REGION);
//ddd = 3;
DbgPrint("VMXON Region is allocated at ===============> %llx\n", vmState[i].VMXON_REGION);
KeRevertToUserAffinityThread();
}
return vmState;
}
/* Check whether EPT features are present or not */
BOOLEAN EptCheckFeatures(){
IA32_VMX_EPT_VPID_CAP_REGISTER VpidRegister;
IA32_MTRR_DEF_TYPE_REGISTER MTRRDefType;
VpidRegister.Flags = __readmsr(MSR_IA32_VMX_EPT_VPID_CAP);
MTRRDefType.Flags = __readmsr(MSR_IA32_MTRR_DEF_TYPE);
if (!VpidRegister.PageWalkLength4 || !VpidRegister.MemoryTypeWriteBack || !VpidRegister.Pde2MbPages){
return FALSE;
}
if (!VpidRegister.AdvancedVmexitEptViolationsInformation){
DbgPrint("The processor doesn't report advanced VM-exit information for EPT violations\n");
}
if (!MTRRDefType.MtrrEnable){
DbgPrint("Mtrr Dynamic Ranges not supported\n");
return FALSE;
}
DbgPrint(" *** All EPT features are present *** \n");
return TRUE;
}
/* Build MTRR Map of current physical addresses */
BOOLEAN EptBuildMtrrMap(){
IA32_MTRR_CAPABILITIES_REGISTER MTRRCap;
IA32_MTRR_PHYSBASE_REGISTER CurrentPhysBase;
IA32_MTRR_PHYSMASK_REGISTER CurrentPhysMask;
PMTRR_RANGE_DESCRIPTOR Descriptor;
ULONG CurrentRegister;
ULONG NumberOfBitsInMask;
// Read the VCNT value of the IA32_MTRRCAP MSR (0xFE); it gives the number of variable MTRRs (ranges)
MTRRCap.Flags = __readmsr(MSR_IA32_MTRR_CAPABILITIES);
for (CurrentRegister = 0; CurrentRegister < MTRRCap.VariableRangeCount; CurrentRegister++){
// For each dynamic register pair
CurrentPhysBase.Flags = __readmsr(MSR_IA32_MTRR_PHYSBASE0 + (CurrentRegister * 2));
CurrentPhysMask.Flags = __readmsr(MSR_IA32_MTRR_PHYSMASK0 + (CurrentRegister * 2));
// Is the range enabled?
if (CurrentPhysMask.Valid){
// We only need to read these once because the ISA dictates that MTRRs are to be synchronized between all processors
// during BIOS initialization.
Descriptor = &EptState->MemoryRanges[EptState->NumberOfEnabledMemoryRanges++];
// Calculate the base address in bytes; the (physical) start and end addresses are derived from the MSRs.
Descriptor->PhysicalBaseAddress = CurrentPhysBase.PageFrameNumber * PAGE_SIZE;
// Calculate the total size of the range
// The lowest bit of the mask that is set to 1 specifies the size of the range
_BitScanForward64(&NumberOfBitsInMask, CurrentPhysMask.PageFrameNumber * PAGE_SIZE);
// Size of the range in bytes + Base Address
Descriptor->PhysicalEndAddress = Descriptor->PhysicalBaseAddress + ((1ULL << NumberOfBitsInMask) - 1ULL);
// Memory type (cacheability attributes): finally, read the caching policy set by the BIOS or the OS.
Descriptor->MemoryType = (UCHAR)CurrentPhysBase.Type;
if (Descriptor->MemoryType == MEMORY_TYPE_WRITE_BACK){
/* This is already our default, so no need to store this range.
* Simply 'free' the range we just wrote. */
EptState->NumberOfEnabledMemoryRanges--;
}
DbgPrint("MTRR Range: Base=0x%llx End=0x%llx Type=0x%x\n", Descriptor->PhysicalBaseAddress, Descriptor->PhysicalEndAddress, Descriptor->MemoryType);
}
}
DbgPrint("Total MTRR Ranges Committed: %d\n", EptState->NumberOfEnabledMemoryRanges);
return TRUE;
}
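/* Worked example (made-up MSR values) for the mask-to-size computation above: if a
   PHYSMASK register has Valid = 1 and PageFrameNumber * PAGE_SIZE == 0xFFFFFFE00000,
   its lowest set bit is bit 21, so the range covers 2^21 bytes (2MB) and
   PhysicalEndAddress = PhysicalBaseAddress + 0x1FFFFF. */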
/* Set up PML2 Entries */
VOID EptSetupPML2Entry(PEPT_PML2_ENTRY NewEntry, SIZE_T PageFrameNumber){
SIZE_T AddressOfPage;
SIZE_T CurrentMtrrRange;
SIZE_T TargetMemoryType;
/*
Each of the 512 collections of 512 PML2 entries is setup here.
This will, in total, identity map every physical address from 0x0 to physical address 0x8000000000 (512GB of memory)
((EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex) * 2MB is the actual physical address we're mapping
*/
NewEntry->PageFrameNumber = PageFrameNumber;
// Size of 2MB page * PageFrameNumber == AddressOfPage (physical memory).
AddressOfPage = PageFrameNumber * SIZE_2_MB;
/* To be safe, we will map the first page as UC as to not bring up any kind of undefined behavior from the
fixed MTRR section which we are not formally recognizing (typically there is MMIO memory in the first MB).
I suggest reading up on the fixed MTRR section of the manual to see why the first entry is likely going to need to be UC.
*/
// If no MTRR range describes the page, we pick Write-Back (MEMORY_TYPE_WRITE_BACK) as the default caching policy; otherwise we must use the caching policy from the MTRR.
if (PageFrameNumber == 0){
NewEntry->MemoryType = MEMORY_TYPE_UNCACHEABLE;
return;
}
// Default memory type is always WB for performance.
TargetMemoryType = MEMORY_TYPE_WRITE_BACK;
// For each MTRR range
for (CurrentMtrrRange = 0; CurrentMtrrRange < EptState->NumberOfEnabledMemoryRanges; CurrentMtrrRange++){
// If this page's address is below or equal to the max physical address of the range
if (AddressOfPage <= EptState->MemoryRanges[CurrentMtrrRange].PhysicalEndAddress){
// And this page's last address is above or equal to the base physical address of the range
if ((AddressOfPage + SIZE_2_MB - 1) >= EptState->MemoryRanges[CurrentMtrrRange].PhysicalBaseAddress){
/* If we're here, this page fell within one of the ranges specified by the variable MTRRs
Therefore, we must mark this page as the same cache type exposed by the MTRR
*/
TargetMemoryType = EptState->MemoryRanges[CurrentMtrrRange].MemoryType;
// LogInfo("0x%X> Range=%llX -> %llX | Begin=%llX End=%llX", PageFrameNumber, AddressOfPage, AddressOfPage + SIZE_2_MB - 1, EptState->MemoryRanges[CurrentMtrrRange].PhysicalBaseAddress, EptState->MemoryRanges[CurrentMtrrRange].PhysicalEndAddress);
// 11.11.4.1 MTRR Precedences
if (TargetMemoryType == MEMORY_TYPE_UNCACHEABLE){
// If this is going to be marked uncacheable, then we stop the search as UC always takes precedent.
break;
}
}
}
}
// Finally, commit the memory type to the entry.
NewEntry->MemoryType = TargetMemoryType;
}
/* Allocates page maps and create identity page table */
PVMM_EPT_PAGE_TABLE EptAllocateAndCreateIdentityPageTable(){
PVMM_EPT_PAGE_TABLE PageTable;
EPT_PML3_POINTER RWXTemplate;
EPT_PML2_ENTRY PML2EntryTemplate;
SIZE_T EntryGroupIndex;
SIZE_T EntryIndex;
// Allocate all paging structures as 4KB aligned pages
PHYSICAL_ADDRESS MaxSize;
PVOID Output;
// Allocate address anywhere in the OS's memory space
MaxSize.QuadPart = MAXULONG64;
// this rounds the size to a whole number of pages (clears the low 12 bits)
PageTable = MmAllocateContiguousMemory((sizeof(VMM_EPT_PAGE_TABLE) / PAGE_SIZE) * PAGE_SIZE, MaxSize);
if (PageTable == NULL){
DbgPrint("Failed to allocate memory for PageTable\n");
return NULL;
}
// Zero out all entries to ensure all unused entries are marked Not Present
RtlZeroMemory(PageTable, sizeof(VMM_EPT_PAGE_TABLE));
// Initialize the dynamic split list which holds all dynamic page splits
InitializeListHead(&PageTable->DynamicSplitList);
// For the initialization stage we set every access bit on every EPT table to 1 (read access, write access and execute access)
// Mark the first 512GB PML4 entry as present, which allows us to manage up to 512GB of discrete paging structures.
PageTable->PML4[0].PageFrameNumber = (SIZE_T)VirtualAddress_to_PhysicallAddress(&PageTable->PML3[0]) / PAGE_SIZE;
PageTable->PML4[0].ReadAccess = 1;
PageTable->PML4[0].WriteAccess = 1;
PageTable->PML4[0].ExecuteAccess = 1;
/* Now mark each 1GB PML3 entry as RWX and map each to their PML2 entry */
// Ensure stack memory is cleared
RWXTemplate.Flags = 0;
// Set up one 'template' RWX PML3 entry and copy it into each of the 512 PML3 entries
// Using the same method as SimpleVisor for copying each entry using intrinsics.
RWXTemplate.ReadAccess = 1;
RWXTemplate.WriteAccess = 1;
RWXTemplate.ExecuteAccess = 1;
// Fill the table with this template using __stosq. __stosq emits the store-string instruction (rep stosq), i.e. it copies the value repeatedly (VMM_EPT_PML3E_COUNT = 512 times in our case) starting at the given location
// Copy the template into each of the 512 PML3 entry slots
__stosq((SIZE_T*)&PageTable->PML3[0], RWXTemplate.Flags, VMM_EPT_PML3E_COUNT);
// For each of the 512 PML3 entries
for (EntryIndex = 0; EntryIndex < VMM_EPT_PML3E_COUNT; EntryIndex++){
// Map the 1GB PML3 entry to 512 PML2 (2MB) entries to describe each large page.
// NOTE: We do *not* manage any PML1 (4096 byte) entries and do not allocate them.
// the MTRR caching mechanism is handled here
PageTable->PML3[EntryIndex].PageFrameNumber = (SIZE_T)VirtualAddress_to_PhysicallAddress(&PageTable->PML2[EntryIndex][0]) / PAGE_SIZE;
}// fill the PML3 entries with the physical addresses (of their PML2 tables)
PML2EntryTemplate.Flags = 0;
// All PML2 entries will be RWX and 'present'
PML2EntryTemplate.WriteAccess = 1;
PML2EntryTemplate.ReadAccess = 1;
PML2EntryTemplate.ExecuteAccess = 1;
// We are using 2MB large pages, so we must mark this 1 here.
PML2EntryTemplate.LargePage = 1;
/* For each collection of 512 PML2 entries (512 collections * 512 entries per collection), mark it RWX using the same template above.
This marks the entries as "Present" regardless of if the actual system has memory at this region or not. We will cause a fault in our
EPT handler if the guest access a page outside a usable range, despite the EPT frame being present here.
*/
__stosq((SIZE_T*)&PageTable->PML2[0], PML2EntryTemplate.Flags, VMM_EPT_PML3E_COUNT * VMM_EPT_PML2E_COUNT);
// For each of the 512 collections of 512 2MB PML2 entries
for (EntryGroupIndex = 0; EntryGroupIndex < VMM_EPT_PML3E_COUNT; EntryGroupIndex++){
// For each 2MB PML2 entry in the collection
for (EntryIndex = 0; EntryIndex < VMM_EPT_PML2E_COUNT; EntryIndex++){
// Setup the memory type and frame number of the PML2 entry.
EptSetupPML2Entry(&PageTable->PML2[EntryGroupIndex][EntryIndex], (EntryGroupIndex * VMM_EPT_PML2E_COUNT) + EntryIndex);
}
}
return PageTable;
}
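/* Identity-mapping sanity check (illustrative): with 2MB large pages, entry
   PML2[3][5] receives PageFrameNumber = 3 * 512 + 5 = 1541, which maps guest-physical
   1541 * 0x200000 = 0xC0A00000 to host-physical 0xC0A00000, so guest-physical equals
   host-physical everywhere inside the 512GB covered by PML4[0]. */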
/*
Initialize EPT for an individual logical processor.
Creates an identity mapped page table and sets up an EPTP to be applied to the VMCS later.
*/
BOOLEAN EptLogicalProcessorInitialize(){
PVMM_EPT_PAGE_TABLE PageTable;
EPTP EPTP;
/* Allocate the identity mapped page table*/
PageTable = EptAllocateAndCreateIdentityPageTable();// build the EPT physical pages (identity map)
if (!PageTable){
DbgPrint("Unable to allocate memory for EPT\n");
return FALSE;
}
DbgPrint("=================================================================allocate Ok!\n");
// Virtual address to the page table to keep track of it for later freeing
EptState->EptPageTable = PageTable;
EPTP.Flags = 0;
// For performance, we let the processor know it can cache the EPT.
EPTP.MemoryType = MEMORY_TYPE_WRITE_BACK;
// We are not utilizing the 'access' and 'dirty' flag features.
EPTP.EnableAccessAndDirtyFlags = FALSE;
/*
Bits 5:3 (1 less than the EPT page-walk length) must be 3, indicating an EPT page-walk length of 4;
see Section 28.2.2
*/
EPTP.PageWalkLength = 3;
// The physical page number of the page table we will be using
EPTP.PageFrameNumber = (SIZE_T)VirtualAddress_to_PhysicallAddress(&PageTable->PML4) / PAGE_SIZE;
// We will write the EPTP to the VMCS later
EptState->EptPointer = EPTP;
// Example Test
//EptPageHook(ExAllocatePoolWithTag, FALSE);
return TRUE;
}
BOOLEAN Initialize_EPTP() {
// Allocate global variable to hold Ept State
EptState = ExAllocatePoolWithTag(NonPagedPool, sizeof(EPT_STATE), POOLTAG);
if (!EptState) {
DbgPrint("Insufficient memory");
return FALSE;
}
// Zero memory
RtlZeroMemory(EptState, sizeof(EPT_STATE));
// Check whether EPT is supported or not
if (!EptCheckFeatures()) {// check whether EPT is supported
DbgPrint("Your processor doesn't support all EPT features");
return FALSE;
}
else {
// Our processor supports EPT, now let's build MTRR
DbgPrint("Your processor supports all EPT features");
// Build MTRR Map
if (!EptBuildMtrrMap()) {// the "!mtrr" command displays the MTRR ranges and their caching policies
DbgPrint("Could not build Mtrr memory map");
return FALSE;
}
DbgPrint("Mtrr memory map built successfully");
}
// this is where the EPT is actually built
if (!EptLogicalProcessorInitialize()) {
// There were some errors in EptLogicalProcessorInitialize
return FALSE;
}
return TRUE;
}
// The code below is the hook-related part
/* Converts Physical Address to Virtual Address */
UINT64 PhysicalAddressToVirtualAddress(UINT64 PhysicalAddress){
PHYSICAL_ADDRESS PhysicalAddr;
PhysicalAddr.QuadPart = PhysicalAddress;
return MmGetVirtualForPhysical(PhysicalAddr);
}
/* Get the PML1 entry for this physical address if the page is split. Return NULL if the address is invalid or the page wasn't already split. */
PEPT_PML1_ENTRY EptGetPml1Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress){
SIZE_T Directory, DirectoryPointer, PML4Entry;
PEPT_PML2_ENTRY PML2;
PEPT_PML1_ENTRY PML1;
PEPT_PML2_POINTER PML2Pointer;
Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
// Addresses above 512GB are invalid because it is > physical address bus width
if (PML4Entry > 0){
return NULL;
}
PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
// Check to ensure the page is split
if (PML2->LargePage){
return NULL;
}
// Conversion to get the right PageFrameNumber.
// These pointers occupy the same place in the table and are directly convertable.
PML2Pointer = (PEPT_PML2_POINTER)PML2;
// If it is, translate to the PML1 pointer
PML1 = (PEPT_PML1_ENTRY)PhysicalAddressToVirtualAddress((PVOID)(PML2Pointer->PageFrameNumber * PAGE_SIZE));
if (!PML1){
return NULL;
}
// Index into PML1 for that address
PML1 = &PML1[ADDRMASK_EPT_PML1_INDEX(PhysicalAddress)];
return PML1;
}
/* Get the PML2 entry for this physical address. */
PEPT_PML2_ENTRY EptGetPml2Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress){
SIZE_T Directory, DirectoryPointer, PML4Entry;
PEPT_PML2_ENTRY PML2;
Directory = ADDRMASK_EPT_PML2_INDEX(PhysicalAddress);
DirectoryPointer = ADDRMASK_EPT_PML3_INDEX(PhysicalAddress);
PML4Entry = ADDRMASK_EPT_PML4_INDEX(PhysicalAddress);
// Addresses above 512GB are invalid because it is > physical address bus width
if (PML4Entry > 0){
return NULL;
}
PML2 = &EptPageTable->PML2[DirectoryPointer][Directory];
return PML2;
}
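/* Assuming the ADDRMASK_EPT_PML*_INDEX macros extract the usual fields (PML1: bits
   12-20, PML2: bits 21-29, PML3: bits 30-38, PML4: bits 39-47), a physical address
   such as 0x12F63A123 decomposes into PML4 index 0, PML3 index 4, PML2 index 379,
   PML1 index 58 and page offset 0x123, which is how the two lookup helpers above
   walk the table. */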
/* Split 2MB (LargePage) into 4kb pages */
BOOLEAN EptSplitLargePage(PVMM_EPT_PAGE_TABLE EptPageTable, PVOID PreAllocatedBuffer, SIZE_T PhysicalAddress, ULONG CoreIndex){
PVMM_EPT_DYNAMIC_SPLIT NewSplit;
EPT_PML1_ENTRY EntryTemplate;
SIZE_T EntryIndex;
PEPT_PML2_ENTRY TargetEntry;
EPT_PML2_POINTER NewPointer;
// Find the PML2 entry that's currently used
TargetEntry = EptGetPml2Entry(EptPageTable, PhysicalAddress);
if (!TargetEntry){
DbgPrint("An invalid physical address passed");
return FALSE;
}
// If this large page is not marked a large page, that means it's a pointer already.
// That page is therefore already split.
if (!TargetEntry->LargePage){
return TRUE;
}
// Allocate the PML1 entries
NewSplit = (PVMM_EPT_DYNAMIC_SPLIT)PreAllocatedBuffer;
if (!NewSplit){
DbgPrint("Failed to allocate dynamic split memory");
return FALSE;
}
RtlZeroMemory(NewSplit, sizeof(VMM_EPT_DYNAMIC_SPLIT));
// Point back to the entry in the dynamic split for easy reference for which entry that dynamic split is for.
NewSplit->Entry = TargetEntry;
// Make a template for RWX
EntryTemplate.Flags = 0;
EntryTemplate.ReadAccess = 1;
EntryTemplate.WriteAccess = 1;
EntryTemplate.ExecuteAccess = 1;
// Copy the template into all the PML1 entries
__stosq((SIZE_T*)&NewSplit->PML1[0], EntryTemplate.Flags, VMM_EPT_PML1E_COUNT);
// Set the page frame numbers for identity mapping.
for (EntryIndex = 0; EntryIndex < VMM_EPT_PML1E_COUNT; EntryIndex++){
// Convert the 2MB page frame number to the 4096 page entry number plus the offset into the frame.
NewSplit->PML1[EntryIndex].PageFrameNumber = ((TargetEntry->PageFrameNumber * SIZE_2_MB) / PAGE_SIZE) + EntryIndex;
}
// Allocate a new pointer which will replace the 2MB entry with a pointer to 512 4096 byte entries.
NewPointer.Flags = 0;
NewPointer.WriteAccess = 1;
NewPointer.ReadAccess = 1;
NewPointer.ExecuteAccess = 1;
NewPointer.PageFrameNumber = (SIZE_T)VirtualAddress_to_PhysicallAddress(&NewSplit->PML1[0]) / PAGE_SIZE;
// Add our allocation to the linked list of dynamic splits for later deallocation
InsertHeadList(&EptPageTable->DynamicSplitList, &NewSplit->DynamicSplitList);
// Now, replace the entry in the page table with our new split pointer.
RtlCopyMemory(TargetEntry, &NewPointer, sizeof(NewPointer));
return TRUE;
}
PVOID TargetBuffer;
PVOID fakepage;// kept so it can be freed later
// here we implement the read/write vs execute split
BOOLEAN EptPageHook(PVOID TargetFunc) {
EPT_PML1_ENTRY OriginalEntry;
INVEPT_DESCRIPTOR Descriptor;
SIZE_T PhysicalAddress;
PVOID VirtualTarget;
PEPT_PML1_ENTRY TargetPage;
ULONG LogicalCoreIndex;
// Check whether we are in VMX Root Mode or Not
LogicalCoreIndex = KeGetCurrentProcessorIndex();
/* Translate the page from a physical address to virtual so we can read its memory.
* This function will return NULL if the physical address was not already mapped in
* virtual memory.
*/
VirtualTarget = PAGE_ALIGN(TargetFunc);
PhysicalAddress = (SIZE_T)VirtualAddress_to_PhysicallAddress(VirtualTarget);
if (!PhysicalAddress) {
DbgPrint("Target address could not be mapped to physical memory");
return FALSE;
}
TargetBuffer = ExAllocatePoolWithTag(NonPagedPool, sizeof(VMM_EPT_DYNAMIC_SPLIT), POOLTAG);
// Zero out the memory
RtlZeroMemory(TargetBuffer, sizeof(VMM_EPT_DYNAMIC_SPLIT));
if (!EptSplitLargePage(EptState->EptPageTable, TargetBuffer, PhysicalAddress, LogicalCoreIndex)){
DbgPrint("Could not split page for the address : 0x%llx", PhysicalAddress);
return FALSE;
}
// Pointer to the page entry in the page table.
TargetPage = EptGetPml1Entry(EptState->EptPageTable, PhysicalAddress);
// Ensure the target is valid.
if (!TargetPage){
DbgPrint("Failed to get PML1 entry of the target address");
return FALSE;
}
// allocate a fake page here
fakepage= ExAllocatePoolWithTag(NonPagedPool, 0x1000, POOLTAG);// 4KB, converted to a physical address below
RtlZeroMemory(fakepage, 0x1000);
fakepagephy.Flags = VirtualAddress_to_PhysicallAddress(fakepage);
fakepagephy.ReadAccess = 1;
fakepagephy.WriteAccess = 1;
fakepagephy.ExecuteAccess = 0;
// Save the original permissions of the page
OriginalEntry = *TargetPage;
OrigAddress = *TargetPage;// record the original EPT PML1 entry value
/*
* Lastly, mark the entry in the table as execute-only (read/write removed). This will cause the next
* read or write to this page to raise an EPT violation exit, which lets us swap in the fake
* (non-executable) page.
*/
OriginalEntry.ReadAccess = 0;
OriginalEntry.WriteAccess = 0;
OriginalEntry.ExecuteAccess = 1;
// Apply the hook to EPT
TargetPage->Flags = OriginalEntry.Flags;
// Invalidate the entry in the TLB caches so it will not conflict with the actual paging structure.
// Uncomment in order to invalidate all the contexts
//Descriptor.EptPointer = EptState->EptPointer.Flags;
//Descriptor.Reserved = 0;
//AsmInvept(1, &Descriptor);
//AsmInvept(2, 0);
//Type 1 is single-context invalidation: invalidate all EPT-derived translations associated with a single EPTP (in short: for a single VM on the logical core).
//Type 2 is all-context: invalidate every EPT-derived translation (for every VM).
return TRUE;
}
// Shut down VT
UINT64 PhysicalAddress_to_VirtualAddress(UINT64 pa) {
PHYSICAL_ADDRESS PhysicalAddr;
PhysicalAddr.QuadPart = pa;
return MmGetVirtualForPhysical(PhysicalAddr);
}
void Terminate_VMX(void) {
DbgPrint("\nTerminating VMX...\n");
KAFFINITY kAffinityMask;
for (size_t i = 0; i < ProcessorCounts; i++) {
kAffinityMask = ipow(2, i);
KeSetSystemAffinityThread(kAffinityMask);
DbgPrint("Uload Current thread is executing in %d th logical processor.\n", i);
flagModify = 1;
// same as before: save state first
AsmVmxSaveState();
Wmcall();
// after it executes, control jumps back here
AsmVmxRestore();
__vmx_off();// no need to turn VMX off a second time after this
Disable_VMX_Operation();
ExFreePoolWithTag(vmState[i].VMM_Stack, POOLTAG);
ExFreePoolWithTag(vmState[i].MSRBitMap, POOLTAG+1);
MmFreeContiguousMemory(PhysicalAddress_to_VirtualAddress(vmState[i].VMXON_REGION));
MmFreeContiguousMemory(PhysicalAddress_to_VirtualAddress(vmState[i].VMCS_REGION));
KeRevertToUserAffinityThread();
}
ExFreePoolWithTag(vmState, POOLTAG);// free the memory
// free the EPT
ExFreePoolWithTag(TargetBuffer, POOLTAG);
MmFreeContiguousMemory(EptState->EptPageTable);
ExFreePoolWithTag(EptState, POOLTAG);
ExFreePoolWithTag(fakepage, POOLTAG);
}
VOID DrvUnload(PDRIVER_OBJECT DriverObject) {
Terminate_VMX();
//SDM 27.5.2 says the GDTR and IDTR limits need to be restored
DbgPrint("Uload OK\n");
}
NTSTATUS DriverEntry(PDRIVER_OBJECT pDriverObject, PUNICODE_STRING pRegistryPath) {
pDriverObject->DriverUnload = DrvUnload;
Initialize_EPTP();// initialize EPT
if(EptState->EptPageTable !=NULL)
EptPageHook(ExAllocatePoolWithTag);// this marks the page containing that function as execute-only in the EPT (read/write access removed)
// initialization
//DbgPrint("===========================================================================================address %llX", EptState->EptPageTable);
Initiate_VMX();
// launch
// Launching VM for Test (in the 0th virtual processor)
return STATUS_SUCCESS;
}
Next, the asm file.
PUBLIC Enable_VMX_Operation
PUBLIC Breakpoint
PUBLIC STI_Instruction
PUBLIC CLI_Instruction
PUBLIC INVEPT_Instruction
PUBLIC GetCs
PUBLIC GetDs
PUBLIC GetEs
PUBLIC GetSs
PUBLIC GetFs
PUBLIC GetGs
PUBLIC GetLdtr
PUBLIC GetTr
PUBLIC Get_GDT_Base
PUBLIC Get_IDT_Base
PUBLIC Get_GDT_Limit
PUBLIC Get_IDT_Limit
PUBLIC Get_RFLAGS
PUBLIC Restore_To_VMXOFF_State
PUBLIC Save_VMXOFF_State
EXTERN VmxVmexitHandler:PROC
EXTERN g_StackPointerForReturning:QWORD
EXTERN g_BasePointerForReturning:QWORD
EXTERN GuestHandler:PROC
EXTERN retrsp:QWORD
EXTERN retrip:QWORD
EXTERN retrip22:QWORD
.code _text
;------------------------------------------------------------------------
VMX_ERROR_CODE_SUCCESS = 0
VMX_ERROR_CODE_FAILED_WITH_STATUS = 1
VMX_ERROR_CODE_FAILED = 2
;------------------------------------------------------------------------
Enable_VMX_Operation PROC PUBLIC
push rax ; Save the state
xor rax,rax ; Clear the RAX
mov rax,cr4
or rax,02000h ; Set CR4.VMXE (bit 13)
mov cr4,rax
pop rax ; Restore the state
ret
Enable_VMX_Operation ENDP
;------------------------------------------------------------------------
Breakpoint PROC PUBLIC
int 3
ret
Breakpoint ENDP
;------------------------------------------------------------------------
STI_Instruction PROC PUBLIC
STI
ret
STI_Instruction ENDP
;------------------------------------------------------------------------
CLI_Instruction PROC PUBLIC
CLI
ret
CLI_Instruction ENDP
;------------------------------------------------------------------------
Restore_To_VMXOFF_State PROC PUBLIC
VMXOFF ; turn it off before exiting
MOV rsp, g_StackPointerForReturning
MOV rbp, g_BasePointerForReturning
; make rsp point to a correct return point
ADD rsp,8
; return True
xor rax,rax
mov rax,1
; return section
mov rbx, [rsp+28h+8h]
mov rsi, [rsp+28h+10h]
add rsp, 020h
pop rdi
ret
Restore_To_VMXOFF_State ENDP
;------------------------------------------------------------------------
Save_VMXOFF_State PROC PUBLIC
MOV g_StackPointerForReturning,rsp
MOV g_BasePointerForReturning,rbp
ret
Save_VMXOFF_State ENDP
;------------------------------------------------------------------------
INVEPT_Instruction PROC PUBLIC
invept rcx, oword ptr [rdx]
jz @jz
jc @jc
xor rax, rax
ret
@jz: mov rax, VMX_ERROR_CODE_FAILED_WITH_STATUS
ret
@jc: mov rax, VMX_ERROR_CODE_FAILED
ret
INVEPT_Instruction ENDP
;------------------------------------------------------------------------
Get_GDT_Base PROC
LOCAL gdtr[10]:BYTE
sgdt gdtr
mov rax, QWORD PTR gdtr[2]
ret
Get_GDT_Base ENDP
;------------------------------------------------------------------------
GetCs PROC
mov rax, cs
ret
GetCs ENDP
;------------------------------------------------------------------------
GetDs PROC
mov rax, ds
ret
GetDs ENDP
;------------------------------------------------------------------------
GetEs PROC
mov rax, es
ret
GetEs ENDP
;------------------------------------------------------------------------
GetSs PROC
mov rax, ss
ret
GetSs ENDP
;------------------------------------------------------------------------
GetFs PROC
mov rax, fs
ret
GetFs ENDP
;------------------------------------------------------------------------
GetGs PROC
mov rax, gs
ret
GetGs ENDP
;------------------------------------------------------------------------
GetLdtr PROC
sldt rax
ret
GetLdtr ENDP
;------------------------------------------------------------------------
GetTr PROC
str rax
ret
GetTr ENDP
;------------------------------------------------------------------------
Get_IDT_Base PROC
LOCAL idtr[10]:BYTE
sidt idtr
mov rax, QWORD PTR idtr[2]
ret
Get_IDT_Base ENDP
;------------------------------------------------------------------------
Get_GDT_Limit PROC
LOCAL gdtr[10]:BYTE
sgdt gdtr
mov ax, WORD PTR gdtr[0]
ret
Get_GDT_Limit ENDP
;------------------------------------------------------------------------
Get_IDT_Limit PROC
LOCAL idtr[10]:BYTE
sidt idtr
mov ax, WORD PTR idtr[0]
ret
Get_IDT_Limit ENDP
;------------------------------------------------------------------------
Get_RFLAGS PROC
pushfq
pop rax
ret
Get_RFLAGS ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
Disable_VMX_Operation PROC PUBLIC
push rax ; Save the state
xor rax,rax ; Clear the RAX
mov rax,cr4
and rax,0ffffffffffffdfffh ; Clear CR4.VMXE (bit 13)
mov cr4,rax
pop rax ; Restore the state
ret
Disable_VMX_Operation ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmVmexitHandler PROC
;int 3
;push 0 ; the stack might be unaligned here and the memory just past it might not be
; mapped, which could raise IRQL_NOT_LESS_OR_EQUAL, so a little extra space is
; reserved to avoid that kind of error
pushfq
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rdi
push rsi
push rbp
push rbp ; rsp
push rbx
push rdx
push rcx
push rax
mov rcx, rsp ; Fast call argument to PGUEST_REGS
sub rsp, 28h ; Free some space for Shadow Section
call VmxVmexitHandler
add rsp, 28h ; Restore the state
;int 3
pop rax
pop rcx
pop rdx
pop rbx
pop rbp ; rsp
pop rbp
pop rsi
pop rdi
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
;push Rflags
popfq
;mov rsp,gust_rsp
; resume guest execution
vmresume
sub rsp, 0100h ; to avoid error in future functions
;mov rsp,gust_rsp ; this would have to be done last, otherwise the stack would change
;int 3
;jmp gust_rip
AsmVmexitHandler ENDP
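; Note (assumption about the GUEST_REGS definition in the C headers): the push order
; above has to mirror the structure field order, because rcx is set to rsp and passed
; to VmxVmexitHandler as PGUEST_REGS -- rax, pushed last, must be the first field,
; followed by rcx, rdx, rbx, the rsp placeholder, rbp, rsi, rdi, r8..r15.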
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmVmxRestoreState PROC
;VMCALL
;int 3
mov rax, retfc
jmp rax
AsmVmxRestoreState ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmGetGdtLimit PROC
LOCAL gdtr[10]:BYTE
sgdt gdtr
mov ax, WORD PTR gdtr[0]
ret
AsmGetGdtLimit ENDP
;------------------------------------------------------------------------
AsmGetIdtLimit PROC
LOCAL idtr[10]:BYTE
sidt idtr
mov ax, WORD PTR idtr[0]
ret
AsmGetIdtLimit ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmGetGdtBase PROC
LOCAL gdtr[10]:BYTE
sgdt gdtr
mov rax, QWORD PTR gdtr[2]
ret
AsmGetGdtBase ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmGetRflags PROC
pushfq
pop rax
ret
AsmGetRflags ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmVmxSaveState PROC
;grow the stack first, otherwise the values saved below would be overwritten
sub rsp,5000h
pushfq
push rax
push rcx
push rdx
push rbx
push rbp
push rsi
push rdi
push r8
push r9
push r10
push r11
push r12
push r13
push r14
push r15
mov retrsp,rsp
add rsp,5080h ;skip back over the 5000h reserved above plus the 80h of pushes
mov rax,[rsp]
mov retrip ,rax
ret
AsmVmxSaveState ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmVmxRestore PROC
pop retrip22
;sub rsp,5080h
pop r15
pop r14
pop r13
pop r12
pop r11
pop r10
pop r9
pop r8
pop rdi
pop rsi
pop rbp
pop rbx
pop rdx
pop rcx
pop rax
popfq ; restore r/eflags
add rsp,5008h ;restore the stack pointer
jmp retrip22
AsmVmxRestore ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
retfc PROC
mov rax,retrip
add rax,3 ;step over the 3-byte VMCALL in the guest
mov retrip,rax
mov rsp,retrsp
jmp retrip
retfc ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
GetCpuIdInfo PROC
push rbp
mov rbp, rsp
push rbx
push rsi
mov [rbp+18h], rdx
mov eax, ecx
cpuid
mov rsi, [rbp+18h]
mov [rsi], eax
mov [r8], ebx
mov [r9], ecx
mov rsi, [rbp+30h]
mov [rsi], edx
pop rsi
pop rbx
mov rsp, rbp
pop rbp
ret
GetCpuIdInfo ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
AsmInvvpid PROC
invvpid rcx, oword ptr [rdx]
jz @jz
jc @jc
xor rax, rax
ret
@jz: mov rax, VMX_ERROR_CODE_FAILED_WITH_STATUS
ret
@jc: mov rax, VMX_ERROR_CODE_FAILED
ret
AsmInvvpid ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
MSRRead PROC
rdmsr ; MSR[ecx] --> edx:eax
shl rdx, 32
or rax, rdx
ret
MSRRead ENDP
;------------------------------------------------------------------------
MSRWrite PROC
mov rax, rdx
shr rdx, 32
wrmsr
ret
MSRWrite ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
Wmcall PROC
vmcall
ret
Wmcall ENDP
;------------------------------------------------------------------------
;------------------------------------------------------------------------
; Error codes :
VMX_ERROR_CODE_SUCCESS = 0
VMX_ERROR_CODE_FAILED_WITH_STATUS = 1
VMX_ERROR_CODE_FAILED = 2
;------------------------------------------------------------------------
AsmInvept PROC PUBLIC
invept rcx, oword ptr [rdx]
jz @jz
jc @jc
xor rax, rax
ret
@jz:
mov rax, VMX_ERROR_CODE_FAILED_WITH_STATUS
ret
@jc:
mov rax, VMX_ERROR_CODE_FAILED
ret
AsmInvept ENDP
;------------------------------------------------------------------------
END
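As a quick illustration of how the stubs above are meant to be driven from C, here is a minimal, hedged sketch of an all-contexts INVEPT call. The function name InveptAllContextsSketch is made up for this example; INVEPT_DESCRIPTOR and INVEPT_ALL_CONTEXTS are the definitions that appear in ept.h further below, and 0/1/2 correspond to the error codes defined at the end of the .asm file.
// Hedged sketch: flush every EPT-derived translation via the AsmInvept stub above.
extern unsigned char AsmInvept(unsigned long Type, void* Descriptors);
unsigned char InveptAllContextsSketch(void)
{
    INVEPT_DESCRIPTOR Descriptor = { 0 }; // all-contexts INVEPT ignores the EPTP value,
                                          // but a zeroed 16-byte descriptor is still required
    return AsmInvept(INVEPT_ALL_CONTEXTS, &Descriptor); // 0 = success, 1 = fail with status, 2 = fail
}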
comm.h
#pragma once
#include <ntddk.h>
#define VPID_TAG 0x1
typedef struct _VirtualMachineState
{
UINT64 VMXON_REGION; // VMXON region
UINT64 VMCS_REGION; // VMCS region
UINT64 EPTP; // Extended-Page-Table Pointer
UINT64 VMM_Stack; // Stack for VMM in VM-Exit State
UINT64 MSRBitMap; // MSRBitMap Virtual Address
UINT64 MSRBitMapPhysical; // MSRBitMap Physical Address
} VirtualMachineState, *PVirtualMachineState;
extern PVirtualMachineState vmState;
extern int ProcessorCounts;
#define POOLTAG 0x48564653 // [H]yper[V]isor [F]rom [S]cratch (HVFS)
#define VMM_STACK_SIZE 0x8000
#define RPL_MASK 3
static int ipow(int base, int exp) { // integer power by squaring; static so including this header from several .c files does not create duplicate symbols
int result = 1;
for (;;)
{
if (exp & 1)
{
result *= base;
}
exp >>= 1;
if (!exp)
{
break;
}
base *= base;
}
return result;
}
typedef union SEGMENT_ATTRIBUTES
{
USHORT UCHARs;
struct
{
USHORT TYPE : 4; /* 0; Bit 40-43 */
USHORT S : 1; /* 4; Bit 44 */
USHORT DPL : 2; /* 5; Bit 45-46 */
USHORT P : 1; /* 7; Bit 47 */
USHORT AVL : 1; /* 8; Bit 52 */
USHORT L : 1; /* 9; Bit 53 */
USHORT DB : 1; /* 10; Bit 54 */
USHORT G : 1; /* 11; Bit 55 */
USHORT GAP : 4;
} Fields;
} SEGMENT_ATTRIBUTES;
typedef struct SEGMENT_SELECTOR
{
USHORT SEL;
SEGMENT_ATTRIBUTES ATTRIBUTES;
ULONG32 LIMIT;
ULONG64 BASE;
} SEGMENT_SELECTOR, *PSEGMENT_SELECTOR;
enum SEGREGS
{
ES = 0,
CS,
SS,
DS,
FS,
GS,
LDTR,
TR
};
typedef struct _GUEST_REGS
{
ULONG64 rax; // 0x00 // NOT VALID FOR SVM
ULONG64 rcx;
ULONG64 rdx; // 0x10
ULONG64 rbx;
ULONG64 rsp; // 0x20 // rsp is not stored here on SVM
ULONG64 rbp;
ULONG64 rsi; // 0x30
ULONG64 rdi;
ULONG64 r8; // 0x40
ULONG64 r9;
ULONG64 r10; // 0x50
ULONG64 r11;
ULONG64 r12; // 0x60
ULONG64 r13;
ULONG64 r14; // 0x70
ULONG64 r15;
} GUEST_REGS, *PGUEST_REGS;
typedef union _RFLAGS
{
struct
{
unsigned Reserved1 : 10;
unsigned ID : 1; // Identification flag
unsigned VIP : 1; // Virtual interrupt pending
unsigned VIF : 1; // Virtual interrupt flag
unsigned AC : 1; // Alignment check
unsigned VM : 1; // Virtual 8086 mode
unsigned RF : 1; // Resume flag
unsigned Reserved2 : 1;
unsigned NT : 1; // Nested task flag
unsigned IOPL : 2; // I/O privilege level
unsigned OF : 1;
unsigned DF : 1;
unsigned IF : 1; // Interrupt flag
unsigned TF : 1; // Trap flag
unsigned SF : 1; // Sign flag
unsigned ZF : 1; // Zero flag
unsigned Reserved3 : 1;
unsigned AF : 1; // Auxiliary carry flag
unsigned Reserved4 : 1;
unsigned PF : 1; // Parity flag
unsigned Reserved5 : 1;
unsigned CF : 1; // Carry flag [Bit 0]
unsigned Reserved6 : 32;
};
ULONG64 Content;
} RFLAGS;
typedef struct _SEGMENT_DESCRIPTOR
{
USHORT LIMIT0;
USHORT BASE0;
UCHAR BASE1;
UCHAR ATTR0;
UCHAR LIMIT1ATTR1;
UCHAR BASE2;
} SEGMENT_DESCRIPTOR, *PSEGMENT_DESCRIPTOR;
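The GUEST_REGS layout above is what the AsmVmexitHandler stub hands to C: the stub pushes r15 down to rax (rax last), so RSP ends up pointing at the rax slot, and the extra push rbp acts as a placeholder for the rsp field. Below is a hedged sketch of the receiving side; the prototype is only a plausible one, since the real definition lives in the VMX .c file, which is not shown here.
// Hedged sketch: the C side of the VM-exit path. RCX (first fastcall argument)
// is the RSP value captured in AsmVmexitHandler, reinterpreted as PGUEST_REGS.
BOOLEAN VmxVmexitHandler(PGUEST_REGS GuestRegs)
{
    // GuestRegs->rax ... GuestRegs->r15 are the guest's register values at the exit,
    // and writing to them changes what the guest sees after VMRESUME.
    UNREFERENCED_PARAMETER(GuestRegs);
    return TRUE;
}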
CPU.h
#pragma once
#include <ntddk.h>
typedef union _IA32_FEATURE_CONTROL_MSR{
ULONG64 All;
struct{
ULONG64 Lock : 1; // [0]
ULONG64 EnableSMX : 1; // [1]
ULONG64 EnableVmxon : 1; // [2]
ULONG64 Reserved2 : 5; // [3-7]
ULONG64 EnableLocalSENTER : 7; // [8-14]
ULONG64 EnableGlobalSENTER : 1; // [15]
ULONG64 Reserved3a : 16; //
ULONG64 Reserved3b : 32; // [16-63]
} Fields;
} IA32_FEATURE_CONTROL_MSR, *PIA32_FEATURE_CONTROL_MSR;
typedef struct _CPUID{
int eax;
int ebx;
int ecx;
int edx;
} CPUID, *PCPUID;
typedef union _IA32_VMX_BASIC_MSR{
ULONG64 All;
struct
{
ULONG32 RevisionIdentifier : 31; // [0-30]
ULONG32 Reserved1 : 1; // [31]
ULONG32 RegionSize : 12; // [32-43]
ULONG32 RegionClear : 1; // [44]
ULONG32 Reserved2 : 3; // [45-47]
ULONG32 SupportedIA64 : 1; // [48]
ULONG32 SupportedDualMoniter : 1; // [49]
ULONG32 MemoryType : 4; // [50-53]
ULONG32 VmExitReport : 1; // [54]
ULONG32 VmxCapabilityHint : 1; // [55]
ULONG32 Reserved3 : 8; // [56-63]
} Fields;
} IA32_VMX_BASIC_MSR, *PIA32_VMX_BASIC_MSR;
BOOLEAN Is_VMX_Supported();
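One place IA32_VMX_BASIC_MSR is needed is when preparing the VMXON and VMCS regions: the SDM requires the VMCS revision identifier from this MSR to be written to the first 32 bits of each region before VMXON/VMPTRLD. A hedged sketch follows; the function name is hypothetical, MSR_IA32_VMX_BASIC is defined in MSR.h below, and Region is assumed to be a zeroed, 4-KByte-aligned virtual address.
// Hedged sketch: stamp a VMXON/VMCS region with the revision identifier.
VOID WriteRevisionIdSketch(PUCHAR Region)
{
    IA32_VMX_BASIC_MSR Basic = { 0 };
    Basic.All = __readmsr(MSR_IA32_VMX_BASIC);
    *(ULONG32*)Region = (ULONG32)Basic.Fields.RevisionIdentifier;
}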
ept.h
#pragma once
//
// Constants //
//
// MTRR Physical Base MSRs
#define MSR_IA32_MTRR_PHYSBASE0 0x00000200
#define MSR_IA32_MTRR_PHYSBASE1 0x00000202
#define MSR_IA32_MTRR_PHYSBASE2 0x00000204
#define MSR_IA32_MTRR_PHYSBASE3 0x00000206
#define MSR_IA32_MTRR_PHYSBASE4 0x00000208
#define MSR_IA32_MTRR_PHYSBASE5 0x0000020A
#define MSR_IA32_MTRR_PHYSBASE6 0x0000020C
#define MSR_IA32_MTRR_PHYSBASE7 0x0000020E
#define MSR_IA32_MTRR_PHYSBASE8 0x00000210
#define MSR_IA32_MTRR_PHYSBASE9 0x00000212
// MTRR Physical Mask MSRs
#define MSR_IA32_MTRR_PHYSMASK0 0x00000201
#define MSR_IA32_MTRR_PHYSMASK1 0x00000203
#define MSR_IA32_MTRR_PHYSMASK2 0x00000205
#define MSR_IA32_MTRR_PHYSMASK3 0x00000207
#define MSR_IA32_MTRR_PHYSMASK4 0x00000209
#define MSR_IA32_MTRR_PHYSMASK5 0x0000020B
#define MSR_IA32_MTRR_PHYSMASK6 0x0000020D
#define MSR_IA32_MTRR_PHYSMASK7 0x0000020F
#define MSR_IA32_MTRR_PHYSMASK8 0x00000211
#define MSR_IA32_MTRR_PHYSMASK9 0x00000213
// Memory Types
#define MEMORY_TYPE_UNCACHEABLE 0x00000000
#define MEMORY_TYPE_WRITE_COMBINING 0x00000001
#define MEMORY_TYPE_WRITE_THROUGH 0x00000004
#define MEMORY_TYPE_WRITE_PROTECTED 0x00000005
#define MEMORY_TYPE_WRITE_BACK 0x00000006
#define MEMORY_TYPE_INVALID 0x000000FF
// Page attributes for internal use
#define PAGE_ATTRIB_READ 0x2
#define PAGE_ATTRIB_WRITE 0x4
#define PAGE_ATTRIB_EXEC 0x8
// VMX EPT & VPID Capabilities MSR
#define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048C
// MTRR Def MSR
#define MSR_IA32_MTRR_DEF_TYPE 0x000002FF
// MTRR Capabilities MSR
#define MSR_IA32_MTRR_CAPABILITIES 0x000000FE
// The number of 512GB PML4 entries in the page table.
#define VMM_EPT_PML4E_COUNT 512
// The number of 1GB PDPT entries in the page table per 512GB PML4 entry.
#define VMM_EPT_PML3E_COUNT 512
// The number of 2MB Page Directory entries in the page table per 1GB PML3 entry.
#define VMM_EPT_PML2E_COUNT 512
// The number of 4096 byte Page Table entries in the page table per 2MB PML2 entry when dynamically split.
#define VMM_EPT_PML1E_COUNT 512
// Integer 2MB
#define SIZE_2_MB ((SIZE_T)(512 * PAGE_SIZE))
// Offset into the 1st paging structure (4096 byte)
#define ADDRMASK_EPT_PML1_OFFSET(_VAR_) (_VAR_ & 0xFFFULL)
// Index of the 1st paging structure (4096 byte)
#define ADDRMASK_EPT_PML1_INDEX(_VAR_) ((_VAR_ & 0x1FF000ULL) >> 12)
// Index of the 2nd paging structure (2MB)
#define ADDRMASK_EPT_PML2_INDEX(_VAR_) ((_VAR_ & 0x3FE00000ULL) >> 21)
// Index of the 3rd paging structure (1GB)
#define ADDRMASK_EPT_PML3_INDEX(_VAR_) ((_VAR_ & 0x7FC0000000ULL) >> 30)
// Index of the 4th paging structure (512GB)
#define ADDRMASK_EPT_PML4_INDEX(_VAR_) ((_VAR_ & 0xFF8000000000ULL) >> 39)
/**
* Linked list for-each macro for traversing LIST_ENTRY structures.
*
* _LISTHEAD_ is a pointer to the struct that the list head belongs to.
* _LISTHEAD_NAME_ is the name of the variable which contains the list head. Should match the same name as the list entry struct member in the actual record.
* _TARGET_TYPE_ is the type name of the struct of each item in the list
* _TARGET_NAME_ is the name which will contain the pointer to the item each iteration
*
* Example:
* FOR_EACH_LIST_ENTRY(ProcessorContext->EptPageTable, DynamicSplitList, VMM_EPT_DYNAMIC_SPLIT, Split)
* OsFreeNonpagedMemory(Split);
* FOR_EACH_LIST_ENTRY_END();
*
* ProcessorContext->EptPageTable->DynamicSplitList is the head of the list.
* VMM_EPT_DYNAMIC_SPLIT is the struct of each item in the list.
* Split is the name of the local variable which will hold the pointer to the item.
*/
#define FOR_EACH_LIST_ENTRY(_LISTHEAD_, _LISTHEAD_NAME_, _TARGET_TYPE_, _TARGET_NAME_) \
for (PLIST_ENTRY Entry = _LISTHEAD_->_LISTHEAD_NAME_.Flink; Entry != &_LISTHEAD_->_LISTHEAD_NAME_; Entry = Entry->Flink) { \
P##_TARGET_TYPE_ _TARGET_NAME_ = CONTAINING_RECORD(Entry, _TARGET_TYPE_, _LISTHEAD_NAME_);
/**
* The braces for the block are messy due to the need to define a local variable in the for loop scope.
* Therefore, this macro just ends the for each block without messing up code editors trying to detect
* the block indent level.
*/
# define FOR_EACH_LIST_ENTRY_END() }
//
// Variables //
//
// Vmx-root lock for changing EPT PML1 Entry and Invalidating TLB
volatile LONG Pml1ModificationAndInvalidationLock;
//
// Unions & Structs //
//
typedef union _IA32_VMX_EPT_VPID_CAP_REGISTER
{
struct
{
/**
* [Bit 0] When set to 1, the processor supports execute-only translations by EPT. This support allows software to
* configure EPT paging-structure entries in which bits 1:0 are clear (indicating that data accesses are not allowed) and
* bit 2 is set (indicating that instruction fetches are allowed).
*/
UINT64 ExecuteOnlyPages : 1;
UINT64 Reserved1 : 5;
/**
* [Bit 6] Indicates support for a page-walk length of 4.
*/
UINT64 PageWalkLength4 : 1;
UINT64 Reserved2 : 1;
/**
* [Bit 8] When set to 1, the logical processor allows software to configure the EPT paging-structure memory type to be
* uncacheable (UC).
*
* @see Vol3C[24.6.11(Extended-Page-Table Pointer (EPTP))]
*/
UINT64 MemoryTypeUncacheable : 1;
UINT64 Reserved3 : 5;
/**
* [Bit 14] When set to 1, the logical processor allows software to configure the EPT paging-structure memory type to be
* write-back (WB).
*/
UINT64 MemoryTypeWriteBack : 1;
UINT64 Reserved4 : 1;
/**
* [Bit 16] When set to 1, the logical processor allows software to configure a EPT PDE to map a 2-Mbyte page (by setting
* bit 7 in the EPT PDE).
*/
UINT64 Pde2MbPages : 1;
/**
* [Bit 17] When set to 1, the logical processor allows software to configure a EPT PDPTE to map a 1-Gbyte page (by setting
* bit 7 in the EPT PDPTE).
*/
UINT64 Pdpte1GbPages : 1;
UINT64 Reserved5 : 2;
/**
* [Bit 20] If bit 20 is read as 1, the INVEPT instruction is supported.
*
* @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
* @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
*/
UINT64 Invept : 1;
/**
* [Bit 21] When set to 1, accessed and dirty flags for EPT are supported.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 EptAccessedAndDirtyFlags : 1;
/**
* [Bit 22] When set to 1, the processor reports advanced VM-exit information for EPT violations. This reporting is done
* only if this bit is read as 1.
*
* @see Vol3C[27.2.1(Basic VM-Exit Information)]
*/
UINT64 AdvancedVmexitEptViolationsInformation : 1;
UINT64 Reserved6 : 2;
/**
* [Bit 25] When set to 1, the single-context INVEPT type is supported.
*
* @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
* @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
*/
UINT64 InveptSingleContext : 1;
/**
* [Bit 26] When set to 1, the all-context INVEPT type is supported.
*
* @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
* @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
*/
UINT64 InveptAllContexts : 1;
UINT64 Reserved7 : 5;
/**
* [Bit 32] When set to 1, the INVVPID instruction is supported.
*/
UINT64 Invvpid : 1;
UINT64 Reserved8 : 7;
/**
* [Bit 40] When set to 1, the individual-address INVVPID type is supported.
*/
UINT64 InvvpidIndividualAddress : 1;
/**
* [Bit 41] When set to 1, the single-context INVVPID type is supported.
*/
UINT64 InvvpidSingleContext : 1;
/**
* [Bit 42] When set to 1, the all-context INVVPID type is supported.
*/
UINT64 InvvpidAllContexts : 1;
/**
* [Bit 43] When set to 1, the single-context-retaining-globals INVVPID type is supported.
*/
UINT64 InvvpidSingleContextRetainGlobals : 1;
UINT64 Reserved9 : 20;
};
UINT64 Flags;
} IA32_VMX_EPT_VPID_CAP_REGISTER, *PIA32_VMX_EPT_VPID_CAP_REGISTER;
typedef union _EPT_PML4
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 512-GByte region controlled by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 512-GByte region controlled by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 512-GByte region controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 512-GByte region controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
UINT64 Reserved1 : 5;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 512-GByte region
* controlled by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
UINT64 Reserved2 : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 512-GByte region
* controlled by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved3 : 1;
/**
* [Bits 47:12] Physical address of 4-KByte aligned EPT page-directory-pointer table referenced by this entry.
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved4 : 16;
};
UINT64 Flags;
} EPT_PML4, *PEPT_PML4;
// See Table 28-1.
typedef union _EPT_PML4E {
ULONG64 All;
struct {
UINT64 Read : 1; // bit 0
UINT64 Write : 1; // bit 1
UINT64 Execute : 1; // bit 2
UINT64 Reserved1 : 5; // bit 7:3 (Must be Zero)
UINT64 Accessed : 1; // bit 8
UINT64 Ignored1 : 1; // bit 9
UINT64 ExecuteForUserMode : 1; // bit 10
UINT64 Ignored2 : 1; // bit 11
UINT64 PhysicalAddress : 36; // bit (N-1):12 or Page-Frame-Number
UINT64 Reserved2 : 4; // bit 51:N
UINT64 Ignored3 : 12; // bit 63:52
}Fields;
}EPT_PML4E, *PEPT_PML4E;
// See Table 28-3
typedef union _EPT_PDPTE {
ULONG64 All;
struct {
UINT64 Read : 1; // bit 0
UINT64 Write : 1; // bit 1
UINT64 Execute : 1; // bit 2
UINT64 Reserved1 : 5; // bit 7:3 (Must be Zero)
UINT64 Accessed : 1; // bit 8
UINT64 Ignored1 : 1; // bit 9
UINT64 ExecuteForUserMode : 1; // bit 10
UINT64 Ignored2 : 1; // bit 11
UINT64 PhysicalAddress : 36; // bit (N-1):12 or Page-Frame-Number
UINT64 Reserved2 : 4; // bit 51:N
UINT64 Ignored3 : 12; // bit 63:52
}Fields;
}EPT_PDPTE, *PEPT_PDPTE;
// See Table 28-5
typedef union _EPT_PDE {
ULONG64 All;
struct {
UINT64 Read : 1; // bit 0
UINT64 Write : 1; // bit 1
UINT64 Execute : 1; // bit 2
UINT64 Reserved1 : 5; // bit 7:3 (Must be Zero)
UINT64 Accessed : 1; // bit 8
UINT64 Ignored1 : 1; // bit 9
UINT64 ExecuteForUserMode : 1; // bit 10
UINT64 Ignored2 : 1; // bit 11
UINT64 PhysicalAddress : 36; // bit (N-1):12 or Page-Frame-Number
UINT64 Reserved2 : 4; // bit 51:N
UINT64 Ignored3 : 12; // bit 63:52
}Fields;
}EPT_PDE, *PEPT_PDE;
// See Table 28-6
typedef union _EPT_PTE {
ULONG64 All;
struct {
UINT64 Read : 1; // bit 0
UINT64 Write : 1; // bit 1
UINT64 Execute : 1; // bit 2
UINT64 EPTMemoryType : 3; // bit 5:3 (EPT Memory type)
UINT64 IgnorePAT : 1; // bit 6
UINT64 Ignored1 : 1; // bit 7
UINT64 AccessedFlag : 1; // bit 8
UINT64 DirtyFlag : 1; // bit 9
UINT64 ExecuteForUserMode : 1; // bit 10
UINT64 Ignored2 : 1; // bit 11
UINT64 PhysicalAddress : 36; // bit (N-1):12 or Page-Frame-Number
UINT64 Reserved : 4; // bit 51:N
UINT64 Ignored3 : 11; // bit 62:52
UINT64 SuppressVE : 1; // bit 63
}Fields;
}EPT_PTE, *PEPT_PTE;
typedef union _EPDPTE_1GB
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 1-GByte page referenced by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 1-GByte page referenced by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 1-GByte page controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 1-GByte page controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
/**
* [Bits 5:3] EPT memory type for this 1-GByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 MemoryType : 3;
/**
* [Bit 6] Ignore PAT memory type for this 1-GByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 IgnorePat : 1;
/**
* [Bit 7] Must be 1 (otherwise, this entry references an EPT page directory).
*/
UINT64 LargePage : 1;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 1-GByte page
* referenced by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
/**
* [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has written to the 1-GByte page referenced
* by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Dirty : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 1-GByte page controlled
* by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved1 : 19;
/**
* [Bits 47:30] Physical address of the 1-GByte page referenced by this entry.
*/
UINT64 PageFrameNumber : 18;
UINT64 Reserved2 : 15;
/**
* [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1, EPT violations caused by accesses to this
* page are convertible to virtualization exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
* 0, this bit is ignored.
*
* @see Vol3C[25.5.6.1(Convertible EPT Violations)]
*/
UINT64 SuppressVe : 1;
};
UINT64 Flags;
} EPDPTE_1GB, *PEPDPTE_1GB;
typedef union _EPDPTE
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 1-GByte region controlled by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 1-GByte region controlled by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 1-GByte region controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 1-GByte region controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
UINT64 Reserved1 : 5;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 1-GByte region
* controlled by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
UINT64 Reserved2 : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 1-GByte region controlled
* by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved3 : 1;
/**
* [Bits 47:12] Physical address of 4-KByte aligned EPT page directory referenced by this entry.
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved4 : 16;
};
UINT64 Flags;
} EPDPTE, *PEPDPTE;
typedef union _EPDE_2MB
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 2-MByte page referenced by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 2-MByte page referenced by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 2-MByte page controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 2-MByte page controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
/**
* [Bits 5:3] EPT memory type for this 2-MByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 MemoryType : 3;
/**
* [Bit 6] Ignore PAT memory type for this 2-MByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 IgnorePat : 1;
/**
* [Bit 7] Must be 1 (otherwise, this entry references an EPT page table).
*/
UINT64 LargePage : 1;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 2-MByte page
* referenced by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
/**
* [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has written to the 2-MByte page referenced
* by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Dirty : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 2-MByte page controlled
* by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved1 : 10;
/**
* [Bits 47:21] Physical address of the 2-MByte page referenced by this entry.
*/
UINT64 PageFrameNumber : 27;
UINT64 Reserved2 : 15;
/**
* [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1, EPT violations caused by accesses to this
* page are convertible to virtualization exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
* 0, this bit is ignored.
*
* @see Vol3C[25.5.6.1(Convertible EPT Violations)]
*/
UINT64 SuppressVe : 1;
};
UINT64 Flags;
} EPDE_2MB, *PEPDE_2MB;
typedef union _EPDE
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 2-MByte region controlled by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 2-MByte region controlled by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 2-MByte region controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 2-MByte region controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
UINT64 Reserved1 : 5;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 2-MByte region
* controlled by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
UINT64 Reserved2 : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 2-MByte region controlled
* by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved3 : 1;
/**
* [Bits 47:12] Physical address of 4-KByte aligned EPT page table referenced by this entry.
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved4 : 16;
};
UINT64 Flags;
} EPDE, *PEPDE;
typedef union _EPTE
{
struct
{
/**
* [Bit 0] Read access; indicates whether reads are allowed from the 4-KByte page referenced by this entry.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Write access; indicates whether writes are allowed from the 4-KByte page referenced by this entry.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0, execute access; indicates whether
* instruction fetches are allowed from the 4-KByte page controlled by this entry.
* If that control is 1, execute access for supervisor-mode linear addresses; indicates whether instruction fetches are
* allowed from supervisor-mode linear addresses in the 4-KByte page controlled by this entry.
*/
UINT64 ExecuteAccess : 1;
/**
* [Bits 5:3] EPT memory type for this 4-KByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 MemoryType : 3;
/**
* [Bit 6] Ignore PAT memory type for this 4-KByte page.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 IgnorePat : 1;
UINT64 Reserved1 : 1;
/**
* [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software has accessed the 4-KByte page
* referenced by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Accessed : 1;
/**
* [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has written to the 4-KByte page referenced
* by this entry. Ignored if bit 6 of EPTP is 0.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 Dirty : 1;
/**
* [Bit 10] Execute access for user-mode linear addresses. If the "mode-based execute control for EPT" VM-execution control
* is 1, indicates whether instruction fetches are allowed from user-mode linear addresses in the 4-KByte page controlled
* by this entry. If that control is 0, this bit is ignored.
*/
UINT64 UserModeExecute : 1;
UINT64 Reserved2 : 1;
/**
* [Bits 47:12] Physical address of the 4-KByte page referenced by this entry.
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved3 : 15;
/**
* [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1, EPT violations caused by accesses to this
* page are convertible to virtualization exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
* 0, this bit is ignored.
*
* @see Vol3C[25.5.6.1(Convertible EPT Violations)]
*/
UINT64 SuppressVe : 1;
};
UINT64 Flags;
} EPTE, *PEPTE;
//
// typedefs //
//
typedef EPT_PML4 EPT_PML4_POINTER, *PEPT_PML4_POINTER;
typedef EPDPTE EPT_PML3_POINTER, *PEPT_PML3_POINTER;
typedef EPDE_2MB EPT_PML2_ENTRY, *PEPT_PML2_ENTRY;
typedef EPDE EPT_PML2_POINTER, *PEPT_PML2_POINTER;
typedef EPTE EPT_PML1_ENTRY, *PEPT_PML1_ENTRY;
//
// Structs Cont. //
//
typedef struct _VMM_EPT_PAGE_TABLE
{
/**
* 28.2.2 Describes 512 contiguous 512GB memory regions each with 512 1GB regions.
*/
DECLSPEC_ALIGN(PAGE_SIZE) EPT_PML4_POINTER PML4[VMM_EPT_PML4E_COUNT];
/**
* Describes exactly 512 contiguous 1GB memory regions within our single 512GB PML4 region.
*/
DECLSPEC_ALIGN(PAGE_SIZE) EPT_PML3_POINTER PML3[VMM_EPT_PML3E_COUNT];
/**
* For each 1GB PML3 entry, create 512 2MB entries to map identity.
* NOTE: We are using 2MB pages as the smallest paging size in our map, so we do not manage individual 4096 byte pages.
* Therefore, we do not allocate any PML1 (4096 byte) paging structures.
*/
DECLSPEC_ALIGN(PAGE_SIZE) EPT_PML2_ENTRY PML2[VMM_EPT_PML3E_COUNT][VMM_EPT_PML2E_COUNT];
/**
* List of all allocated dynamic splits. Used to free dynamic entries at the end of execution.
* A dynamic split is a 2MB page that has been split into 512 4096-byte pages.
* This is used only on request when a specific page's protections need to be split.
*/
LIST_ENTRY DynamicSplitList;
} VMM_EPT_PAGE_TABLE, *PVMM_EPT_PAGE_TABLE;
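To make the role of the PML2 array concrete, here is a hedged sketch of the usual identity mapping with 2-MByte large pages: for a large PDE the PageFrameNumber field covers bits 47:21, so the n-th 2-MByte page simply gets PFN n. The function name is hypothetical, and the memory type is left at 0 (UC) here; real code would derive it from the MTRR map instead.
// Hedged sketch: identity-map guest-physical to host-physical with 2-MByte entries.
VOID SetupPml2IdentitySketch(PVMM_EPT_PAGE_TABLE PageTable)
{
    SIZE_T i, j;
    for (i = 0; i < VMM_EPT_PML3E_COUNT; i++)
    {
        for (j = 0; j < VMM_EPT_PML2E_COUNT; j++)
        {
            PageTable->PML2[i][j].ReadAccess = 1;
            PageTable->PML2[i][j].WriteAccess = 1;
            PageTable->PML2[i][j].ExecuteAccess = 1;
            PageTable->PML2[i][j].LargePage = 1; // bit 7: this PDE maps a 2-MByte page
            PageTable->PML2[i][j].PageFrameNumber = (i * VMM_EPT_PML2E_COUNT) + j;
        }
    }
}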
typedef union _EPTP
{
struct
{
/**
* [Bits 2:0] EPT paging-structure memory type:
* - 0 = Uncacheable (UC)
* - 6 = Write-back (WB)
* Other values are reserved.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 MemoryType : 3;
/**
* [Bits 5:3] This value is 1 less than the EPT page-walk length.
*
* @see Vol3C[28.2.6(EPT and memory Typing)]
*/
UINT64 PageWalkLength : 3;
/**
* [Bit 6] Setting this control to 1 enables accessed and dirty flags for EPT.
*
* @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
*/
UINT64 EnableAccessAndDirtyFlags : 1;
UINT64 Reserved1 : 5;
/**
* [Bits 47:12] Bits N-1:12 of the physical address of the 4-KByte aligned EPT PML4 table.
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved2 : 16;
};
UINT64 Flags;
} EPTP, *PEPTP;
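A hedged sketch of how this union is typically filled once a PML4 table has been allocated: write-back memory type and a 4-level walk (so PageWalkLength = 3, i.e. one less than the walk length, as the comment above notes) are the common choices when IA32_VMX_EPT_VPID_CAP reports MemoryTypeWriteBack and PageWalkLength4. BuildEptpSketch is a made-up name.
// Hedged sketch: build an EPTP from the physical address of a 4-KByte-aligned PML4.
EPTP BuildEptpSketch(UINT64 Pml4PhysicalAddress)
{
    EPTP Eptp = { 0 };
    Eptp.MemoryType = MEMORY_TYPE_WRITE_BACK;   // 6
    Eptp.PageWalkLength = 3;                    // 4-level EPT walk, encoded as length - 1
    Eptp.EnableAccessAndDirtyFlags = 0;         // optional; requires EPT A/D support
    Eptp.PageFrameNumber = Pml4PhysicalAddress >> 12;
    return Eptp;
}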
// MSR_IA32_MTRR_DEF_TYPE
typedef union _IA32_MTRR_DEF_TYPE_REGISTER
{
struct
{
/**
* [Bits 2:0] Default Memory Type.
*/
UINT64 DefaultMemoryType : 3;
UINT64 Reserved1 : 7;
/**
* [Bit 10] Fixed Range MTRR Enable.
*/
UINT64 FixedRangeMtrrEnable : 1;
/**
* [Bit 11] MTRR Enable.
*/
UINT64 MtrrEnable : 1;
UINT64 Reserved2 : 52;
};
UINT64 Flags;
} IA32_MTRR_DEF_TYPE_REGISTER, *PIA32_MTRR_DEF_TYPE_REGISTER;
typedef struct INVEPT_DESC
{
EPTP ept_pointer;
UINT64 reserved;
}INVEPT_DESC, *PINVEPT_DESC;
enum invept_t
{
single_context = 0x00000001,
all_contexts = 0x00000002,
};
// MSR_IA32_MTRR_CAPABILITIES
typedef union _IA32_MTRR_CAPABILITIES_REGISTER
{
struct
{
/**
* @brief VCNT (variable range registers count) field
*
* [Bits 7:0] Indicates the number of variable ranges implemented on the processor.
*/
UINT64 VariableRangeCount : 8;
/**
* @brief FIX (fixed range registers supported) flag
*
* [Bit 8] Fixed range MTRRs (MSR_IA32_MTRR_FIX64K_00000 through MSR_IA32_MTRR_FIX4K_0F8000) are supported when set; no fixed range
* registers are supported when clear.
*/
UINT64 FixedRangeSupported : 1;
UINT64 Reserved1 : 1;
/**
* @brief WC (write combining) flag
*
* [Bit 10] The write-combining (WC) memory type is supported when set; the WC type is not supported when clear.
*/
UINT64 WcSupported : 1;
/**
* @brief SMRR (System-Management Range Register) flag
*
* [Bit 11] The system-management range register (SMRR) interface is supported when bit 11 is set; the SMRR interface is
* not supported when clear.
*/
UINT64 SmrrSupported : 1;
UINT64 Reserved2 : 52;
};
UINT64 Flags;
} IA32_MTRR_CAPABILITIES_REGISTER, *PIA32_MTRR_CAPABILITIES_REGISTER;
// MSR_IA32_MTRR_PHYSBASE(0-9)
typedef union _IA32_MTRR_PHYSBASE_REGISTER
{
struct
{
/**
* [Bits 7:0] Specifies the memory type for the range.
*/
UINT64 Type : 8;
UINT64 Reserved1 : 4;
/**
* [Bits 47:12] Specifies the base address of the address range. This 24-bit value, in the case where MAXPHYADDR is 36
* bits, is extended by 12 bits at the low end to form the base address (this automatically aligns the address on a 4-KByte
* boundary).
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved2 : 16;
};
UINT64 Flags;
} IA32_MTRR_PHYSBASE_REGISTER, *PIA32_MTRR_PHYSBASE_REGISTER;
// MSR_IA32_MTRR_PHYSMASK(0-9).
typedef union _IA32_MTRR_PHYSMASK_REGISTER
{
struct
{
/**
* [Bits 7:0] Specifies the memory type for the range.
*/
UINT64 Type : 8;
UINT64 Reserved1 : 3;
/**
* [Bit 11] Enables the register pair when set; disables register pair when clear.
*/
UINT64 Valid : 1;
/**
* [Bits 47:12] Specifies a mask (24 bits if the maximum physical address size is 36 bits, 28 bits if the maximum physical
* address size is 40 bits). The mask determines the range of the region being mapped, according to the following
* relationships:
* - Address_Within_Range AND PhysMask = PhysBase AND PhysMask
* - This value is extended by 12 bits at the low end to form the mask value.
* - The width of the PhysMask field depends on the maximum physical address size supported by the processor.
* CPUID.80000008H reports the maximum physical address size supported by the processor. If CPUID.80000008H is not
* available, software may assume that the processor supports a 36-bit physical address size.
*
* @see Vol3A[11.11.3(Example Base and Mask Calculations)]
*/
UINT64 PageFrameNumber : 36;
UINT64 Reserved2 : 16;
};
UINT64 Flags;
} IA32_MTRR_PHYSMASK_REGISTER, *PIA32_MTRR_PHYSMASK_REGISTER;
typedef struct _INVEPT_DESCRIPTOR
{
UINT64 EptPointer;
UINT64 Reserved; // Must be zero.
} INVEPT_DESCRIPTOR, *PINVEPT_DESCRIPTOR;
typedef struct _MTRR_RANGE_DESCRIPTOR
{
SIZE_T PhysicalBaseAddress;
SIZE_T PhysicalEndAddress;
UCHAR MemoryType;
} MTRR_RANGE_DESCRIPTOR, *PMTRR_RANGE_DESCRIPTOR;
typedef struct _EPT_STATE
{
LIST_ENTRY HookedPagesList; // A list of the details about hooked pages
MTRR_RANGE_DESCRIPTOR MemoryRanges[9]; // Physical memory ranges described by the BIOS in the MTRRs. Used to build the EPT identity mapping.
ULONG NumberOfEnabledMemoryRanges; // Number of memory ranges specified in MemoryRanges
EPTP EptPointer; // Extended-Page-Table Pointer
PVMM_EPT_PAGE_TABLE EptPageTable; // Page table entries for EPT operation
} EPT_STATE, *PEPT_STATE;
typedef struct _VMM_EPT_DYNAMIC_SPLIT
{
/*
* The 4096 byte page table entries that correspond to the split 2MB table entry.
*/
DECLSPEC_ALIGN(PAGE_SIZE) EPT_PML1_ENTRY PML1[VMM_EPT_PML1E_COUNT];
/*
* The pointer to the 2MB entry in the page table which this split is servicing.
*/
union
{
PEPT_PML2_ENTRY Entry;
PEPT_PML2_POINTER Pointer;
};
/*
* Linked list entries for each dynamic split
*/
LIST_ENTRY DynamicSplitList;
} VMM_EPT_DYNAMIC_SPLIT, *PVMM_EPT_DYNAMIC_SPLIT;
typedef union _VMX_EXIT_QUALIFICATION_EPT_VIOLATION
{
struct
{
/**
* [Bit 0] Set if the access causing the EPT violation was a data read.
*/
UINT64 ReadAccess : 1;
/**
* [Bit 1] Set if the access causing the EPT violation was a data write.
*/
UINT64 WriteAccess : 1;
/**
* [Bit 2] Set if the access causing the EPT violation was an instruction fetch.
*/
UINT64 ExecuteAccess : 1;
/**
* [Bit 3] The logical-AND of bit 0 in the EPT paging-structure entries used to translate the guest-physical address of the
* access causing the EPT violation (indicates whether the guest-physical address was readable).
*/
UINT64 EptReadable : 1;
/**
* [Bit 4] The logical-AND of bit 1 in the EPT paging-structure entries used to translate the guest-physical address of the
* access causing the EPT violation (indicates whether the guest-physical address was writeable).
*/
UINT64 EptWriteable : 1;
/**
* [Bit 5] The logical-AND of bit 2 in the EPT paging-structure entries used to translate the guest-physical address of the
* access causing the EPT violation.
* If the "mode-based execute control for EPT" VM-execution control is 0, this indicates whether the guest-physical address
* was executable. If that control is 1, this indicates whether the guest-physical address was executable for
* supervisor-mode linear addresses.
*/
UINT64 EptExecutable : 1;
/**
* [Bit 6] If the "mode-based execute control" VM-execution control is 0, the value of this bit is undefined. If that
* control is 1, this bit is the logical-AND of bit 10 in the EPT paging-structures entries used to translate the
* guest-physical address of the access causing the EPT violation. In this case, it indicates whether the guest-physical
* address was executable for user-mode linear addresses.
*/
UINT64 EptExecutableForUserMode : 1;
/**
* [Bit 7] Set if the guest linear-address field is valid. The guest linear-address field is valid for all EPT violations
* except those resulting from an attempt to load the guest PDPTEs as part of the execution of the MOV CR instruction.
*/
UINT64 ValidGuestLinearAddress : 1;
/**
* [Bit 8] If bit 7 is 1:
* - Set if the access causing the EPT violation is to a guest-physical address that is the translation of a linear
* address.
* - Clear if the access causing the EPT violation is to a paging-structure entry as part of a page walk or the update of
* an accessed or dirty bit.
* Reserved if bit 7 is 0 (cleared to 0).
*/
UINT64 CausedByTranslation : 1;
/**
* [Bit 9] This bit is 0 if the linear address is a supervisor-mode linear address and 1 if it is a user-mode linear
* address. Otherwise, this bit is undefined.
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit information for EPT violations. (If
* CR0.PG = 0, the translation of every linear address is a user-mode linear address and thus this bit will be 1.)
*/
UINT64 UserModeLinearAddress : 1;
/**
* [Bit 10] This bit is 0 if paging translates the linear address to a read-only page and 1 if it translates to a
* read/write page. Otherwise, this bit is undefined
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit information for EPT violations. (If
* CR0.PG = 0, every linear address is read/write and thus this bit will be 1.)
*/
UINT64 ReadableWritablePage : 1;
/**
* [Bit 11] This bit is 0 if paging translates the linear address to an executable page and 1 if it translates to an
* execute-disable page. Otherwise, this bit is undefined.
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit information for EPT violations. (If
* CR0.PG = 0, CR4.PAE = 0, or MSR_IA32_EFER.NXE = 0, every linear address is executable and thus this bit will be 0.)
*/
UINT64 ExecuteDisablePage : 1;
/**
* [Bit 12] NMI unblocking due to IRET.
*/
UINT64 NmiUnblocking : 1;
UINT64 Reserved1 : 51;
};
UINT64 Flags;
} VMX_EXIT_QUALIFICATION_EPT_VIOLATION, *PVMX_EXIT_QUALIFICATION_EPT_VIOLATION;
// Structure for each hooked instance
typedef struct _EPT_HOOKED_PAGE_DETAIL
{
DECLSPEC_ALIGN(PAGE_SIZE) CHAR FakePageContents[PAGE_SIZE];
/**
* Linked list entries for each page hook.
*/
LIST_ENTRY PageHookList;
/**
* The virtual address as seen from the caller's perspective (its CR3)
*/
UINT64 VirtualAddress;
/**
* The base address of the page. Used to find this structure in the list of page hooks
* when a hook is hit.
*/
SIZE_T PhysicalBaseAddress;
/**
* The base address of the page with fake contents. Used to swap page with fake contents
* when a hook is hit.
*/
SIZE_T PhysicalBaseAddressOfFakePageContents;
/*
* The page entry in the page tables that this hook is targeting.
*/
PEPT_PML1_ENTRY EntryAddress;
/**
* The original page entry. Will be copied back when the hook is removed
* from the page.
*/
EPT_PML1_ENTRY OriginalEntry;
/**
* The changed (hooked) page entry that is applied while the hook is active.
*/
EPT_PML1_ENTRY ChangedEntry;
/**
* The buffer of the trampoline function which is used in the inline hook.
*/
PCHAR Trampoline;
/**
* This field shows whether the hook contains a hidden hook for execution or not
*/
BOOLEAN IsExecutionHook;
} EPT_HOOKED_PAGE_DETAIL, *PEPT_HOOKED_PAGE_DETAIL;
//
// Enums //
//
typedef enum _INVEPT_TYPE
{
INVEPT_SINGLE_CONTEXT = 0x00000001,
INVEPT_ALL_CONTEXTS = 0x00000002
}INVEPT_TYPE;
//
// Functions //
//
// Check for EPT Features
BOOLEAN EptCheckFeatures();
// Build MTRR Map
BOOLEAN EptBuildMtrrMap();
// Hook in VMX Root Mode (A pre-allocated buffer should be available)
BOOLEAN EptPerformPageHook(PVOID TargetAddress, PVOID HookFunction, PVOID* OrigFunction, BOOLEAN UnsetRead, BOOLEAN UnsetWrite, BOOLEAN UnsetExecute);
// Hook in VMX Non Root Mode
// Initialize EPT Table based on Processor Index
BOOLEAN EptLogicalProcessorInitialize();
// Handle EPT Violation
BOOLEAN EptHandleEptViolation(ULONG ExitQualification, UINT64 GuestPhysicalAddr);
// Get the PML1 entry of a specific physical address
PEPT_PML1_ENTRY EptGetPml1Entry(PVMM_EPT_PAGE_TABLE EptPageTable, SIZE_T PhysicalAddress);
// Handle vm-exits for Monitor Trap Flag to restore previous state
VOID EptHandleMonitorTrapFlag(PEPT_HOOKED_PAGE_DETAIL HookedEntry);
// Handle Ept Misconfigurations
VOID EptHandleMisconfiguration(UINT64 GuestAddress);
// Set a specific PML1 entry inside a spinlock-protected region and then invalidate the TLB; must be called from VMX root mode
VOID EptSetPML1AndInvalidateTLB(PEPT_PML1_ENTRY EntryAddress, EPT_PML1_ENTRY EntryValue, INVEPT_TYPE InvalidationType);
// Handle hooked pages in Vmx-root mode
BOOLEAN EptHandleHookedPage(EPT_HOOKED_PAGE_DETAIL* HookedEntryDetails, VMX_EXIT_QUALIFICATION_EPT_VIOLATION ViolationQualification, SIZE_T PhysicalAddress);
// Remove a single hook from the hooked pages list
BOOLEAN EptPageUnHookSinglePage(SIZE_T PhysicalAddress);
// Remove all hooks from the hooked pages lists
VOID EptPageUnHookAllPages();
MSR.h
#pragma once
#include <ntddk.h>
#define MSR_APIC_BASE 0x01B
#define MSR_IA32_FEATURE_CONTROL 0x03A
#define MSR_IA32_VMX_BASIC 0x480
#define MSR_IA32_VMX_PINBASED_CTLS 0x481
#define MSR_IA32_VMX_PROCBASED_CTLS 0x482
#define MSR_IA32_VMX_EXIT_CTLS 0x483
#define MSR_IA32_VMX_ENTRY_CTLS 0x484
#define MSR_IA32_VMX_MISC 0x485
#define MSR_IA32_VMX_CR0_FIXED0 0x486
#define MSR_IA32_VMX_CR0_FIXED1 0x487
#define MSR_IA32_VMX_CR4_FIXED0 0x488
#define MSR_IA32_VMX_CR4_FIXED1 0x489
#define MSR_IA32_VMX_VMCS_ENUM 0x48A
#define MSR_IA32_VMX_PROCBASED_CTLS2 0x48B
#define MSR_IA32_VMX_EPT_VPID_CAP 0x48C
#define MSR_IA32_VMX_TRUE_PINBASED_CTLS 0x48D
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x48E
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x48F
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x490
#define MSR_IA32_VMX_VMFUNC 0x491
#define MSR_IA32_SYSENTER_CS 0x174
#define MSR_IA32_SYSENTER_ESP 0x175
#define MSR_IA32_SYSENTER_EIP 0x176
#define MSR_IA32_DEBUGCTL 0x1D9
#define MSR_LSTAR 0xC0000082
#define MSR_FS_BASE 0xC0000100
#define MSR_GS_BASE 0xC0000101
#define MSR_SHADOW_GS_BASE 0xC0000102
typedef union _MSR
{
struct
{
ULONG Low;
ULONG High;
};
ULONG64 Content;
} MSR, *PMSR;
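The MSR union above mirrors the EDX:EAX split that RDMSR/WRMSR (and the MSRRead/MSRWrite stubs in the .asm file) operate on. A small hedged sketch; the function name is made up.
// Hedged sketch: read IA32_VMX_BASIC and look at its two 32-bit halves.
VOID DumpVmxBasicSketch(VOID)
{
    MSR Msr = { 0 };
    Msr.Content = __readmsr(MSR_IA32_VMX_BASIC);
    DbgPrint("IA32_VMX_BASIC low=%08x high=%08x\n", Msr.Low, Msr.High);
}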
VMX.h
#pragma once
#include <ntddk.h>
#include "Ept.h"
//
// Constants //
//
// VMCS Region Size
#define VMCS_SIZE 4096
// VMXON Region Size
#define VMXON_SIZE 4096
// PIN-Based Execution
#define PIN_BASED_VM_EXECUTION_CONTROLS_EXTERNAL_INTERRUPT 0x00000001
#define PIN_BASED_VM_EXECUTION_CONTROLS_NMI_EXITING 0x00000004
#define PIN_BASED_VM_EXECUTION_CONTROLS_VIRTUAL_NMI 0x00000010
#define PIN_BASED_VM_EXECUTION_CONTROLS_ACTIVE_VMX_TIMER 0x00000020
#define PIN_BASED_VM_EXECUTION_CONTROLS_PROCESS_POSTED_INTERRUPTS 0x00000040
// CPU-Based Controls
#define CPU_BASED_VIRTUAL_INTR_PENDING 0x00000004
#define CPU_BASED_USE_TSC_OFFSETING 0x00000008
#define CPU_BASED_HLT_EXITING 0x00000080
#define CPU_BASED_INVLPG_EXITING 0x00000200
#define CPU_BASED_MWAIT_EXITING 0x00000400
#define CPU_BASED_RDPMC_EXITING 0x00000800
#define CPU_BASED_RDTSC_EXITING 0x00001000
#define CPU_BASED_CR3_LOAD_EXITING 0x00008000
#define CPU_BASED_CR3_STORE_EXITING 0x00010000
#define CPU_BASED_CR8_LOAD_EXITING 0x00080000
#define CPU_BASED_CR8_STORE_EXITING 0x00100000
#define CPU_BASED_TPR_SHADOW 0x00200000
#define CPU_BASED_VIRTUAL_NMI_PENDING 0x00400000
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000
#define CPU_BASED_MONITOR_TRAP_FLAG 0x08000000
#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000
#define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000
#define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
// Secondary CPU-Based Controls
#define CPU_BASED_CTL2_ENABLE_EPT 0x2
#define CPU_BASED_CTL2_RDTSCP 0x8
#define CPU_BASED_CTL2_ENABLE_VPID 0x20
#define CPU_BASED_CTL2_UNRESTRICTED_GUEST 0x80
#define CPU_BASED_CTL2_VIRTUAL_INTERRUPT_DELIVERY 0x200
#define CPU_BASED_CTL2_ENABLE_INVPCID 0x1000
#define CPU_BASED_CTL2_ENABLE_VMFUNC 0x2000
#define CPU_BASED_CTL2_ENABLE_XSAVE_XRSTORS 0x100000
// VM-exit Control Bits
#define VM_EXIT_IA32E_MODE 0x00000200
#define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
#define VM_EXIT_SAVE_GUEST_PAT 0x00040000
#define VM_EXIT_LOAD_HOST_PAT 0x00080000
// VM-entry Control Bits
#define VM_ENTRY_IA32E_MODE 0x00000200
#define VM_ENTRY_SMM 0x00000400
#define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
#define VM_ENTRY_LOAD_GUEST_PAT 0x00004000
// VM-exit Reasons
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT 3
#define EXIT_REASON_SIPI 4
#define EXIT_REASON_IO_SMI 5
#define EXIT_REASON_OTHER_SMI 6
#define EXIT_REASON_PENDING_VIRT_INTR 7
#define EXIT_REASON_PENDING_VIRT_NMI 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_GETSEC 11
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_RSM 17
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMXOFF 26
#define EXIT_REASON_VMXON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_GUEST_STATE 33
#define EXIT_REASON_MSR_LOADING 34
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_TRAP_FLAG 37
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_ACCESS_GDTR_OR_IDTR 46
#define EXIT_REASON_ACCESS_LDTR_OR_TR 47
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_RDRAND 57
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_RDSEED 61
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
#define EXIT_REASON_PCOMMIT 65
// CPUID RCX(s) - Based on Hyper-V
#define HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS 0x40000000
#define HYPERV_CPUID_INTERFACE 0x40000001
#define HYPERV_CPUID_VERSION 0x40000002
#define HYPERV_CPUID_FEATURES 0x40000003
#define HYPERV_CPUID_ENLIGHTMENT_INFO 0x40000004
#define HYPERV_CPUID_IMPLEMENT_LIMITS 0x40000005
#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
#define HYPERV_CPUID_MIN 0x40000005
#define HYPERV_CPUID_MAX 0x4000ffff
// Exit Qualifications for MOV for Control Register Access
#define TYPE_MOV_TO_CR 0
#define TYPE_MOV_FROM_CR 1
#define TYPE_CLTS 2
#define TYPE_LMSW 3
// Stack size
#define VMM_STACK_SIZE 0x8000
//
// Enums //
//
typedef enum _VMCS_FIELDS {
VIRTUAL_PROCESSOR_ID = 0x00000000,
POSTED_INTR_NOTIFICATION_VECTOR = 0x00000002,
EPTP_INDEX = 0x00000004,
#define GUEST_SEG_SELECTOR(sel) (GUEST_ES_SELECTOR + (sel) * 2) /* ES ... GS */
GUEST_ES_SELECTOR = 0x00000800,
GUEST_CS_SELECTOR = 0x00000802,
GUEST_SS_SELECTOR = 0x00000804,
GUEST_DS_SELECTOR = 0x00000806,
GUEST_FS_SELECTOR = 0x00000808,
GUEST_GS_SELECTOR = 0x0000080a,
GUEST_LDTR_SELECTOR = 0x0000080c,
GUEST_TR_SELECTOR = 0x0000080e,
GUEST_INTR_STATUS = 0x00000810,
GUEST_PML_INDEX = 0x00000812,
HOST_ES_SELECTOR = 0x00000c00,
HOST_CS_SELECTOR = 0x00000c02,
HOST_SS_SELECTOR = 0x00000c04,
HOST_DS_SELECTOR = 0x00000c06,
HOST_FS_SELECTOR = 0x00000c08,
HOST_GS_SELECTOR = 0x00000c0a,
HOST_TR_SELECTOR = 0x00000c0c,
IO_BITMAP_A = 0x00002000,
IO_BITMAP_B = 0x00002002,
MSR_BITMAP = 0x00002004,
VM_EXIT_MSR_STORE_ADDR = 0x00002006,
VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
VM_ENTRY_MSR_LOAD_ADDR = 0x0000200a,
PML_ADDRESS = 0x0000200e,
TSC_OFFSET = 0x00002010,
VIRTUAL_APIC_PAGE_ADDR = 0x00002012,
APIC_ACCESS_ADDR = 0x00002014,
PI_DESC_ADDR = 0x00002016,
VM_FUNCTION_CONTROL = 0x00002018,
EPT_POINTER = 0x0000201a,
EOI_EXIT_BITMAP0 = 0x0000201c,
#define EOI_EXIT_BITMAP(n) (EOI_EXIT_BITMAP0 + (n) * 2) /* n = 0...3 */
EPTP_LIST_ADDR = 0x00002024,
VMREAD_BITMAP = 0x00002026,
VMWRITE_BITMAP = 0x00002028,
VIRT_EXCEPTION_INFO = 0x0000202a,
XSS_EXIT_BITMAP = 0x0000202c,
TSC_MULTIPLIER = 0x00002032,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
VMCS_LINK_POINTER = 0x00002800,
VMCS_LINK_POINTER_HIGH = 0x00002801,
GUEST_IA32_DEBUGCTL = 0x00002802,
GUEST_IA32_DEBUGCTL_HIGH = 0x00002803,
GUEST_PAT = 0x00002804,
GUEST_EFER = 0x00002806,
GUEST_PERF_GLOBAL_CTRL = 0x00002808,
GUEST_PDPTE0 = 0x0000280a,
#define GUEST_PDPTE(n) (GUEST_PDPTE0 + (n) * 2) /* n = 0...3 */
GUEST_BNDCFGS = 0x00002812,
HOST_PAT = 0x00002c00,
HOST_EFER = 0x00002c02,
HOST_PERF_GLOBAL_CTRL = 0x00002c04,
PIN_BASED_VM_EXEC_CONTROL = 0x00004000,
CPU_BASED_VM_EXEC_CONTROL = 0x00004002,
EXCEPTION_BITMAP = 0x00004004,
PAGE_FAULT_ERROR_CODE_MASK = 0x00004006,
PAGE_FAULT_ERROR_CODE_MATCH = 0x00004008,
CR3_TARGET_COUNT = 0x0000400a,
VM_EXIT_CONTROLS = 0x0000400c,
VM_EXIT_MSR_STORE_COUNT = 0x0000400e,
VM_EXIT_MSR_LOAD_COUNT = 0x00004010,
VM_ENTRY_CONTROLS = 0x00004012,
VM_ENTRY_MSR_LOAD_COUNT = 0x00004014,
VM_ENTRY_INTR_INFO = 0x00004016,
VM_ENTRY_EXCEPTION_ERROR_CODE = 0x00004018,
VM_ENTRY_INSTRUCTION_LEN = 0x0000401a,
TPR_THRESHOLD = 0x0000401c,
SECONDARY_VM_EXEC_CONTROL = 0x0000401e,
PLE_GAP = 0x00004020,
PLE_WINDOW = 0x00004022,
VM_INSTRUCTION_ERROR = 0x00004400,
VM_EXIT_REASON = 0x00004402,
VM_EXIT_INTR_INFO = 0x00004404,
VM_EXIT_INTR_ERROR_CODE = 0x00004406,
IDT_VECTORING_INFO = 0x00004408,
IDT_VECTORING_ERROR_CODE = 0x0000440a,
VM_EXIT_INSTRUCTION_LEN = 0x0000440c,
VMX_INSTRUCTION_INFO = 0x0000440e,
#define GUEST_SEG_LIMIT(sel) (GUEST_ES_LIMIT + (sel) * 2) /* ES ... GS */
GUEST_ES_LIMIT = 0x00004800,
GUEST_CS_LIMIT = 0x00004802,
GUEST_SS_LIMIT = 0x00004804,
GUEST_DS_LIMIT = 0x00004806,
GUEST_FS_LIMIT = 0x00004808,
GUEST_GS_LIMIT = 0x0000480a,
GUEST_LDTR_LIMIT = 0x0000480c,
GUEST_TR_LIMIT = 0x0000480e,
GUEST_GDTR_LIMIT = 0x00004810,
GUEST_IDTR_LIMIT = 0x00004812,
#define GUEST_SEG_AR_BYTES(sel) (GUEST_ES_AR_BYTES + (sel) * 2) /* ES ... GS */
GUEST_ES_AR_BYTES = 0x00004814,
GUEST_CS_AR_BYTES = 0x00004816,
GUEST_SS_AR_BYTES = 0x00004818,
GUEST_DS_AR_BYTES = 0x0000481a,
GUEST_FS_AR_BYTES = 0x0000481c,
GUEST_GS_AR_BYTES = 0x0000481e,
GUEST_LDTR_AR_BYTES = 0x00004820,
GUEST_TR_AR_BYTES = 0x00004822,
GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
GUEST_ACTIVITY_STATE = 0x00004826,
GUEST_SMBASE = 0x00004828,
GUEST_SYSENTER_CS = 0x0000482a,
GUEST_PREEMPTION_TIMER = 0x0000482e,
HOST_SYSENTER_CS = 0x00004c00,
CR0_GUEST_HOST_MASK = 0x00006000,
CR4_GUEST_HOST_MASK = 0x00006002,
CR0_READ_SHADOW = 0x00006004,
CR4_READ_SHADOW = 0x00006006,
CR3_TARGET_VALUE0 = 0x00006008,
CR3_TARGET_VALUE1 = 0x0000600a,
CR3_TARGET_VALUE2 = 0x0000600c,
CR3_TARGET_VALUE3 = 0x0000600e,
EXIT_QUALIFICATION = 0x00006400,
GUEST_LINEAR_ADDRESS = 0x0000640a,
GUEST_CR0 = 0x00006800,
GUEST_CR3 = 0x00006802,
GUEST_CR4 = 0x00006804,
#define GUEST_SEG_BASE(sel) (GUEST_ES_BASE + (sel) * 2) /* ES ... GS */
GUEST_ES_BASE = 0x00006806,
GUEST_CS_BASE = 0x00006808,
GUEST_SS_BASE = 0x0000680a,
GUEST_DS_BASE = 0x0000680c,
GUEST_FS_BASE = 0x0000680e,
GUEST_GS_BASE = 0x00006810,
GUEST_LDTR_BASE = 0x00006812,
GUEST_TR_BASE = 0x00006814,
GUEST_GDTR_BASE = 0x00006816,
GUEST_IDTR_BASE = 0x00006818,
GUEST_DR7 = 0x0000681a,
GUEST_RSP = 0x0000681c,
GUEST_RIP = 0x0000681e,
GUEST_RFLAGS = 0x00006820,
GUEST_PENDING_DBG_EXCEPTIONS = 0x00006822,
GUEST_SYSENTER_ESP = 0x00006824,
GUEST_SYSENTER_EIP = 0x00006826,
HOST_CR0 = 0x00006c00,
HOST_CR3 = 0x00006c02,
HOST_CR4 = 0x00006c04,
HOST_FS_BASE = 0x00006c06,
HOST_GS_BASE = 0x00006c08,
HOST_TR_BASE = 0x00006c0a,
HOST_GDTR_BASE = 0x00006c0c,
HOST_IDTR_BASE = 0x00006c0e,
HOST_SYSENTER_ESP = 0x00006c10,
HOST_SYSENTER_EIP = 0x00006c12,
HOST_RSP = 0x00006c14,
HOST_RIP = 0x00006c16,
};
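These encodings are passed directly to the compiler intrinsics __vmx_vmread/__vmx_vmwrite from intrin.h (or to VMREAD/VMWRITE in assembly). A hedged sketch follows; the function name is made up, and both intrinsics return 0 on success, which is worth checking in real code.
// Hedged sketch: read the guest RIP from the current VMCS while in VMX root mode.
size_t ReadGuestRipSketch(VOID)
{
    size_t GuestRip = 0;
    __vmx_vmread(GUEST_RIP, &GuestRip); // GUEST_RIP = 0x681e from the enum above
    return GuestRip;
}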
//
// Structures & Unions //
//
typedef struct _VMX_VMXOFF_STATE
{
BOOLEAN IsVmxoffExecuted; // Indicates whether VMXOFF has been executed
UINT64 GuestRip; // Guest RIP to return to after VMXOFF
UINT64 GuestRsp; // Guest RSP to restore after VMXOFF
} VMX_VMXOFF_STATE, *PVMX_VMXOFF_STATE;
typedef struct _VIRTUAL_MACHINE_STATE
{
BOOLEAN IsOnVmxRootMode; // Indicates whether the current logical core is executing in VMX root mode
BOOLEAN IncrementRip; // Indicates whether the guest RIP should be advanced past the exiting instruction (used mainly in EPT routines)
BOOLEAN HasLaunched; // Indicates whether this core has been virtualized
UINT64 VmxonRegionPhysicalAddress; // Vmxon region physical address
UINT64 VmxonRegionVirtualAddress; // VMXON region virtual address
UINT64 VmcsRegionPhysicalAddress; // VMCS region physical address
UINT64 VmcsRegionVirtualAddress; // VMCS region virtual address
UINT64 VmmStack; // Stack for VMM in VM-Exit State
UINT64 MsrBitmapVirtualAddress; // Msr Bitmap Virtual Address
UINT64 MsrBitmapPhysicalAddress; // Msr Bitmap Physical Address
VMX_VMXOFF_STATE VmxoffState; // Shows the vmxoff state of the guest
PEPT_HOOKED_PAGE_DETAIL MtfEptHookRestorePoint; // Details of the hooked page to restore on the Monitor Trap Flag VM-exit
} VIRTUAL_MACHINE_STATE, *PVIRTUAL_MACHINE_STATE;
typedef struct _VMX_EXIT_QUALIFICATION_IO_INSTRUCTION
{
union
{
ULONG64 Flags;
struct
{
ULONG64 SizeOfAccess : 3;
ULONG64 AccessType : 1;
ULONG64 StringInstruction : 1;
ULONG64 RepPrefixed : 1;
ULONG64 OperandEncoding : 1;
ULONG64 Reserved1 : 9;
ULONG64 PortNumber : 16;
};
};
} VMX_EXIT_QUALIFICATION_IO_INSTRUCTION, *PVMX_EXIT_QUALIFICATION_IO_INSTRUCTION;
typedef union _MOV_CR_QUALIFICATION
{
ULONG_PTR All;
struct
{
ULONG ControlRegister : 4;
ULONG AccessType : 2;
ULONG LMSWOperandType : 1;
ULONG Reserved1 : 1;
ULONG Register : 4;
ULONG Reserved2 : 4;
ULONG LMSWSourceData : 16;
ULONG Reserved3;
} Fields;
} MOV_CR_QUALIFICATION, *PMOV_CR_QUALIFICATION;
//
// Functions //
//
// Initialize VMX Operation
BOOLEAN VmxInitializer();
// Terminate VMX Operation
BOOLEAN VmxTerminate();
// Allocate VMX Regions
BOOLEAN VmxAllocateVmxonRegion(VIRTUAL_MACHINE_STATE* CurrentGuestState);
BOOLEAN VmxAllocateVmcsRegion(VIRTUAL_MACHINE_STATE* CurrentGuestState);
BOOLEAN VmxAllocateVmmStack(INT ProcessorID);
BOOLEAN VmxAllocateMsrBitmap(INT ProcessorID);
// VMX Instructions
VOID VmxVmptrst();
VOID VmxVmresume();
VOID VmxVmxoff();
BOOLEAN VmxLoadVmcs(VIRTUAL_MACHINE_STATE* CurrentGuestState);
BOOLEAN VmxClearVmcsState(VIRTUAL_MACHINE_STATE* CurrentGuestState);
// Virtualize an already running machine
BOOLEAN VmxVirtualizeCurrentSystem(PVOID GuestStack);
// Configure VMCS
BOOLEAN VmxSetupVmcs(VIRTUAL_MACHINE_STATE* CurrentGuestState, PVOID GuestStack);
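Finally, the EXIT_REASON_* constants above are what the VM-exit handler switches on after reading VM_EXIT_REASON. A hedged sketch of such a dispatch; the function name is made up, and a real handler would also read VM_EXIT_INSTRUCTION_LEN and advance the guest RIP for instruction-caused exits.
// Hedged sketch: minimal exit-reason dispatch, VMX root mode only.
VOID DispatchExitSketch(VOID)
{
    size_t ExitReason = 0;
    __vmx_vmread(VM_EXIT_REASON, &ExitReason);
    switch (ExitReason & 0xFFFF) // low 16 bits hold the basic exit reason
    {
    case EXIT_REASON_CPUID:
        // emulate CPUID for the guest here
        break;
    case EXIT_REASON_EPT_VIOLATION:
        // consult EXIT_QUALIFICATION and GUEST_PHYSICAL_ADDRESS here
        break;
    default:
        break;
    }
}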