From e6c99208fc0954a0f559ad8c462f26ec8ef6d736 Mon Sep 17 00:00:00 2001 From: Requiem Date: Mon, 16 Feb 2026 00:14:34 +0100 Subject: [PATCH 1/3] feat: improved WINE checks --- src/vmaware.hpp | 91 +++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 84 insertions(+), 7 deletions(-) diff --git a/src/vmaware.hpp b/src/vmaware.hpp index c02f8020..4acdca75 100644 --- a/src/vmaware.hpp +++ b/src/vmaware.hpp @@ -7575,19 +7575,96 @@ struct VM { } #endif - const HMODULE k32 = GetModuleHandleA("kernel32.dll"); - if (!k32) { + const HMODULE kernel32 = GetModuleHandleA("kernel32.dll"); + const HMODULE ntdll = util::get_ntdll(); + if (!kernel32 || !ntdll) { return false; } - const char* names[] = { "wine_get_unix_file_name" }; - void* functions[1] = { nullptr }; - util::get_function_address(k32, names, functions, _countof(names)); + const char* kernel32_names[] = { "wine_get_unix_file_name" }; + void* kernel32_functions[ARRAYSIZE(kernel32_names)] = {}; + util::get_function_address(kernel32, kernel32_names, kernel32_functions, _countof(kernel32_names)); + + if (kernel32_functions[0] != nullptr) { + return core::add(brands::WINE); + } + + const char* ntdll_names[] = { "NtAllocateVirtualMemory", "NtFreeVirtualMemory", "NtProtectVirtualMemory" }; + void* ntdll_functions[ARRAYSIZE(ntdll_names)] = {}; + util::get_function_address(ntdll, ntdll_names, ntdll_functions, _countof(ntdll_names)); + + // https://www.unknowncheats.me/forum/anti-cheat-bypass/729130-article-wine-detection.html + const UINT oldMode = SetErrorMode(SEM_NOALIGNMENTFAULTEXCEPT); + + static constexpr unsigned char movaps_stub[] = { + 0x0F, 0x28, 0x01, // movaps xmm0, XMMWORD PTR [rcx] (Windows x64: arg in RCX) + 0xC3 // ret + }; + + typedef void (*movaps_fn)(void*); + + using NtAllocateVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG); + using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG); + using NtProtectVirtualMemoryFn = 
NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG); + + const auto ntAllocateVirtualMemory = reinterpret_cast<NtAllocateVirtualMemoryFn>(ntdll_functions[0]); + const auto ntFreeVirtualMemory = reinterpret_cast<NtFreeVirtualMemoryFn>(ntdll_functions[1]); + const auto ntProtectVirtualMemory = reinterpret_cast<NtProtectVirtualMemoryFn>(ntdll_functions[2]); + + if (ntAllocateVirtualMemory == nullptr || ntFreeVirtualMemory == nullptr || ntProtectVirtualMemory == nullptr) { + SetErrorMode(oldMode); + return false; + } + + PVOID execMem = NULL; + const HANDLE hCurrentProcess = reinterpret_cast<HANDLE>(-1); + SIZE_T regionSize = sizeof movaps_stub; + NTSTATUS st = ntAllocateVirtualMemory(hCurrentProcess, &execMem, 0, &regionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (!NT_SUCCESS(st) || execMem == NULL) { + SetErrorMode(oldMode); + return false; + } + + memcpy(execMem, movaps_stub, sizeof movaps_stub); - if (functions[0] != nullptr) { + { + PVOID tmpBase = execMem; + SIZE_T tmpSz = regionSize; + ULONG oldProt = 0; + st = ntProtectVirtualMemory(hCurrentProcess, &tmpBase, &tmpSz, PAGE_EXECUTE_READ, &oldProt); + if (!NT_SUCCESS(st)) { + PVOID freeBase = execMem; + SIZE_T freeSize = 0; + ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + SetErrorMode(oldMode); + return false; + } + } + + __declspec(align(16)) unsigned char buffer[32] = { 0 }; + void* misaligned = buffer + 1; + + __try { + ((movaps_fn)execMem)(misaligned); + } + __except (EXCEPTION_EXECUTE_HANDLER) { + // free executable memory, restore error mode, then return the WINE marker + PVOID freeBase = execMem; + SIZE_T freeSize = 0; + ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + + SetErrorMode(oldMode); return core::add(brands::WINE); } + // normal path: free exec memory, restore mode, return false + { + PVOID freeBase = execMem; + SIZE_T freeSize = 0; + ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + } + + SetErrorMode(oldMode); return false; } @@ -8184,7 +8261,7 @@ struct VM { PSIZE_T RegionSize, ULONG
AllocationType, ULONG Protect - ); + ); using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE ProcessHandle, PVOID* BaseAddress, PSIZE_T RegionSize, ULONG FreeType); constexpr ULONG SystemModuleInformation = 11; From 17c02835986223c260d9892f8550ec242bc4e5ba Mon Sep 17 00:00:00 2001 From: Requiem Date: Mon, 16 Feb 2026 01:38:20 +0100 Subject: [PATCH 2/3] style: updated library following all guidelines --- CONTRIBUTING.md | 5 - docs/documentation.md | 148 ++--- src/vmaware.hpp | 1450 ++++++++++++++++++++--------------------- 3 files changed, 797 insertions(+), 806 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 313fc816..b6942570 100755 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -68,11 +68,6 @@ int main() { } ``` -> [!WARNING] -> ## Note from the developer: -> It should be mentioned that not all of the codebase is formatted this way. This standard guideline has been introduced 2 years after the project has started, and the lack of any guideline has resulted in the codebase looking fragmented, inconsistent, and very different in some portions due to differing coding styles among developers. This is completely my fault, and it has accumulated technical debt over the years. Although the current state isn't formatted consistently, the guideline is meant to slowly evolve the library into a much simpler version that's approachable to anybody trying to contribute and read through the code. - - ## I want to add a new technique, how would I do that? There's a few steps that should be taken: 1. Make sure to add the technique name in the enums of all the techniques in the appropriate place. 
diff --git a/docs/documentation.md b/docs/documentation.md index ab8a0a03..35eb4698 100644 --- a/docs/documentation.md +++ b/docs/documentation.md @@ -515,85 +515,85 @@ VMAware provides a convenient way to not only check for VMs, but also have the f | `VM::HYPERVISOR_BIT` | Check if hypervisor feature bit in CPUID ECX bit 31 is enabled (always false for physical CPUs) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4376) | | `VM::HYPERVISOR_STR` | Check for hypervisor brand string length (would be around 2 characters in a host machine) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4402) | | `VM::TIMER` | Check for timing anomalies in the system | 🐧🪟🍏 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4588) | -| `VM::THREAD_COUNT` | Check if there are only 1 or 2 threads, which is a common pattern in VMs with default settings, nowadays physical CPUs should have at least 4 threads for modern CPUs | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7227) | -| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5284) | -| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6136) | -| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5165) | -| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5189) | -| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5214) | -| 
`VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5232) | -| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5247) | -| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5390) | -| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5431) | -| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7527) | -| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7251) | -| `VM::WINE` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7558) | -| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7597) | -| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6147) | -| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5441) | -| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Window product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7657) | +| `VM::THREAD_COUNT` | Check if there are 
only 1 or 2 threads, which is a common pattern in VMs with default settings, nowadays physical CPUs should have at least 4 threads for modern CPUs | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7228) | +| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5285) | +| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6137) | +| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5166) | +| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5190) | +| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5215) | +| `VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5233) | +| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5248) | +| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5391) | +| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. 
If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5432) | +| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7528) | +| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7252) | +| `VM::WINE` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7559) | +| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7669) | +| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6148) | +| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5442) | +| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Window product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7729) | | `VM::BOCHS_CPU` | Check for various Bochs-related emulation oversights through CPU checks | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4430) | -| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7287) | -| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7320) | -| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 
100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7417) | -| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_mm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7458) | -| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7766) | +| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7288) | +| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7321) | +| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7418) | +| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_mm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7459) | +| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7838) | | `VM::SYSTEM_REGISTERS` | | | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L1) | -| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5470) | -| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5982) | -| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5779) | -| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg 
output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5798) | -| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7817) | -| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7842) | -| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7903) | +| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5471) | +| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5983) | +| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5780) | +| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5799) | +| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7889) | +| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7914) | +| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7975) | | `VM::THREAD_MISMATCH` | Check if the system's thread count matches the expected thread count for the detected CPU model | 
🐧🪟🍏 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4510) | -| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7989) | -| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8045) | -| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6384) | -| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8101) | -| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8136) | -| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5486) | +| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8061) | +| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8117) | +| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6385) | +| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8173) | +| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 
25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8208) | +| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5487) | | `VM::CPUID_SIGNATURE` | Check for signatures in leaf 0x40000001 in CPUID | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4538) | | `VM::KGT_SIGNATURE` | Check for Intel KGT (Trusty branch) hypervisor signature in CPUID | 🐧🪟🍏 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4564) | -| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5567) | -| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5596) | -| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5624) | -| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5672) | -| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5702) | -| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5756) | -| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5832) | -| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | 
| [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5854) | -| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5937) | -| `VM::PODMAN_FILE` | Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5967) | -| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5999) | -| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8153) | -| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8251) | -| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8490) | -| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8596) | -| `VM::DEVICE_HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8634) | -| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. 
| 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6027) | -| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8737) | -| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8767) | -| `VM::AMD_SEV` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5509) | -| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8835) | -| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6444) | -| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6057) | -| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8929) | -| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6084) | -| `VM::PCI_DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6871) | -| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device 
signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9037) | -| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9182) | -| `VM::UD` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8929) | -| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9457) | -| `VM::DBVM` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9504) | -| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9623) | -| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7502) | -| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9726) | -| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9912) | -| `VM::SMBIOS_INTEGRITY` | Check if SMBIOS is malformed/corrupted in a way that is typical for VMs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10460) | -| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10471) | -| 
`VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10727) | -| `VM::CLOCK` | Check the presence of system timers | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11195) | +| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5568) | +| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5597) | +| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5625) | +| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5673) | +| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5703) | +| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5757) | +| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5833) | +| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5855) | +| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5938) | +| `VM::PODMAN_FILE` | 
Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5968) | +| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6000) | +| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8225) | +| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8323) | +| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8562) | +| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8668) | +| `VM::DEVICE_HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8706) | +| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. 
| 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6028) | +| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8809) | +| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8839) | +| `VM::AMD_SEV` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5510) | +| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8907) | +| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6445) | +| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6058) | +| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9001) | +| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6085) | +| `VM::PCI_DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6872) | +| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device 
signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9109) | +| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9254) | +| `VM::UD` | Check if a hypervisor mishandles the delivery of the #UD (invalid opcode) exception | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9001) | +| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9529) | +| `VM::DBVM` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9576) | +| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9695) | +| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7503) | +| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9796) | +| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9986) | +| `VM::SMBIOS_INTEGRITY` | Check if SMBIOS is malformed/corrupted in a way that is typical for VMs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10534) | +| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10545) | +| 
`VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10801) | +| `VM::CLOCK` | Check the presence of system timers | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11269) |
diff --git a/src/vmaware.hpp b/src/vmaware.hpp index 4acdca75..ba92a95c 100644 --- a/src/vmaware.hpp +++ b/src/vmaware.hpp @@ -58,10 +58,10 @@ * - struct for internal cpu operations => line 718 * - struct for internal memoization => line 3042 * - struct for internal utility functions => line 3224 - * - struct for internal core components => line 11344 + * - struct for internal core components => line 11418 * - start of VM detection technique list => line 4279 - * - start of public VM detection functions => line 11722 - * - start of externally defined variables => line 12742 + * - start of public VM detection functions => line 11796 + * - start of externally defined variables => line 12816 * * * ============================== EXAMPLE =================================== @@ -90,7 +90,7 @@ * Welcome! This is just a preliminary text to lay the context of how it works, * how it's structured, and to guide anybody who's trying to understand the whole code. * Reading over 12k+ lines of other people's C++ code is obviously not an easy task, - * and that's perfectly understandable. We'd struggle as well if I were in your position + * and that's perfectly understandable. We'd struggle as well if we were in your position * while not even knowing where to start. 
So here's a more human-friendly explanation: * * @@ -3352,8 +3352,8 @@ struct VM { #elif (WINDOWS) bool is_admin = false; HANDLE hToken = nullptr; - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); - if (OpenProcessToken(hCurrentProcess, TOKEN_QUERY, &hToken)) { + const HANDLE current_process = reinterpret_cast(-1LL); + if (OpenProcessToken(current_process, TOKEN_QUERY, &hToken)) { TOKEN_ELEVATION elevation{}; DWORD dwSize; if (GetTokenInformation(hToken, TokenElevation, &elevation, sizeof(elevation), &dwSize)) { @@ -3464,7 +3464,7 @@ struct VM { return util::make_unique(); #else #if (LINUX || APPLE) - struct FileDeleter { + struct file_deleter { void operator()(FILE* f) const noexcept { if (f) { pclose(f); @@ -3472,7 +3472,7 @@ struct VM { } }; - std::unique_ptr pipe(popen(cmd, "r"), FileDeleter()); + std::unique_ptr pipe(popen(cmd, "r"), file_deleter()); if (!pipe) { return util::make_unique(); } @@ -3481,9 +3481,9 @@ struct VM { char* line = nullptr; // to ensure line is freed even if string::append throws std::bad_alloc - struct LineGuard { + struct line_guard { char*& ptr; - ~LineGuard() { if (ptr) free(ptr); } + ~line_guard() { if (ptr) free(ptr); } } guard{ line }; size_t len = 0; @@ -3581,11 +3581,11 @@ struct VM { [[nodiscard]] static bool is_running_under_translator() { #if (WINDOWS && _WIN32_WINNT >= _WIN32_WINNT_WIN10) - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); + const HANDLE current_process = reinterpret_cast(-1LL); USHORT procMachine = 0, nativeMachine = 0; const auto pIsWow64Process2 = &IsWow64Process2; - if (pIsWow64Process2(hCurrentProcess, &procMachine, &nativeMachine)) { + if (pIsWow64Process2(current_process, &procMachine, &nativeMachine)) { if (nativeMachine == IMAGE_FILE_MACHINE_ARM64 && (procMachine == IMAGE_FILE_MACHINE_AMD64 || procMachine == IMAGE_FILE_MACHINE_I386)) { debug("Translator detected x64/x86 process on ARM64"); @@ -3595,7 +3595,7 @@ struct VM { // only if we got MACHINE_UNKNOWN on process but native is ARM64 
if (nativeMachine == IMAGE_FILE_MACHINE_ARM64) { - using PGetProcessInformation = BOOL(__stdcall*)(HANDLE, PROCESS_INFORMATION_CLASS, PVOID, DWORD); + using get_process_information = BOOL(__stdcall*)(HANDLE, PROCESS_INFORMATION_CLASS, PVOID, DWORD); const HMODULE ntdll = util::get_ntdll(); if (ntdll == nullptr) { return false; @@ -3605,15 +3605,15 @@ struct VM { void* funcs[1] = { nullptr }; util::get_function_address(ntdll, names, funcs, 1); - PGetProcessInformation pGetProcInfo = reinterpret_cast(funcs[0]); - if (pGetProcInfo) { + get_process_information get_proc_info = reinterpret_cast(funcs[0]); + if (get_proc_info) { struct PROCESS_MACHINE_INFORMATION { USHORT ProcessMachine; USHORT Res0; DWORD MachineAttributes; } pmInfo = {}; // ProcessMachineTypeInfo == 9 per MS Q&A - if (pGetProcInfo(hCurrentProcess, (PROCESS_INFORMATION_CLASS)9, &pmInfo, sizeof(pmInfo))) { + if (get_proc_info(current_process, (PROCESS_INFORMATION_CLASS)9, &pmInfo, sizeof(pmInfo))) { if (pmInfo.ProcessMachine == IMAGE_FILE_MACHINE_AMD64 || pmInfo.ProcessMachine == IMAGE_FILE_MACHINE_I386) { debug("Translator detected x64/x86 process on ARM64 by fallback"); return true; @@ -3947,8 +3947,8 @@ struct VM { #if (WINDOWS) // retrieves the addresses of specified functions from a loaded module using the export directory, manual implementation of GetProcAddress static void get_function_address(const HMODULE hModule, const char* names[], void** functions, size_t count) { - using FuncMap = std::unordered_map; - static std::unordered_map function_cache; + using func_map = std::unordered_map; + static std::unordered_map function_cache; for (size_t i = 0; i < count; ++i) functions[i] = nullptr; if (!hModule) return; @@ -4036,7 +4036,7 @@ struct VM { const DWORD* funcRvas = reinterpret_cast(base + addr_funcs); const WORD* ordinals = reinterpret_cast(base + addr_ord); - FuncMap& module_cache = function_cache[hModule]; + func_map& module_cache = function_cache[hModule]; for (size_t i = 0; i < count; ++i) { 
const char* current_name = names[i]; @@ -4088,9 +4088,9 @@ struct VM { [[nodiscard]] static HMODULE get_ntdll() { - static HMODULE cachedNtdll = nullptr; - if (cachedNtdll != nullptr) { - return cachedNtdll; + static HMODULE cached_ntdll = nullptr; + if (cached_ntdll != nullptr) { + return cached_ntdll; } #ifndef _WINTERNL_ @@ -4151,15 +4151,15 @@ struct VM { #endif if (!peb) { // not x86 or tampered with - const HMODULE h = GetModuleHandleW(L"ntdll.dll"); - if (h) cachedNtdll = h; - return h; + const HMODULE ntdll = GetModuleHandleW(L"ntdll.dll"); + if (ntdll) cached_ntdll = ntdll; + return ntdll; } PPEB_LDR_DATA ldr = peb->Ldr; if (!ldr) { const HMODULE h = GetModuleHandleW(L"ntdll.dll"); - if (h) cachedNtdll = h; + if (h) cached_ntdll = h; return h; } @@ -4167,8 +4167,8 @@ struct VM { #define CONTAINING_RECORD(address, type, field) ((type *)((char*)(address) - (size_t)(&((type *)0)->field))) #endif - constexpr WCHAR targetName[] = L"ntdll.dll"; - constexpr size_t targetLen = (std::size(targetName) - 1); + constexpr WCHAR target_name[] = L"ntdll.dll"; + constexpr size_t target_length = (std::size(target_name) - 1); LIST_ENTRY* head = &ldr->InMemoryOrderModuleList; // static analyzers don't know that InMemoryOrderModuleList is a circular list managed by the loader @@ -4180,35 +4180,35 @@ struct VM { auto* fullname = &ent->FullDllName; if (!fullname->Buffer || fullname->Length == 0) continue; - const auto totalChars = static_cast(fullname->Length / sizeof(WCHAR)); + const auto total_chars = static_cast(fullname->Length / sizeof(WCHAR)); - size_t start = totalChars; + size_t start = total_chars; while (start > 0) { const WCHAR c = fullname->Buffer[start - 1]; if (c == L'\\' || c == L'/') break; --start; } - const size_t fileLen = totalChars - start; - if (fileLen != targetLen) continue; + const size_t file_length = total_chars - start; + if (file_length != target_length) continue; bool match = true; - for (size_t i = 0; i < fileLen; ++i) { + for (size_t i = 0; i < 
file_length; ++i) { WCHAR a = fullname->Buffer[start + i]; - WCHAR b = targetName[i]; + WCHAR b = target_name[i]; if (a >= L'A' && a <= L'Z') a = static_cast(a + 32); if (b >= L'A' && b <= L'Z') b = static_cast(b + 32); if (a != b) { match = false; break; } } if (match) { - cachedNtdll = reinterpret_cast(ent->DllBase); - return cachedNtdll; + cached_ntdll = reinterpret_cast(ent->DllBase); + return cached_ntdll; } } const HMODULE h = GetModuleHandleW(L"ntdll.dll"); - if (h) cachedNtdll = h; + if (h) cached_ntdll = h; return h; } @@ -4307,15 +4307,15 @@ struct VM { #else const std::string& brand = cpu::get_brand(); - struct CStrView { + struct cstrview { const char* data; std::size_t size; - constexpr CStrView(const char* d, std::size_t s) noexcept + constexpr cstrview(const char* d, std::size_t s) noexcept : data(d), size(s) { } }; - static constexpr std::array checks{ { + static constexpr std::array checks{ { { "qemu", 4 }, { "kvm", 3 }, { "vbox", 4 }, @@ -4620,14 +4620,14 @@ struct VM { using NtQueryInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG, PULONG); using NtSetInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG); - const auto pNtQueryInformationThread = reinterpret_cast(funcs[0]); - const auto pNtSetInformationThread = reinterpret_cast(funcs[1]); - if (!pNtQueryInformationThread || !pNtSetInformationThread) { + const auto nt_query_information_thread = reinterpret_cast(funcs[0]); + const auto nt_set_information_thread = reinterpret_cast(funcs[1]); + if (!nt_query_information_thread || !nt_set_information_thread) { return true; } - constexpr int ThreadBasicInformation = 0; - constexpr int ThreadAffinityMask = 4; + constexpr int thread_basic_information = 0; + constexpr int thread_affinity_mask = 4; struct CLIENT_ID { ULONG_PTR UniqueProcess; @@ -4641,13 +4641,13 @@ struct VM { LONG Priority; LONG BasePriority; } tbi; - const HANDLE hCurrentThread = reinterpret_cast(-2LL); + const HANDLE current_thread = 
reinterpret_cast(-2LL); // current affinity memset(&tbi, 0, sizeof(tbi)); - NTSTATUS status = pNtQueryInformationThread( - hCurrentThread, - ThreadBasicInformation, + NTSTATUS status = nt_query_information_thread( + current_thread, + thread_basic_information, &tbi, sizeof(tbi), nullptr @@ -4657,33 +4657,33 @@ struct VM { return false; } - const ULONG_PTR originalAffinity = tbi.AffinityMask; + const ULONG_PTR original_affinity = tbi.AffinityMask; // new affinity - const DWORD_PTR wantedMask = static_cast(1); - status = pNtSetInformationThread( - hCurrentThread, - ThreadAffinityMask, - reinterpret_cast(const_cast(&wantedMask)), - static_cast(sizeof(wantedMask)) + const DWORD_PTR wanted_mask = static_cast(1); + status = nt_set_information_thread( + current_thread, + thread_affinity_mask, + reinterpret_cast(const_cast(&wanted_mask)), + static_cast(sizeof(wanted_mask)) ); // setting a higher priority for the current thread actually makes the ration between rdtsc and other timers like QIT vary much more // contrary to what someone might think about preempting reschedule - DWORD_PTR prevMask = 0; + DWORD_PTR previous_mask = 0; if (status >= 0) { - prevMask = originalAffinity; // emulate SetThreadAffinityMask return + previous_mask = original_affinity; // emulate SetThreadAffinityMask return } else { - prevMask = 0; + previous_mask = 0; } #endif // check for RDTSCP support, we will use it later int regs[4] = { 0 }; cpu::cpuid(regs, 0x80000001); - const bool haveRdtscp = (regs[3] & (1u << 27)) != 0; - if (!haveRdtscp) { + const bool have_rdtscp = (regs[3] & (1u << 27)) != 0; + if (!have_rdtscp) { debug("TIMER: RDTSCP instruction not supported"); // __rdtscp should be supported nowadays return true; } @@ -4774,10 +4774,10 @@ struct VM { }; // first measurement (CPUID / VMEXIT) - const ULONG64 firstRatio = accumulate_and_measure(cp_ptr); + const ULONG64 first_ratio = accumulate_and_measure(cp_ptr); // second measurement (XOR / ALU) - const ULONG64 secondRatio = 
accumulate_and_measure(xor_ptr); + const ULONG64 second_ratio = accumulate_and_measure(xor_ptr); VMAWARE_UNUSED(dummy); @@ -4786,18 +4786,18 @@ struct VM { diff = firstRatio - secondRatio abs = (diff ^ mask) - mask */ - const ULONG64 diffMask = (ULONG64)0 - (ULONG64)(firstRatio < secondRatio); // all-ones if first ", firstRatio, ", Interrupt -> ", secondRatio, ", Ratio: ", difference); - - if (prevMask != 0) { - pNtSetInformationThread( - hCurrentThread, - ThreadAffinityMask, - reinterpret_cast(const_cast(&originalAffinity)), - static_cast(sizeof(originalAffinity)) + const ULONG64 diff_mask = (ULONG64)0 - (ULONG64)(first_ratio < second_ratio); // all-ones if first ", first_ratio, ", Interrupt -> ", second_ratio, ", Ratio: ", difference); + + if (previous_mask != 0) { + nt_set_information_thread( + current_thread, + thread_affinity_mask, + reinterpret_cast(const_cast(&original_affinity)), + static_cast(sizeof(original_affinity)) ); } @@ -4909,8 +4909,8 @@ struct VM { std::sort(absdev.begin(), absdev.end()); const u64 MAD = median_of_sorted(absdev, 0, absdev.size()); // convert MAD to an approximate standard-deviation-like measure - const long double kMADtoSigma = 1.4826L; // consistent for normal approx - const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kMADtoSigma); + const long double kmad_to_sigma = 1.4826L; // consistent for normal approx + const long double sigma = (MAD == 0) ? 
1.0L : (static_cast(MAD) * kmad_to_sigma); // find the densest small-valued cluster by sliding a fixed-count window // this locates the most concentrated group of samples (likely it would be the true VMEXIT cluster) @@ -5078,7 +5078,7 @@ struct VM { } }; - const entropy_provider entropyProv{}; + const entropy_provider entropy_prov{}; // Intel leaves on an AMD CPU and viceversa will still work for this probe // for leafs like 0 that just returns static data, like "AuthenticAMD" or "GenuineIntel", a fast exit path could be made @@ -5100,7 +5100,7 @@ struct VM { }; constexpr size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]); - const size_t iterations = static_cast(rng(100, 200, [&entropyProv]() noexcept { return entropyProv(); })); + const size_t iterations = static_cast(rng(100, 200, [&entropy_prov]() noexcept { return entropy_prov(); })); // pre-allocate sample buffer and touch pages to avoid page faults by MMU during measurement std::vector samples; @@ -5285,10 +5285,10 @@ struct VM { * @implements VM::MAC */ [[nodiscard]] static bool mac_address_check() { - struct FDGuard { + struct fdguard { int fd; - explicit FDGuard(int fd = -1) : fd(fd) {} - ~FDGuard() { if (fd != -1) ::close(fd); } + explicit fdguard(int fd = -1) : fd(fd) {} + ~fdguard() { if (fd != -1) ::close(fd); } int get() const { return fd; } int release() { int tmp = fd; fd = -1; return tmp; } }; @@ -5303,7 +5303,7 @@ struct VM { if (sock == -1) { return false; } - FDGuard sockGuard(sock); // will close on function exit + fdguard sockGuard(sock); // will close on function exit ifc.ifc_len = sizeof(buf); ifc.ifc_buf = buf; @@ -6231,20 +6231,20 @@ struct VM { #elif (WINDOWS && x86) SYSTEM_INFO si; GetNativeSystemInfo(&si); - DWORD_PTR originalMask = 0; - const HANDLE hCurrentThread = reinterpret_cast(-2LL); + DWORD_PTR original_mask = 0; + const HANDLE current_thread = reinterpret_cast(-2LL); // Iterating processors for SGDT, SLDT, and SIDT for (DWORD i = 0; i < si.dwNumberOfProcessors; ++i) { const 
DWORD_PTR mask = (DWORD_PTR)1 << i; - const DWORD_PTR previousMask = SetThreadAffinityMask(hCurrentThread, mask); + const DWORD_PTR previous_mask = SetThreadAffinityMask(current_thread, mask); - if (previousMask == 0) { + if (previous_mask == 0) { continue; } - if (originalMask == 0) { - originalMask = previousMask; + if (original_mask == 0) { + original_mask = previous_mask; } // Technique 1: SGDT (x86 & x64) @@ -6326,12 +6326,12 @@ struct VM { #elif (MSVC) && (x86_32) __asm { sidt idtr_buffer } #elif (MSVC) && (x86_64) - #pragma pack(push, 1) + #pragma pack(push, 1) struct { USHORT Limit; ULONG_PTR Base; } idtr; - #pragma pack(pop) + #pragma pack(pop) __sidt(&idtr); memcpy(idtr_buffer, &idtr, sizeof(idtr)); #endif @@ -6351,8 +6351,8 @@ struct VM { if (found) break; } - if (originalMask != 0) { - SetThreadAffinityMask(hCurrentThread, originalMask); + if (original_mask != 0) { + SetThreadAffinityMask(current_thread, original_mask); } // Technique 4: SMSW (x86_32 only), no affinity pinning needed @@ -6741,10 +6741,10 @@ struct VM { return false; } - struct DirCloser { + struct dir_closer { DIR* d; - explicit DirCloser(DIR* dir) : d(dir) {} - ~DirCloser() { if (d) closedir(d); } + explicit dir_closer(DIR* dir) : d(dir) {} + ~dir_closer() { if (d) closedir(d); } } dir(raw_dir); constexpr const char* targets[] = { @@ -6776,10 +6776,10 @@ struct VM { continue; } - struct FDCloser { + struct fd_closer { int fd; - explicit FDCloser(int f) : fd(f) {} - ~FDCloser() { if (fd != -1) close(fd); } + explicit fd_closer(int f) : fd(f) {} + ~fd_closer() { if (fd != -1) close(fd); } } fdguard(fd); struct stat statbuf; @@ -6821,11 +6821,11 @@ struct VM { } for (const char* target : targets) { - size_t targetLen = strlen(target); - if (targetLen > file_size_u) + size_t target_length = strlen(target); + if (target_length > file_size_u) continue; - for (size_t j = 0; j <= file_size_u - targetLen; ++j) { - if (memcmp(buffer.data() + j, target, targetLen) == 0) { + for (size_t j = 0; j 
<= file_size_u - target_length; ++j) { + if (memcmp(buffer.data() + j, target, target_length) == 0) { const char* brand = nullptr; if (strcmp(target, "Parallels Software International") == 0 || strcmp(target, "Parallels(R)") == 0) { @@ -6872,8 +6872,8 @@ struct VM { * @implements VM::PCI_DEVICES */ [[nodiscard]] static bool pci_devices() { - struct PCI_Device { u16 vendor_id; u32 device_id; }; - std::vector devices; + struct pci_device { u16 vendor_id; u32 device_id; }; + std::vector devices; #if (LINUX) const std::string pci_path = "/sys/bus/pci/devices"; @@ -6910,7 +6910,7 @@ struct VM { } #endif #elif (WINDOWS) - static constexpr const wchar_t* kRoots[] = { + static constexpr const wchar_t* kroots[] = { L"SYSTEM\\CurrentControlSet\\Enum\\PCI", L"SYSTEM\\CurrentControlSet\\Enum\\USB", L"SYSTEM\\CurrentControlSet\\Enum\\HDAUDIO" @@ -7113,21 +7113,21 @@ struct VM { }; // for each rootPath we open the root key once - for (size_t rootIdx = 0; rootIdx < _countof(kRoots); ++rootIdx) { - const wchar_t* rootPath = kRoots[rootIdx]; - HKEY hRoot = nullptr; + for (size_t root_idx = 0; root_idx < _countof(kroots); ++root_idx) { + const wchar_t* root_path = kroots[root_idx]; + HKEY root = nullptr; if (RegOpenKeyExW( HKEY_LOCAL_MACHINE, - rootPath, + root_path, 0, KEY_READ, - &hRoot + &root ) != ERROR_SUCCESS) { continue; } - enum_devices(hRoot); - RegCloseKey(hRoot); + enum_devices(root); + RegCloseKey(root); } #endif @@ -7594,7 +7594,7 @@ struct VM { util::get_function_address(ntdll, ntdll_names, ntdll_functions, _countof(ntdll_names)); // https://www.unknowncheats.me/forum/anti-cheat-bypass/729130-article-wine-detection.html - const UINT oldMode = SetErrorMode(SEM_NOALIGNMENTFAULTEXCEPT); + const UINT old_mode = SetErrorMode(SEM_NOALIGNMENTFAULTEXCEPT); static constexpr unsigned char movaps_stub[] = { 0x0F, 0x28, 0x01, // movaps xmm0, XMMWORD PTR [rcx] (Windows x64: arg in RCX) @@ -7607,64 +7607,58 @@ struct VM { using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, 
PVOID*, PSIZE_T, ULONG); using NtProtectVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG); - const auto ntAllocateVirtualMemory = reinterpret_cast(ntdll_functions[0]); - const auto ntFreeVirtualMemory = reinterpret_cast(ntdll_functions[1]); - const auto ntProtectVirtualMemory = reinterpret_cast(ntdll_functions[2]); + const auto nt_allocate_virtual_memory = reinterpret_cast(ntdll_functions[0]); + const auto nt_free_virtual_memory = reinterpret_cast(ntdll_functions[1]); + const auto nt_protect_virtual_memory = reinterpret_cast(ntdll_functions[2]); - if (ntAllocateVirtualMemory == nullptr || ntFreeVirtualMemory == nullptr || ntProtectVirtualMemory == nullptr) { - SetErrorMode(oldMode); + if (nt_allocate_virtual_memory == nullptr || nt_free_virtual_memory == nullptr || nt_protect_virtual_memory == nullptr) { + SetErrorMode(old_mode); return false; } - PVOID execMem = NULL; - const HANDLE hCurrentProcess = reinterpret_cast(-1); - SIZE_T regionSize = sizeof movaps_stub; - NTSTATUS st = ntAllocateVirtualMemory(hCurrentProcess, &execMem, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); - if (!NT_SUCCESS(st) || execMem == NULL) { - SetErrorMode(oldMode); + PVOID exec_mem = NULL; + const HANDLE current_process = reinterpret_cast(-1); + SIZE_T region_size = sizeof movaps_stub; + NTSTATUS st = nt_allocate_virtual_memory(current_process, &exec_mem, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (!NT_SUCCESS(st) || exec_mem == NULL) { + SetErrorMode(old_mode); return false; } - memcpy(execMem, movaps_stub, sizeof movaps_stub); - - { - PVOID tmpBase = execMem; - SIZE_T tmpSz = regionSize; - ULONG oldProt = 0; - st = ntProtectVirtualMemory(hCurrentProcess, &tmpBase, &tmpSz, PAGE_EXECUTE_READ, &oldProt); - if (!NT_SUCCESS(st)) { - PVOID freeBase = execMem; - SIZE_T freeSize = 0; - ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - SetErrorMode(oldMode); - return false; - } + memcpy(exec_mem, movaps_stub, 
sizeof movaps_stub); + + PVOID tmp_base = exec_mem; + SIZE_T tmp_sz = region_size; + ULONG old_protection = 0; + st = nt_protect_virtual_memory(current_process, &tmp_base, &tmp_sz, PAGE_EXECUTE_READ, &old_protection); + if (!NT_SUCCESS(st)) { + PVOID free_base = exec_mem; + SIZE_T free_size = 0; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); + SetErrorMode(old_mode); + return false; } - + __declspec(align(16)) unsigned char buffer[32] = { 0 }; void* misaligned = buffer + 1; __try { - ((movaps_fn)execMem)(misaligned); + ((movaps_fn)exec_mem)(misaligned); } __except (EXCEPTION_EXECUTE_HANDLER) { - // free executable memory, restore error mode, then return the WINE marker - PVOID freeBase = execMem; - SIZE_T freeSize = 0; - ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + PVOID free_base = exec_mem; + SIZE_T free_size = 0; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); - SetErrorMode(oldMode); + SetErrorMode(old_mode); return core::add(brands::WINE); } + + PVOID free_base = exec_mem; + SIZE_T free_size = 0; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); - // normal path: free exec memory, restore mode, return false - { - PVOID freeBase = execMem; - SIZE_T freeSize = 0; - ntFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - } - - SetErrorMode(oldMode); + SetErrorMode(old_mode); return false; } @@ -7686,10 +7680,10 @@ struct VM { using NtPI_t = NTSTATUS(__stdcall*)(POWER_INFORMATION_LEVEL, PVOID, ULONG, PVOID, ULONG); - const auto NtPowerInformation = reinterpret_cast(funcs[0]); + const auto nt_power_information = reinterpret_cast(funcs[0]); SYSTEM_POWER_CAPABILITIES caps = { 0 }; - const NTSTATUS status = NtPowerInformation( + const NTSTATUS status = nt_power_information( SystemPowerCapabilities, nullptr, 0, &caps, sizeof(caps) @@ -7701,16 +7695,16 @@ struct VM { const bool s2_supported = caps.SystemS2; const bool 
s3_supported = caps.SystemS3; const bool s4_supported = caps.SystemS4; - const bool hiberFilePresent = caps.HiberFilePresent; + const bool hiber_file_present = caps.HiberFilePresent; const bool is_physical_pattern = (s0_supported || s3_supported) && - (s4_supported || hiberFilePresent); + (s4_supported || hiber_file_present); if (is_physical_pattern) { return false; } - const bool is_vm_pattern = !(s0_supported || s3_supported || s4_supported || hiberFilePresent) && + const bool is_vm_pattern = !(s0_supported || s3_supported || s4_supported || hiber_file_present) && (s1_supported || s2_supported); if (is_vm_pattern) { @@ -7742,45 +7736,45 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtOpenKey = reinterpret_cast(funcs[0]); - const auto pNtQueryValueKey = reinterpret_cast(funcs[1]); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[2]); - const auto pNtClose = reinterpret_cast(funcs[3]); + const auto nt_open_key = reinterpret_cast(funcs[0]); + const auto nt_query_value_key = reinterpret_cast(funcs[1]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[2]); + const auto nt_close = reinterpret_cast(funcs[3]); - if (!pNtOpenKey || !pNtQueryValueKey || !pRtlInitUnicodeString || !pNtClose) + if (!nt_open_key || !nt_query_value_key || !rtl_init_unicode_string || !nt_close) return false; // We use native unicode strings and object attributes to interface directly with the kernel - UNICODE_STRING uKeyName; - pRtlInitUnicodeString(&uKeyName, L"\\Registry\\Machine\\Software\\Microsoft\\Windows NT\\CurrentVersion"); + UNICODE_STRING key_name; + rtl_init_unicode_string(&key_name, L"\\Registry\\Machine\\Software\\Microsoft\\Windows NT\\CurrentVersion"); - OBJECT_ATTRIBUTES objAttr; - ZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uKeyName; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; + OBJECT_ATTRIBUTES object_attributes; 
+ ZeroMemory(&object_attributes, sizeof(object_attributes)); + object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &key_name; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; // Open the registry key with minimal permissions (query only) - HANDLE hKey = nullptr; + HANDLE key = nullptr; constexpr ACCESS_MASK KEY_QUERY_ONLY = 0x0001; // KEY_QUERY_VALUE - NTSTATUS st = pNtOpenKey(&hKey, KEY_QUERY_ONLY, &objAttr); - if (!NT_SUCCESS(st) || !hKey) { + NTSTATUS st = nt_open_key(&key, KEY_QUERY_ONLY, &object_attributes); + if (!NT_SUCCESS(st) || !key) { return false; } // We specifically want the "ProductId". Automated malware analysis sandboxes often // neglect to randomize this value, thats why we flag it - UNICODE_STRING uValueName; - pRtlInitUnicodeString(&uValueName, L"ProductId"); + UNICODE_STRING value_name; + rtl_init_unicode_string(&value_name, L"ProductId"); // Buffer for KEY_VALUE_PARTIAL_INFORMATION BYTE buffer[128]{}; - ULONG resultLength = 0; - constexpr ULONG KeyValuePartialInformation = 2; + ULONG result_length = 0; + constexpr ULONG key_value_partial_information = 2; - st = pNtQueryValueKey(hKey, &uValueName, KeyValuePartialInformation, buffer, sizeof(buffer), &resultLength); + st = nt_query_value_key(key, &value_name, key_value_partial_information, buffer, sizeof(buffer), &result_length); - pNtClose(hKey); + nt_close(key); if (!NT_SUCCESS(st)) { return false; @@ -7794,41 +7788,41 @@ struct VM { BYTE Data[1]; }; - if (resultLength < offsetof(KEY_VALUE_PARTIAL_INFORMATION_LOCAL, Data) + 1) { + if (result_length < offsetof(KEY_VALUE_PARTIAL_INFORMATION_LOCAL, Data) + 1) { return false; } // Safely extract the ProductId string from the raw byte buffer, ensuring we don't // buffer overflow if the registry returns garbage data const auto* kv = reinterpret_cast(buffer); - const ULONG dataLen = kv->DataLength; - if (dataLen == 0 || dataLen >= sizeof(buffer)) return false; + const ULONG data_length = kv->DataLength; + if 
(data_length == 0 || data_length >= sizeof(buffer)) return false; - char productId[64] = { 0 }; - const size_t copyLen = (dataLen < (sizeof(productId) - 1)) ? dataLen : (sizeof(productId) - 1); - memcpy(productId, kv->Data, copyLen); - productId[copyLen] = '\0'; + char product_id[64] = { 0 }; + const size_t copyLen = (data_length < (sizeof(product_id) - 1)) ? data_length : (sizeof(product_id) - 1); + memcpy(product_id, kv->Data, copyLen); + product_id[copyLen] = '\0'; // A list of known "dirty" Product IDs associated with public malware analysis sandboxes - struct TargetPattern { + struct target_pattern { const char* product_id; const char* brand; }; - constexpr TargetPattern targets[] = { + constexpr target_pattern targets[] = { {"55274-640-2673064-23950", brands::JOEBOX}, {"76487-644-3177037-23510", brands::CWSANDBOX}, {"76487-337-8429955-22614", brands::ANUBIS} }; - constexpr size_t target_len = 21; + constexpr size_t target_length = 21; - if (strlen(productId) != target_len) return false; + if (strlen(product_id) != target_length) return false; // compare the current system's ProductId against the blacklist // if a match is found, we identify the specific sandbox environment and flag it for (const auto& target : targets) { - if (memcmp(productId, target.product_id, target_len) == 0) { + if (memcmp(product_id, target.product_id, target_length) == 0) { debug("GAMARUE: Detected ", target.product_id); return core::add(target.brand); } @@ -7847,7 +7841,7 @@ struct VM { bool rc = false; #if (x86_32 && !CLANG) - auto IsInsideVPC_exceptionFilter = [](PEXCEPTION_POINTERS ep) noexcept -> DWORD { + auto is_inside_vpc = [](PEXCEPTION_POINTERS ep) noexcept -> DWORD { PCONTEXT ctx = ep->ContextRecord; ctx->Ebx = static_cast(-1); // Not running VPC @@ -7880,7 +7874,7 @@ struct VM { pop eax } } - __except (IsInsideVPC_exceptionFilter(GetExceptionInformation())) { + __except (is_inside_vpc(GetExceptionInformation())) { rc = false; } #endif @@ -7992,11 +7986,11 @@ struct VM { 
void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtOpenMutant = reinterpret_cast(funcs[0]); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto nt_open_mutant = reinterpret_cast(funcs[0]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!pNtOpenMutant || !pRtlInitUnicodeString || !pNtClose) { + if (!nt_open_mutant || !rtl_init_unicode_string || !nt_close) { return false; } @@ -8024,7 +8018,7 @@ struct VM { if (*path == L'\0') continue; UNICODE_STRING u_name; - pRtlInitUnicodeString(&u_name, path); + rtl_init_unicode_string(&u_name, path); OBJECT_ATTRIBUTES obj_attr; memset(&obj_attr, 0, sizeof(obj_attr)); @@ -8033,10 +8027,10 @@ struct VM { obj_attr.Attributes = OBJ_CASE_INSENSITIVE; HANDLE h_mutant = nullptr; - const NTSTATUS st = pNtOpenMutant(&h_mutant, MUTANT_QUERY_STATE, &obj_attr); + const NTSTATUS st = nt_open_mutant(&h_mutant, MUTANT_QUERY_STATE, &obj_attr); if (NT_SUCCESS(st)) { - if (h_mutant) pNtClose(h_mutant); + if (h_mutant) nt_close(h_mutant); return true; } } @@ -8080,34 +8074,34 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtOpenFile = reinterpret_cast(funcs[0]); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto nt_open_file = reinterpret_cast(funcs[0]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!pNtOpenFile || !pRtlInitUnicodeString || !pNtClose) { + if (!nt_open_file || !rtl_init_unicode_string || !nt_close) { return false; } - const wchar_t* nativePath = L"\\??\\C:\\Cuckoo"; - UNICODE_STRING uPath; - pRtlInitUnicodeString(&uPath, nativePath); + const wchar_t* native_path = 
L"\\??\\C:\\Cuckoo"; + UNICODE_STRING path; + rtl_init_unicode_string(&path, native_path); - OBJECT_ATTRIBUTES objAttr; - ZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uPath; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; + OBJECT_ATTRIBUTES object_attributes; + ZeroMemory(&object_attributes, sizeof(object_attributes)); + object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &path; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; IO_STATUS_BLOCK iosb; HANDLE hFile = nullptr; - constexpr ACCESS_MASK desiredAccess = FILE_READ_ATTRIBUTES; - constexpr ULONG shareAccess = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE; - constexpr ULONG openOptions = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT | FILE_DIRECTORY_FILE; + constexpr ACCESS_MASK desired_access = FILE_READ_ATTRIBUTES; + constexpr ULONG share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE; + constexpr ULONG open_options = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT | FILE_DIRECTORY_FILE; - const NTSTATUS st = pNtOpenFile(&hFile, desiredAccess, &objAttr, &iosb, shareAccess, openOptions); + const NTSTATUS st = nt_open_file(&hFile, desired_access, &object_attributes, &iosb, share_access, open_options); if (NT_SUCCESS(st)) { - if (hFile) pNtClose(hFile); + if (hFile) nt_close(hFile); return core::add(brands::CUCKOO); } @@ -8136,34 +8130,34 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtOpenFile = reinterpret_cast(funcs[0]); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto nt_open_file = reinterpret_cast(funcs[0]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!pNtOpenFile || !pRtlInitUnicodeString || !pNtClose) { + if (!nt_open_file || !rtl_init_unicode_string || 
!nt_close) { return false; } - const wchar_t* pipePath = L"\\??\\pipe\\cuckoo"; - UNICODE_STRING uPipe; - pRtlInitUnicodeString(&uPipe, pipePath); + const wchar_t* pipe_path = L"\\??\\pipe\\cuckoo"; + UNICODE_STRING pipe; + rtl_init_unicode_string(&pipe, pipe_path); - OBJECT_ATTRIBUTES objAttr; - ZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uPipe; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; + OBJECT_ATTRIBUTES object_attributes; + ZeroMemory(&object_attributes, sizeof(object_attributes)); + object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &pipe; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; IO_STATUS_BLOCK iosb; - HANDLE hPipe = nullptr; + HANDLE h_pipe = nullptr; - constexpr ACCESS_MASK desiredAccess = FILE_READ_DATA | FILE_READ_ATTRIBUTES; - constexpr ULONG shareAccess = 0; - constexpr ULONG openOptions = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT; + constexpr ACCESS_MASK desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES; + constexpr ULONG share_access = 0; + constexpr ULONG open_options = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT; - const NTSTATUS st = pNtOpenFile(&hPipe, desiredAccess, &objAttr, &iosb, shareAccess, openOptions); + const NTSTATUS st = nt_open_file(&h_pipe, desired_access, &object_attributes, &iosb, share_access, open_options); if (NT_SUCCESS(st)) { - if (hPipe) pNtClose(hPipe); + if (h_pipe) nt_close(h_pipe); return core::add(brands::CUCKOO); } @@ -8189,16 +8183,16 @@ struct VM { if (bpp != 32 || logpix < 90 || logpix > 200) return true; - UINT32 pathCount = 0, modeCount = 0; + UINT32 path_count = 0, mode_count = 0; if (QueryDisplayConfig(QDC_ONLY_ACTIVE_PATHS, // win7 and later - &pathCount, nullptr, - &modeCount, nullptr, + &path_count, nullptr, + &mode_count, nullptr, nullptr) != ERROR_SUCCESS) return false; - if ((pathCount <= 1) || (pathCount != modeCount)) { - debug("DISPLAY: Path count: ", pathCount); - debug("DISPLAY: Mode count: ", 
modeCount); + if ((path_count <= 1) || (path_count != mode_count)) { + debug("DISPLAY: Path count: ", path_count); + debug("DISPLAY: Mode count: ", mode_count); return true; } @@ -8264,7 +8258,7 @@ struct VM { ); using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE ProcessHandle, PVOID* BaseAddress, PSIZE_T RegionSize, ULONG FreeType); - constexpr ULONG SystemModuleInformation = 11; + constexpr ULONG system_module_information = 11; const HMODULE ntdll = util::get_ntdll(); if (!ntdll) return false; @@ -8272,38 +8266,38 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto ntQuerySystemInformation = reinterpret_cast(funcs[0]); - const auto ntAllocateVirtualMemory = reinterpret_cast(funcs[1]); - const auto ntFreeVirtualMemory = reinterpret_cast(funcs[2]); + const auto nt_query_system_information = reinterpret_cast(funcs[0]); + const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[1]); + const auto nt_free_virtual_memory = reinterpret_cast(funcs[2]); - if (ntQuerySystemInformation == nullptr || ntAllocateVirtualMemory == nullptr || ntFreeVirtualMemory == nullptr) + if (nt_query_system_information == nullptr || nt_allocate_virtual_memory == nullptr || nt_free_virtual_memory == nullptr) return false; - ULONG ulSize = 0; - NTSTATUS status = ntQuerySystemInformation(SystemModuleInformation, nullptr, 0, &ulSize); + ULONG ul_size = 0; + NTSTATUS status = nt_query_system_information(system_module_information, nullptr, 0, &ul_size); if (status != ((NTSTATUS)0xC0000004L)) return false; - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); - PVOID allocatedMemory = nullptr; - SIZE_T regionSize = ulSize; - ntAllocateVirtualMemory(hCurrentProcess, &allocatedMemory, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + const HANDLE current_process = reinterpret_cast(-1LL); + PVOID allocated_memory = nullptr; + SIZE_T region_size = ul_size; + nt_allocate_virtual_memory(current_process, 
&allocated_memory, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); - const auto pSystemModuleInfoEx = reinterpret_cast(allocatedMemory); - status = ntQuerySystemInformation(SystemModuleInformation, pSystemModuleInfoEx, ulSize, &ulSize); + const auto system_module_info_ex = reinterpret_cast(allocated_memory); + status = nt_query_system_information(system_module_information, system_module_info_ex, ul_size, &ul_size); if (!(((NTSTATUS)(status)) >= 0)) { - ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE); return false; } - for (ULONG i = 0; i < pSystemModuleInfoEx->NumberOfModules; ++i) { - const char* driverPath = reinterpret_cast(pSystemModuleInfoEx->Module[i].ImageName); + for (ULONG i = 0; i < system_module_info_ex->NumberOfModules; ++i) { + const char* driverPath = reinterpret_cast(system_module_info_ex->Module[i].ImageName); if ( strstr(driverPath, "VBoxGuest") || // only installed after vbox guest additions strstr(driverPath, "VBoxMouse") || strstr(driverPath, "VBoxSF") ) { debug("DRIVERS: Detected VBox driver: ", driverPath); - ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE); return core::add(brands::VBOX); } @@ -8313,12 +8307,12 @@ struct VM { strstr(driverPath, "vmmemctl") ) { debug("DRIVERS: Detected VMware driver: ", driverPath); - ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE); return core::add(brands::VMWARE); } } - ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE); return false; } @@ -8339,7 +8333,7 @@ struct VM { bool result = false; constexpr u8 MAX_PHYSICAL_DRIVES = 4; constexpr SIZE_T 
MAX_DESCRIPTOR_SIZE = 64 * 1024; - u8 successfulOpens = 0; + u8 successful_opens = 0; // Helper to detect QEMU instances based on default hard drive serial patterns // QEMU drives often start with "QM000" followed by digits @@ -8402,15 +8396,15 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]); - const auto pNtOpenFile = reinterpret_cast(funcs[1]); - const auto pNtDeviceIoControlFile = reinterpret_cast(funcs[2]); - const auto pNtAllocateVirtualMemory = reinterpret_cast(funcs[3]); - const auto pNtFreeVirtualMemory = reinterpret_cast(funcs[4]); - const auto pNtClose = reinterpret_cast(funcs[6]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]); + const auto nt_open_file = reinterpret_cast(funcs[1]); + const auto nt_device_io_control_file = reinterpret_cast(funcs[2]); + const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[3]); + const auto nt_free_virtual_memory = reinterpret_cast(funcs[4]); + const auto nt_close = reinterpret_cast(funcs[6]); - if (!pRtlInitUnicodeString || !pNtOpenFile || !pNtDeviceIoControlFile || - !pNtAllocateVirtualMemory || !pNtFreeVirtualMemory || !pNtClose) { + if (!rtl_init_unicode_string || !nt_open_file || !nt_device_io_control_file || + !nt_allocate_virtual_memory || !nt_free_virtual_memory || !nt_close) { return result; } @@ -8420,29 +8414,29 @@ struct VM { wchar_t path[32]; swprintf_s(path, L"\\??\\PhysicalDrive%u", drive); - UNICODE_STRING uPath; - pRtlInitUnicodeString(&uPath, path); + UNICODE_STRING unicode_path; + rtl_init_unicode_string(&unicode_path, path); - OBJECT_ATTRIBUTES objAttr; - RtlZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uPath; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; - objAttr.RootDirectory = nullptr; + OBJECT_ATTRIBUTES object_attributes; + RtlZeroMemory(&object_attributes, sizeof(object_attributes)); + 
object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &unicode_path; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; + object_attributes.RootDirectory = nullptr; IO_STATUS_BLOCK iosb; - HANDLE hDevice = nullptr; + HANDLE device = nullptr; - constexpr ACCESS_MASK desiredAccess = SYNCHRONIZE | FILE_READ_ATTRIBUTES; - constexpr ULONG shareAccess = FILE_SHARE_READ | FILE_SHARE_WRITE; - constexpr ULONG openOptions = FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT; + constexpr ACCESS_MASK desired_access = SYNCHRONIZE | FILE_READ_ATTRIBUTES; + constexpr ULONG share_access = FILE_SHARE_READ | FILE_SHARE_WRITE; + constexpr ULONG open_options = FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT; // Attempt to open the physical drive directly using Native API - NTSTATUS st = pNtOpenFile(&hDevice, desiredAccess, &objAttr, &iosb, shareAccess, openOptions); - if (!NT_SUCCESS(st) || hDevice == nullptr) { + NTSTATUS st = nt_open_file(&device, desired_access, &object_attributes, &iosb, share_access, open_options); + if (!NT_SUCCESS(st) || device == nullptr) { continue; } - ++successfulOpens; + ++successful_opens; // stack buffer attempt // We first try to read the storage properties into a small stack buffer to avoid heap @@ -8455,104 +8449,104 @@ struct VM { const ULONG ioctl = IOCTL_STORAGE_QUERY_PROPERTY; - st = pNtDeviceIoControlFile(hDevice, nullptr, nullptr, nullptr, &iosb, + st = nt_device_io_control_file(device, nullptr, nullptr, nullptr, &iosb, ioctl, &query, sizeof(query), stackBuf, sizeof(stackBuf)); - BYTE* allocatedBuffer = nullptr; - SIZE_T allocatedSize = 0; - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); + BYTE* allocated_buffer = nullptr; + SIZE_T allocated_size = 0; + const HANDLE current_process = reinterpret_cast(-1LL); // If the stack buffer was too small (NtDeviceIoControlFile failed), we fall back // to allocating memory dynamically using NtAllocateVirtualMemory if (!NT_SUCCESS(st)) { - DWORD 
reportedSize = 0; + DWORD reported_size = 0; if (descriptor && descriptor->Size > 0) { - reportedSize = descriptor->Size; + reported_size = descriptor->Size; } // This branch just ensures the requested size is reasonable before allocating - if (reportedSize > 0 && reportedSize < static_cast(MAX_DESCRIPTOR_SIZE) && reportedSize >= sizeof(STORAGE_DEVICE_DESCRIPTOR)) { - allocatedSize = static_cast(reportedSize); - PVOID allocBase = nullptr; - SIZE_T regionSize = allocatedSize; - st = pNtAllocateVirtualMemory(hCurrentProcess, &allocBase, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); - if (!NT_SUCCESS(st) || allocBase == nullptr) { - pNtClose(hDevice); + if (reported_size > 0 && reported_size < static_cast(MAX_DESCRIPTOR_SIZE) && reported_size >= sizeof(STORAGE_DEVICE_DESCRIPTOR)) { + allocated_size = static_cast(reported_size); + PVOID allocation_base = nullptr; + SIZE_T region_size = allocated_size; + st = nt_allocate_virtual_memory(current_process, &allocation_base, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (!NT_SUCCESS(st) || allocation_base == nullptr) { + nt_close(device); continue; } - allocatedBuffer = reinterpret_cast(allocBase); + allocated_buffer = reinterpret_cast(allocation_base); // Retry the query with the larger allocated buffer - st = pNtDeviceIoControlFile(hDevice, nullptr, nullptr, nullptr, &iosb, + st = nt_device_io_control_file(device, nullptr, nullptr, nullptr, &iosb, ioctl, &query, sizeof(query), - allocatedBuffer, static_cast(allocatedSize)); + allocated_buffer, static_cast(allocated_size)); if (!NT_SUCCESS(st)) { - PVOID freeBase = reinterpret_cast(allocatedBuffer); - SIZE_T freeSize = allocatedSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - pNtClose(hDevice); + PVOID free_base = reinterpret_cast(allocated_buffer); + SIZE_T free_size = allocated_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); + nt_close(device); continue; } - descriptor = 
reinterpret_cast(allocatedBuffer); + descriptor = reinterpret_cast(allocated_buffer); } else { - pNtClose(hDevice); + nt_close(device); continue; } } // This part is just to validate the structure size returned by the driver to prevent out-of-bounds reads { - const DWORD reportedSize = descriptor->Size; - if (reportedSize < sizeof(STORAGE_DEVICE_DESCRIPTOR) || static_cast(reportedSize) > MAX_DESCRIPTOR_SIZE) { - if (allocatedBuffer) { - PVOID freeBase = reinterpret_cast(allocatedBuffer); - SIZE_T freeSize = allocatedSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - allocatedBuffer = nullptr; + const DWORD reported_size = descriptor->Size; + if (reported_size < sizeof(STORAGE_DEVICE_DESCRIPTOR) || static_cast(reported_size) > MAX_DESCRIPTOR_SIZE) { + if (allocated_buffer) { + PVOID free_base = reinterpret_cast(allocated_buffer); + SIZE_T free_size = allocated_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); + allocated_buffer = nullptr; } - pNtClose(hDevice); + nt_close(device); continue; } } // Serial number string within the descriptor structure - const u32 serialOffset = descriptor->SerialNumberOffset; - if (serialOffset > 0 && serialOffset < descriptor->Size) { - const char* serial = reinterpret_cast(descriptor) + serialOffset; - const size_t maxAvail = static_cast(descriptor->Size) - static_cast(serialOffset); - const size_t serialLen = strnlen(serial, maxAvail); + const u32 serial_offset = descriptor->SerialNumberOffset; + if (serial_offset > 0 && serial_offset < descriptor->Size) { + const char* serial = reinterpret_cast(descriptor) + serial_offset; + const size_t max_avail = static_cast(descriptor->Size) - static_cast(serial_offset); + const size_t serialLen = strnlen(serial, max_avail); debug("DISK_SERIAL: ", serial); // Check the retrieved serial number against known VM artifacts if (is_qemu_serial(serial) || is_vbox_serial(serial, serialLen)) { - if (allocatedBuffer) { - PVOID freeBase 
= reinterpret_cast(allocatedBuffer); - SIZE_T freeSize = allocatedSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - allocatedBuffer = nullptr; + if (allocated_buffer) { + PVOID free_base = reinterpret_cast(allocated_buffer); + SIZE_T free_size = allocated_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); + allocated_buffer = nullptr; } - pNtClose(hDevice); + nt_close(device); return true; } } // Cleanup for the current iteration if no VM was detected on this drive - if (allocatedBuffer) { - PVOID freeBase = reinterpret_cast(allocatedBuffer); - SIZE_T freeSize = allocatedSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); - allocatedBuffer = nullptr; + if (allocated_buffer) { + PVOID free_base = reinterpret_cast(allocated_buffer); + SIZE_T free_size = allocated_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); + allocated_buffer = nullptr; } - pNtClose(hDevice); + nt_close(device); } // If we couldn't open any physical drives (not even read permissions) it's weird so we flag it. 
- if (successfulOpens == 0) { + if (successful_opens == 0) { debug("DISK_SERIAL: No physical drives detected"); return true; } @@ -8603,12 +8597,12 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]); - const auto pNtOpenKey = reinterpret_cast(funcs[1]); - const auto pNtQueryKey = reinterpret_cast(funcs[2]); - const auto pNtClose = reinterpret_cast(funcs[3]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]); + const auto nt_open_key = reinterpret_cast(funcs[1]); + const auto nt_query_key = reinterpret_cast(funcs[2]); + const auto nt_close = reinterpret_cast(funcs[3]); - if (!pRtlInitUnicodeString || !pNtOpenKey || !pNtQueryKey || !pNtClose) { + if (!rtl_init_unicode_string || !nt_open_key || !nt_query_key || !nt_close) { return false; } @@ -8630,39 +8624,39 @@ struct VM { GUID_IVSHMEM_IFACE.Data4[6], GUID_IVSHMEM_IFACE.Data4[7] ); - UNICODE_STRING uPath; - pRtlInitUnicodeString(&uPath, interface_class_path); + UNICODE_STRING unicode_path; + rtl_init_unicode_string(&unicode_path, interface_class_path); - OBJECT_ATTRIBUTES objAttr; - RtlZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uPath; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; + OBJECT_ATTRIBUTES object_attributes; + RtlZeroMemory(&object_attributes, sizeof(object_attributes)); + object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &unicode_path; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; - HANDLE hKey = nullptr; - NTSTATUS st = pNtOpenKey(&hKey, KEY_READ, &objAttr); - if (!NT_SUCCESS(st) || hKey == nullptr) { + HANDLE key = nullptr; + NTSTATUS st = nt_open_key(&key, KEY_READ, &object_attributes); + if (!NT_SUCCESS(st) || key == nullptr) { return false; } // We query the "Full Information" of the key to get the count of subkeys // The existence of the class key alone 
isn't enough cuz Windows might register the class but have no devices // If SubKeys > 0, it means actual device instances (for ex. PCI devices) are registered under this interface - BYTE infoBuf[512] = {}; - ULONG returnedLen = 0; - st = pNtQueryKey(hKey, KeyFullInformation, infoBuf, sizeof(infoBuf), &returnedLen); + BYTE info_buffer[512] = {}; + ULONG returned_len = 0; + st = nt_query_key(key, KeyFullInformation, info_buffer, sizeof(info_buffer), &returned_len); DWORD number_of_subkeys = 0; - if (NT_SUCCESS(st) && returnedLen >= sizeof(KEY_FULL_INFORMATION)) { - auto* kfi = reinterpret_cast(infoBuf); + if (NT_SUCCESS(st) && returned_len >= sizeof(KEY_FULL_INFORMATION)) { + auto* kfi = reinterpret_cast(info_buffer); number_of_subkeys = static_cast(kfi->SubKeys); } else { - pNtClose(hKey); + nt_close(key); return false; } - pNtClose(hKey); + nt_close(key); return number_of_subkeys > 0; } @@ -8699,10 +8693,10 @@ struct VM { return true; } - const int colorMgmtCaps = GetDeviceCaps(hdc, COLORMGMTCAPS); + const int color_caps = GetDeviceCaps(hdc, COLORMGMTCAPS); ReleaseDC(nullptr, hdc); - return !(colorMgmtCaps & CM_GAMMA_RAMP) || colorMgmtCaps == 0; + return !(color_caps & CM_GAMMA_RAMP) || color_caps == 0; } @@ -8719,11 +8713,11 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]); - const auto pNtOpenFile = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]); + const auto nt_open_file = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!pRtlInitUnicodeString || !pNtOpenFile || !pNtClose) { + if (!rtl_init_unicode_string || !nt_open_file || !nt_close) { return false; } @@ -8751,7 +8745,7 @@ struct VM { constexpr ULONG share_access = FILE_SHARE_READ; constexpr ULONG open_options = FILE_OPEN | 
FILE_SYNCHRONOUS_IO_NONALERT; - const NTSTATUS st = pNtOpenFile(&h_file, desired_access, &obj_attr, &iosb, share_access, open_options); + const NTSTATUS st = nt_open_file(&h_file, desired_access, &obj_attr, &iosb, share_access, open_options); if (NT_SUCCESS(st)) { return h_file; @@ -8784,7 +8778,7 @@ struct VM { for (size_t i = 0; i < 4; ++i) { if (handles[i] != INVALID_HANDLE_VALUE) { - pNtClose(handles[i]); + nt_close(handles[i]); } } @@ -8794,13 +8788,13 @@ struct VM { } if (handles[4] != INVALID_HANDLE_VALUE) { - pNtClose(handles[4]); + nt_close(handles[4]); debug("DEVICE_HANDLES: Detected VMware related device (HGFS)"); return core::add(brands::VMWARE); } if (handles[5] != INVALID_HANDLE_VALUE) { - pNtClose(handles[5]); + nt_close(handles[5]); debug("DEVICE_HANDLES: Detected Cuckoo related device (pipe)"); return core::add(brands::CUCKOO); } @@ -8882,14 +8876,14 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const FN_NtQuerySystemInformation pNtQuerySystemInformation = reinterpret_cast(funcs[0]); - if (pNtQuerySystemInformation) { - SYSTEM_HYPERVISOR_DETAIL_INFORMATION hvInfo = { {} }; + const FN_NtQuerySystemInformation nt_query_system_information = reinterpret_cast(funcs[0]); + if (nt_query_system_information) { + SYSTEM_HYPERVISOR_DETAIL_INFORMATION hypervisor_information = { {} }; // Request class 0x9F (SystemHypervisorDetailInformation) // This asks the OS kernel to fill the structure with information about the // hypervisor layer it is running on top of - const NTSTATUS status = pNtQuerySystemInformation(static_cast(0x9F), &hvInfo, sizeof(hvInfo), nullptr); + const NTSTATUS status = nt_query_system_information(static_cast(0x9F), &hypervisor_information, sizeof(hypervisor_information), nullptr); if (status != 0) { return false; @@ -8897,7 +8891,7 @@ struct VM { // If Data[0] is non-zero, it means the kernel has successfully communicated // with a hypervisor and retrieved a vendor 
signature like "Micr" for Microsoft - if (hvInfo.HvVendorAndMaxFunction.Data[0] != 0) { + if (hypervisor_information.HvVendorAndMaxFunction.Data[0] != 0) { return true; } } @@ -8946,23 +8940,23 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto NtOpenKey = reinterpret_cast(funcs[0]); - const auto NtQueryObject = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto nt_open_key = reinterpret_cast(funcs[0]); + const auto nt_query_object = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!NtOpenKey || !NtQueryObject || !pNtClose) + if (!nt_open_key || !nt_query_object || !nt_close) return false; // Prepare to open the root USER registry hive - UNICODE_STRING keyPath{}; - keyPath.Buffer = const_cast(L"\\REGISTRY\\USER"); - keyPath.Length = static_cast(wcslen(keyPath.Buffer) * sizeof(WCHAR)); - keyPath.MaximumLength = keyPath.Length + sizeof(WCHAR); + UNICODE_STRING key_path{}; + key_path.Buffer = const_cast(L"\\REGISTRY\\USER"); + key_path.Length = static_cast(wcslen(key_path.Buffer) * sizeof(WCHAR)); + key_path.MaximumLength = key_path.Length + sizeof(WCHAR); - OBJECT_ATTRIBUTES objAttr = { + OBJECT_ATTRIBUTES object_attributes = { sizeof(OBJECT_ATTRIBUTES), nullptr, - &keyPath, + &key_path, 0x00000040L, // OBJ_CASE_INSENSITIVE nullptr, nullptr @@ -8970,8 +8964,8 @@ struct VM { // Attempt to open the key. 
If we are sandboxed, this open call often succeeds, // but the underlying handle will point to a virtualized container, not the real OS path - HANDLE hKey = nullptr; - NTSTATUS status = NtOpenKey(&hKey, KEY_READ, reinterpret_cast(&objAttr)); + HANDLE key = nullptr; + NTSTATUS status = nt_open_key(&key, KEY_READ, reinterpret_cast(&object_attributes)); if (!(((NTSTATUS)(status)) >= 0)) return false; @@ -8980,22 +8974,22 @@ struct VM { // While the API pretends we opened "\REGISTRY\USER", the handle might actually point to // something like "\Device\HarddiskVolume2\Sandbox\User\DefaultBox\RegHive" alignas(16) BYTE buffer[1024]{}; - ULONG returnedLength = 0; - status = NtQueryObject(hKey, ObjectNameInformation, buffer, sizeof(buffer), &returnedLength); - pNtClose(hKey); + ULONG returned_length = 0; + status = nt_query_object(key, ObjectNameInformation, buffer, sizeof(buffer), &returned_length); + nt_close(key); if (!(((NTSTATUS)(status)) >= 0)) return false; - const auto pObjectName = reinterpret_cast(buffer); + const auto object_name = reinterpret_cast(buffer); - UNICODE_STRING expectedName{}; - expectedName.Buffer = const_cast(L"\\REGISTRY\\USER"); - expectedName.Length = static_cast(wcslen(expectedName.Buffer) * sizeof(WCHAR)); + UNICODE_STRING expected_name{}; + expected_name.Buffer = const_cast(L"\\REGISTRY\\USER"); + expected_name.Length = static_cast(wcslen(expected_name.Buffer) * sizeof(WCHAR)); // Compare the requested name vs the actual kernel object name // If they don't match, we have been redirected, confirming the presence of Sandboxie - const bool mismatch = (pObjectName->Name.Length != expectedName.Length) || - (memcmp(pObjectName->Name.Buffer, expectedName.Buffer, expectedName.Length) != 0); + const bool mismatch = (object_name->Name.Length != expected_name.Length) || + (memcmp(object_name->Name.Buffer, expected_name.Buffer, expected_name.Length) != 0); return mismatch ? 
core::add(brands::SANDBOXIE) : false; } @@ -9043,12 +9037,12 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]); - const auto pNtOpenKey = reinterpret_cast(funcs[1]); - const auto pNtQueryKey = reinterpret_cast(funcs[2]); - const auto pNtClose = reinterpret_cast(funcs[3]); + const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]); + const auto nt_open_key = reinterpret_cast(funcs[1]); + const auto nt_query_key = reinterpret_cast(funcs[2]); + const auto nt_close = reinterpret_cast(funcs[3]); - if (!pRtlInitUnicodeString || !pNtOpenKey || !pNtQueryKey || !pNtClose) { + if (!rtl_init_unicode_string || !nt_open_key || !nt_query_key || !nt_close) { return false; } @@ -9056,56 +9050,56 @@ struct VM { // Most legitimate user PCs have speakers or headphones (audio endpoints) // Automated sandboxes and headless servers often have no audio devices configured // We target the MMDevices\Audio\Render key where these endpoints are registered - const wchar_t* nativePath = L"\\Registry\\Machine\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render"; + const wchar_t* native_path = L"\\Registry\\Machine\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render"; - UNICODE_STRING uPath; - pRtlInitUnicodeString(&uPath, nativePath); + UNICODE_STRING unicode_path; + rtl_init_unicode_string(&unicode_path, native_path); - OBJECT_ATTRIBUTES objAttr; - RtlZeroMemory(&objAttr, sizeof(objAttr)); - objAttr.Length = sizeof(objAttr); - objAttr.ObjectName = &uPath; - objAttr.Attributes = OBJ_CASE_INSENSITIVE; + OBJECT_ATTRIBUTES object_attributes; + RtlZeroMemory(&object_attributes, sizeof(object_attributes)); + object_attributes.Length = sizeof(object_attributes); + object_attributes.ObjectName = &unicode_path; + object_attributes.Attributes = OBJ_CASE_INSENSITIVE; - HANDLE hKey = nullptr; - const ACCESS_MASK desiredAccess = 
KEY_READ; + HANDLE key = nullptr; + const ACCESS_MASK desired_access = KEY_READ; - NTSTATUS st = pNtOpenKey(&hKey, desiredAccess, &objAttr); - if (!NT_SUCCESS(st) || hKey == nullptr) { + NTSTATUS st = nt_open_key(&key, desired_access, &object_attributes); + if (!NT_SUCCESS(st) || key == nullptr) { return false; } constexpr KEY_INFORMATION_CLASS InfoClass = KeyFullInformation; - std::vector infoBuf(512); - ULONG returnedLen = 0; + std::vector info_buffer(512); + ULONG returned_len = 0; // Query the key information. If the buffer is too small (STATUS_BUFFER_TOO_SMALL), // resize it to the exact length required by the kernel and try again - st = pNtQueryKey(hKey, InfoClass, infoBuf.data(), static_cast(infoBuf.size()), &returnedLen); + st = nt_query_key(key, InfoClass, info_buffer.data(), static_cast(info_buffer.size()), &returned_len); - if (!NT_SUCCESS(st) && returnedLen > infoBuf.size()) { - infoBuf.resize(returnedLen); - st = pNtQueryKey(hKey, InfoClass, infoBuf.data(), static_cast(infoBuf.size()), &returnedLen); + if (!NT_SUCCESS(st) && returned_len > info_buffer.size()) { + info_buffer.resize(returned_len); + st = nt_query_key(key, InfoClass, info_buffer.data(), static_cast(info_buffer.size()), &returned_len); } - bool hasValues = false; - if (NT_SUCCESS(st) && returnedLen >= sizeof(KEY_FULL_INFORMATION)) { - auto* kfi = reinterpret_cast(infoBuf.data()); + bool has_values = false; + if (NT_SUCCESS(st) && returned_len >= sizeof(KEY_FULL_INFORMATION)) { + const auto* kfi = reinterpret_cast(info_buffer.data()); // Check if the registry key has any values associated with it // If 'Values' is 0, the audio system is likely uninitialized or barren, // which strongly suggests a virtualized/sandbox environment - const DWORD valueCount = static_cast(kfi->Values); // values, not subkeys - hasValues = (valueCount > 0); + const DWORD value_count = static_cast(kfi->Values); // values, not subkeys + has_values = (value_count > 0); } else { - pNtClose(hKey); + nt_close(key); 
return false; } - pNtClose(hKey); + nt_close(key); - return hasValues; + return has_values; } @@ -9120,15 +9114,15 @@ struct VM { }; // enumerate all DISPLAY devices - const HDEVINFO hDevInfo = SetupDiGetClassDevsW(&GUID_DEVCLASS_DISPLAY, nullptr, nullptr, DIGCF_PRESENT); - if (hDevInfo == INVALID_HANDLE_VALUE) { + const HDEVINFO handle_dev_info = SetupDiGetClassDevsW(&GUID_DEVCLASS_DISPLAY, nullptr, nullptr, DIGCF_PRESENT); + if (handle_dev_info == INVALID_HANDLE_VALUE) { debug("ACPI_SIGNATURE: No display device detected"); return true; } - SP_DEVINFO_DATA devInfo; - ZeroMemory(&devInfo, sizeof(devInfo)); - devInfo.cbSize = sizeof(devInfo); + SP_DEVINFO_DATA dev_info; + ZeroMemory(&dev_info, sizeof(dev_info)); + dev_info.cbSize = sizeof(dev_info); const DEVPROPKEY key = DEVPKEY_Device_LocationPaths; // baremetal tokens (case-sensitive to preserve handling against edge-cases) @@ -9148,16 +9142,16 @@ struct VM { return false; }; - for (DWORD idx = 0; SetupDiEnumDeviceInfo(hDevInfo, idx, &devInfo); ++idx) { - DEVPROPTYPE propType = 0; - DWORD requiredSize = 0; + for (DWORD idx = 0; SetupDiEnumDeviceInfo(handle_dev_info, idx, &dev_info); ++idx) { + DEVPROPTYPE prop_type = 0; + DWORD required_size = 0; // query required size (bytes) - SetupDiGetDevicePropertyW(hDevInfo, &devInfo, &key, &propType, nullptr, 0, &requiredSize, 0); - if (GetLastError() != ERROR_INSUFFICIENT_BUFFER || requiredSize == 0) { + SetupDiGetDevicePropertyW(handle_dev_info, &dev_info, &key, &prop_type, nullptr, 0, &required_size, 0); + if (GetLastError() != ERROR_INSUFFICIENT_BUFFER || required_size == 0) { if (GetLastError() == ERROR_NOT_FOUND) { debug("ACPI_SIGNATURE: No dedicated display/GPU detected"); - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); return false; } else { @@ -9166,16 +9160,16 @@ struct VM { } // fetch buffer (multi-sz) - std::vector buffer(requiredSize); - if (!SetupDiGetDevicePropertyW(hDevInfo, &devInfo, &key, &propType, - 
buffer.data(), requiredSize, &requiredSize, 0)) + std::vector buffer(required_size); + if (!SetupDiGetDevicePropertyW(handle_dev_info, &dev_info, &key, &prop_type, + buffer.data(), required_size, &required_size, 0)) { continue; } const wchar_t* ptr = reinterpret_cast(buffer.data()); // number of wchar_t slots in buffer - const size_t total_wchars = requiredSize / sizeof(wchar_t); + const size_t total_wchars = required_size / sizeof(wchar_t); const wchar_t* buf_end = ptr + (total_wchars ? total_wchars : 0); #ifdef __VMAWARE_DEBUG__ @@ -9184,8 +9178,8 @@ struct VM { } #endif - static const wchar_t acpiPrefix[] = L"#ACPI(S"; - static const wchar_t acpiParen[] = L"ACPI("; + static const wchar_t acpi_prefix[] = L"#ACPI(S"; + static const wchar_t acpi_paren[] = L"ACPI("; // First pass: QEMU-style "#ACPI(Sxx...)" and generic "ACPI(Sxx)" for (const wchar_t* p = ptr; p < buf_end && *p; p += (wcslen(p) + 1)) { @@ -9197,18 +9191,18 @@ struct VM { // search for "#ACPI(S" const wchar_t* search = p; while (true) { - const wchar_t* found = wcsstr(search, acpiPrefix); + const wchar_t* found = wcsstr(search, acpi_prefix); if (!found) break; // after "#ACPI(S" we expect two hex chars - const wchar_t* hexpos = found + wcslen(acpiPrefix); // first hex char + const wchar_t* hexpos = found + wcslen(acpi_prefix); // first hex char if (hexpos && hexpos[0] && hexpos[1]) { wchar_t b = hexpos[0]; wchar_t s = hexpos[1]; if (is_hex(b) && is_hex(s)) { const wchar_t after = hexpos[2]; // may be '_' or ')' if (after == L'_' || after == L')') { - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); return core::add(brands::QEMU); } } @@ -9219,12 +9213,12 @@ struct VM { // search for "ACPI(" then check for "S" + two hex digits search = p; while (true) { - const wchar_t* found = wcsstr(search, acpiParen); + const wchar_t* found = wcsstr(search, acpi_paren); if (!found) break; - const wchar_t* start = found + wcslen(acpiParen); // char after '(' + const wchar_t* 
start = found + wcslen(acpi_paren); // char after '(' if (start && start[0] && start[1] && start[2]) { if (start[0] == L'S' && is_hex(start[1]) && is_hex(start[2])) { - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); return core::add(brands::QEMU); } } @@ -9242,14 +9236,14 @@ struct VM { for (const wchar_t* sig : vm_signatures) { if (wcsstr(p, sig) != nullptr) { - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); return core::add(brands::HYPERV); } } } } - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); return false; } @@ -9260,7 +9254,7 @@ struct VM { * @implements VM::TRAP */ [[nodiscard]] static bool trap() { - bool hypervisorCaught = false; + bool hypervisor_caught = false; #if (x86_64) // when a single - step(TF) and hardware breakpoint(DR0) collide, Intel CPUs set both DR6.BS and DR6.B0 to report both events, which help make this detection trick // AMD CPUs prioritize the breakpoint, setting only its corresponding bit in DR6 and clearing the single-step bit, which is why this technique is not compatible with AMD @@ -9294,7 +9288,7 @@ struct VM { 0x4C, 0x89, 0xC3, // mov rbx, r8 (restore rbx from r8) - trap happens here 0xC3 // ret }; - SIZE_T trampSize = sizeof(trampoline); + SIZE_T trampoline_size = sizeof(trampoline); const HMODULE ntdll = util::get_ntdll(); if (!ntdll) return false; @@ -9320,84 +9314,84 @@ struct VM { using NtSetContextThread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT); // volatile ensures these are loaded from stack after SEH unwind when compiled with aggresive optimizations - NtAllocateVirtualMemory_t volatile pNtAllocateVirtualMemory = reinterpret_cast(funcs[0]); - NtProtectVirtualMemory_t volatile pNtProtectVirtualMemory = reinterpret_cast(funcs[1]); - NtFreeVirtualMemory_t volatile pNtFreeVirtualMemory = reinterpret_cast(funcs[2]); - NtFlushInstructionCache_t volatile pNtFlushInstructionCache = 
reinterpret_cast(funcs[3]); - NtClose_t volatile pNtClose = reinterpret_cast(funcs[4]); - NtGetContextThread_t volatile pNtGetContextThread = reinterpret_cast(funcs[5]); - NtSetContextThread_t volatile pNtSetContextThread = reinterpret_cast(funcs[6]); - - if (!pNtAllocateVirtualMemory || !pNtProtectVirtualMemory || !pNtFlushInstructionCache || - !pNtFreeVirtualMemory || !pNtGetContextThread || !pNtSetContextThread || !pNtClose) { + NtAllocateVirtualMemory_t volatile nt_allocate_virtual_memory = reinterpret_cast(funcs[0]); + NtProtectVirtualMemory_t volatile nt_protect_virtual_memory = reinterpret_cast(funcs[1]); + NtFreeVirtualMemory_t volatile nt_free_virtual_memory = reinterpret_cast(funcs[2]); + NtFlushInstructionCache_t volatile nt_flush_instruction_cache = reinterpret_cast(funcs[3]); + NtClose_t volatile nt_close = reinterpret_cast(funcs[4]); + NtGetContextThread_t volatile nt_get_context_thread = reinterpret_cast(funcs[5]); + NtSetContextThread_t volatile nt_set_context_thread = reinterpret_cast(funcs[6]); + + if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_flush_instruction_cache || + !nt_free_virtual_memory || !nt_get_context_thread || !nt_set_context_thread || !nt_close) { return false; } - PVOID execMem = nullptr; - SIZE_T regionSize = trampSize; - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); - NTSTATUS st = pNtAllocateVirtualMemory(hCurrentProcess, &execMem, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); - if (!NT_SUCCESS(st) || !execMem) { + PVOID exec_mem = nullptr; + SIZE_T region_size = trampoline_size; + const HANDLE current_process = reinterpret_cast(-1LL); + NTSTATUS st = nt_allocate_virtual_memory(current_process, &exec_mem, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (!NT_SUCCESS(st) || !exec_mem) { return false; } - memcpy(execMem, trampoline, trampSize); + memcpy(exec_mem, trampoline, trampoline_size); { - PVOID tmpBase = execMem; - SIZE_T tmpSz = trampSize; + PVOID tmp_base = exec_mem; 
+ SIZE_T tmp_sz = trampoline_size; ULONG oldProt = 0; - st = pNtProtectVirtualMemory(hCurrentProcess, &tmpBase, &tmpSz, PAGE_EXECUTE_READ, &oldProt); + st = nt_protect_virtual_memory(current_process, &tmp_base, &tmp_sz, PAGE_EXECUTE_READ, &oldProt); if (!NT_SUCCESS(st)) { - PVOID freeBase = execMem; - SIZE_T freeSize = trampSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + PVOID free_base = exec_mem; + SIZE_T free_size = trampoline_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); return false; } } - pNtFlushInstructionCache(hCurrentProcess, execMem, trampSize); + nt_flush_instruction_cache(current_process, exec_mem, trampoline_size); u8 hitCount = 0; - CONTEXT origCtx{}; - origCtx.ContextFlags = CONTEXT_DEBUG_REGISTERS; - const HANDLE hCurrentThread = reinterpret_cast(-2LL); + CONTEXT original_context{}; + original_context.ContextFlags = CONTEXT_DEBUG_REGISTERS; + const HANDLE current_thread = reinterpret_cast(-2LL); - if (!NT_SUCCESS(pNtGetContextThread(hCurrentThread, &origCtx))) { - PVOID freeBase = execMem; - SIZE_T freeSize = trampSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + if (!NT_SUCCESS(nt_get_context_thread(current_thread, &original_context))) { + PVOID free_base = exec_mem; + SIZE_T free_size = trampoline_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); return false; } // Set DR0 to trampoline + 14 (Instruction: mov rbx, r8) // Offset calculation: mov_r8_rbx(3) + pushfq(1) + or(7) + popfq(1) + cpuid(2) = 14 // This is where single step traps after CPUID, and where we want the collision - const uintptr_t expectedTrapAddr = reinterpret_cast(execMem) + 14; + const uintptr_t expected_trap_address = reinterpret_cast(exec_mem) + 14; // set Dr0 to trampoline+offset - CONTEXT dbgCtx = origCtx; - dbgCtx.Dr0 = expectedTrapAddr; // single step breakpoint address - dbgCtx.Dr7 = 1; // enable Local Breakpoint 0 - - if 
(!NT_SUCCESS(pNtSetContextThread(hCurrentThread, &dbgCtx))) { - pNtSetContextThread(hCurrentThread, &origCtx); - PVOID freeBase = execMem; - SIZE_T freeSize = trampSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + CONTEXT debug_context = original_context; + debug_context.Dr0 = expected_trap_address; // single step breakpoint address + debug_context.Dr7 = 1; // enable Local Breakpoint 0 + + if (!NT_SUCCESS(nt_set_context_thread(current_thread, &debug_context))) { + nt_set_context_thread(current_thread, &original_context); + PVOID free_base = exec_mem; + SIZE_T free_size = trampoline_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); return false; } // Context structure to pass data to the static SEH handler - struct TrapContext { + struct trap_context { uintptr_t expectedTrapAddr; u8* hitCount; - bool* hypervisorCaught; + bool* hypervisor_caught; }; // Static class for SEH filtering to avoid Release mode Lambda corruption struct SEH_Trap { - static LONG Vet(u32 code, EXCEPTION_POINTERS* info, TrapContext* ctx) noexcept { + static LONG Vet(u32 code, EXCEPTION_POINTERS* info, trap_context* ctx) noexcept { // Lambda returns LONG to support EXCEPTION_CONTINUE_EXECUTION if (code != static_cast(0x80000004L)) { return EXCEPTION_CONTINUE_SEARCH; @@ -9407,7 +9401,7 @@ struct VM { if (reinterpret_cast(info->ExceptionRecord->ExceptionAddress) != ctx->expectedTrapAddr) { info->ContextRecord->EFlags &= ~0x100; // Clear TF info->ContextRecord->Dr7 &= ~1; // Clear DR0 Enable - *ctx->hypervisorCaught = true; + *ctx->hypervisor_caught = true; return EXCEPTION_CONTINUE_EXECUTION; } @@ -9419,7 +9413,7 @@ struct VM { if ((status & required_bits) != required_bits) { if (util::hyper_x() != HYPERV_ARTIFACT_VM) // detects type 1 Hyper-V too, which we consider legitimate - *ctx->hypervisorCaught = true; + *ctx->hypervisor_caught = true; } // Clear Trap Flag to stop single stepping @@ -9434,10 +9428,10 @@ struct VM { } }; - 
TrapContext ctx = { expectedTrapAddr, &hitCount, &hypervisorCaught }; + trap_context ctx = { expected_trap_address, &hitCount, &hypervisor_caught }; __try { - reinterpret_cast(execMem)(); + reinterpret_cast(exec_mem)(); } __except (SEH_Trap::Vet(_exception_code(), reinterpret_cast(_exception_info()), &ctx)) { // This block is effectively unreachable because vetExceptions returns CONTINUE_EXECUTION or CONTINUE_SEARCH @@ -9445,16 +9439,16 @@ struct VM { // If the hypervisor swallowed the exception entirely, hitCount will be 0 if (hitCount != 1) { - hypervisorCaught = true; + hypervisor_caught = true; } - pNtSetContextThread(hCurrentThread, &origCtx); + nt_set_context_thread(current_thread, &original_context); - PVOID freeBase = execMem; - SIZE_T freeSize = trampSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + PVOID free_base = exec_mem; + SIZE_T free_size = trampoline_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); #endif - return hypervisorCaught; + return hypervisor_caught; } @@ -9488,33 +9482,33 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtAllocateVirtualMemory = reinterpret_cast(funcs[0]); - const auto pNtProtectVirtualMemory = reinterpret_cast(funcs[1]); - const auto pNtFlushInstructionCache = reinterpret_cast(funcs[2]); - const auto pNtFreeVirtualMemory = reinterpret_cast(funcs[3]); + const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[0]); + const auto nt_protect_virtual_memory = reinterpret_cast(funcs[1]); + const auto nt_flush_instruction_cache = reinterpret_cast(funcs[2]); + const auto nt_free_virtual_memory = reinterpret_cast(funcs[3]); - if (!pNtAllocateVirtualMemory || !pNtProtectVirtualMemory || !pNtFlushInstructionCache || !pNtFreeVirtualMemory) { + if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_flush_instruction_cache || !nt_free_virtual_memory) { return 
false; } - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); + const HANDLE current_process = reinterpret_cast(-1LL); PVOID base = nullptr; - SIZE_T regionSize = sizeof(ud_opcodes); - NTSTATUS st = pNtAllocateVirtualMemory(hCurrentProcess, &base, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + SIZE_T region_size = sizeof(ud_opcodes); + NTSTATUS st = nt_allocate_virtual_memory(current_process, &base, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); if (!NT_SUCCESS(st) || !base) { return false; } memcpy(base, ud_opcodes, sizeof(ud_opcodes)); - ULONG oldProtect = 0; - st = pNtProtectVirtualMemory(hCurrentProcess, &base, ®ionSize, PAGE_EXECUTE_READ, &oldProtect); + ULONG old_protection = 0; + st = nt_protect_virtual_memory(current_process, &base, ®ion_size, PAGE_EXECUTE_READ, &old_protection); if (!NT_SUCCESS(st)) { - pNtFreeVirtualMemory(hCurrentProcess, &base, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &base, ®ion_size, MEM_RELEASE); return false; } - pNtFlushInstructionCache(hCurrentProcess, base, regionSize); + nt_flush_instruction_cache(current_process, base, region_size); __try { reinterpret_cast(base)(); @@ -9523,7 +9517,7 @@ struct VM { saw_ud = true; } - pNtFreeVirtualMemory(hCurrentProcess, &base, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &base, ®ion_size, MEM_RELEASE); return !saw_ud; } @@ -9589,16 +9583,16 @@ struct VM { constexpr u64 PW3 = 0x0000000090909090ULL; constexpr u32 PW2 = 0xFEDCBA98U; - struct VMCallInfo { + struct vmcall_info { u32 structsize; u32 level2pass; u32 command; }; - VMCallInfo vmcallInfo = {}; - u64 vmcallResult = 0; + vmcall_info vmcall_info = {}; + u64 vmcall_result = 0; - constexpr u8 intelTemplate[44] = { + constexpr u8 intel_template[44] = { 0x48,0xBA,0,0,0,0,0,0,0,0, // mov rdx, imm64 ; PW1 0x48,0xB9,0,0,0,0,0,0,0,0, // mov rcx, imm64 ; PW3 0x48,0xB8,0,0,0,0,0,0,0,0, // mov rax, imm64 ; &vmcallInfo @@ -9607,7 +9601,7 @@ struct VM { 0xC3 // ret }; - constexpr 
u8 amdTemplate[44] = { + constexpr u8 amd_template[44] = { 0x48,0xBA,0,0,0,0,0,0,0,0, // mov rdx, imm64 ; PW1 0x48,0xB9,0,0,0,0,0,0,0,0, // mov rcx, imm64 ; PW3 0x48,0xB8,0,0,0,0,0,0,0,0, // mov rax, imm64 ; &vmcallInfo @@ -9616,10 +9610,10 @@ struct VM { 0xC3 // ret }; - const SIZE_T stubSize = sizeof(intelTemplate); - const bool isAmd = cpu::is_amd(); + const SIZE_T stub_size = sizeof(intel_template); + const bool is_amd = cpu::is_amd(); - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); + const HANDLE current_process = reinterpret_cast(-1LL); const HMODULE ntdll = util::get_ntdll(); if (!ntdll) return false; @@ -9627,25 +9621,25 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtAllocateVirtualMemory = reinterpret_cast(funcs[0]); - const auto pNtProtectVirtualMemory = reinterpret_cast(funcs[1]); - const auto pNtFlushInstructionCache = reinterpret_cast(funcs[2]); - const auto pNtFreeVirtualMemory = reinterpret_cast(funcs[3]); + const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[0]); + const auto nt_protect_virtual_memory = reinterpret_cast(funcs[1]); + const auto nt_flush_instruction_cache = reinterpret_cast(funcs[2]); + const auto nt_free_virtual_memory = reinterpret_cast(funcs[3]); - if (!pNtAllocateVirtualMemory || !pNtProtectVirtualMemory || !pNtFlushInstructionCache || !pNtFreeVirtualMemory) { + if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_flush_instruction_cache || !nt_free_virtual_memory) { return false; } PVOID stub = nullptr; - SIZE_T regionSize = stubSize; - NTSTATUS st = pNtAllocateVirtualMemory(hCurrentProcess, &stub, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + SIZE_T region_size = stub_size; + NTSTATUS st = nt_allocate_virtual_memory(current_process, &stub, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); if (!NT_SUCCESS(st) || !stub) return false; - if (isAmd) { - memcpy(stub, amdTemplate, stubSize); + if 
(is_amd) { + memcpy(stub, amd_template, stub_size); } else { - memcpy(stub, intelTemplate, stubSize); + memcpy(stub, intel_template, stub_size); } // rdx imm64 @@ -9654,38 +9648,38 @@ struct VM { // mov [imm64], rax immediate *reinterpret_cast(reinterpret_cast(stub) + 2) = PW1; *reinterpret_cast(reinterpret_cast(stub) + 12) = PW3; - *reinterpret_cast(reinterpret_cast(stub) + 22) = reinterpret_cast(static_cast(&vmcallInfo)); - *reinterpret_cast(reinterpret_cast(stub) + 35) = reinterpret_cast(static_cast(&vmcallResult)); + *reinterpret_cast(reinterpret_cast(stub) + 22) = reinterpret_cast(static_cast(&vmcall_info)); + *reinterpret_cast(reinterpret_cast(stub) + 35) = reinterpret_cast(static_cast(&vmcall_result)); - ULONG oldProtect = 0; - st = pNtProtectVirtualMemory(hCurrentProcess, &stub, ®ionSize, PAGE_EXECUTE_READ, &oldProtect); + ULONG old_protection = 0; + st = nt_protect_virtual_memory(current_process, &stub, ®ion_size, PAGE_EXECUTE_READ, &old_protection); if (!NT_SUCCESS(st)) { - pNtFreeVirtualMemory(hCurrentProcess, &stub, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &stub, ®ion_size, MEM_RELEASE); return false; } - pNtFlushInstructionCache(hCurrentProcess, stub, regionSize); + nt_flush_instruction_cache(current_process, stub, region_size); auto tryPass = [&]() noexcept -> bool { // store forwarding in modern CPUs - vmcallInfo.structsize = static_cast(sizeof(VMCallInfo)); - vmcallInfo.level2pass = PW2; - vmcallInfo.command = 0; - vmcallResult = 0; + vmcall_info.structsize = static_cast(sizeof(vmcall_info)); + vmcall_info.level2pass = PW2; + vmcall_info.command = 0; + vmcall_result = 0; __try { reinterpret_cast(stub)(); } __except (EXCEPTION_EXECUTE_HANDLER) { // EXCEPTION_ILLEGAL_INSTRUCTION normally, EXCEPTION_ACCESS_VIOLATION_READ on edge-cases - vmcallResult = 0; + vmcall_result = 0; } - return (((vmcallResult >> 24) & 0xFF) == 0xCE); // the VM returns status in bits 24–31; Cheat Engine uses 0xCE here + return (((vmcall_result >> 24) & 
0xFF) == 0xCE); // the VM returns status in bits 24–31; Cheat Engine uses 0xCE here }; const bool found = tryPass(); - pNtFreeVirtualMemory(hCurrentProcess, &stub, ®ionSize, MEM_RELEASE); + nt_free_virtual_memory(current_process, &stub, ®ion_size, MEM_RELEASE); if (found) return core::add(brands::DBVM); @@ -9714,32 +9708,30 @@ struct VM { void* functions[1] = { nullptr }; util::get_function_address(ntdll, function_names, functions, 1); - using NtQuerySysInfo_t = NTSTATUS(__stdcall*)( - SYSTEM_INFORMATION_CLASS, PVOID, ULONG, PULONG - ); - NtQuerySysInfo_t pNtQuery = reinterpret_cast(functions[0]); - if (!pNtQuery) + using NtQuerySysInfo_t = NTSTATUS(__stdcall*)(SYSTEM_INFORMATION_CLASS, PVOID, ULONG, PULONG); + NtQuerySysInfo_t nt_query = reinterpret_cast(functions[0]); + if (!nt_query) return false; // determine required buffer size - const SYSTEM_INFORMATION_CLASS SysBootInfo = static_cast(140); + const SYSTEM_INFORMATION_CLASS sys_boot_info = static_cast(140); ULONG needed = 0; - NTSTATUS st = pNtQuery(SysBootInfo, nullptr, 0, &needed); + NTSTATUS st = nt_query(sys_boot_info, nullptr, 0, &needed); if (st != static_cast(0xC0000023) && st != static_cast(0x80000005) && st != static_cast(0xC0000004)) return false; std::vector buffer(needed); // fetch the boot-logo data - st = pNtQuery(SysBootInfo, buffer.data(), needed, &needed); + st = nt_query(sys_boot_info, buffer.data(), needed, &needed); if (!NT_SUCCESS(st)) return false; // parse header to locate the bitmap - struct BootLogoInfo { ULONG Flags, BitmapOffset; }; - const auto* info = reinterpret_cast(buffer.data()); - const u8* bmp = buffer.data() + info->BitmapOffset; - const size_t size = static_cast(needed) - info->BitmapOffset; + struct boot_logo_info { ULONG flags, bitmap_offset; }; + const auto* info = reinterpret_cast(buffer.data()); + const u8* bmp = buffer.data() + info->bitmap_offset; + const size_t size = static_cast(needed) - info->bitmap_offset; // struct + function to isolate SEH from the stack 
frame containing std::vector and use __target__ struct crc { @@ -9782,9 +9774,9 @@ struct VM { } }; - u32 hash = crc::compute(bmp, size); + const u32 hash = crc::compute(bmp, size); - debug("BOOT_LOGO: size=", needed, ", flags=", info->Flags, ", offset=", info->BitmapOffset, ", crc=0x", std::hex, hash); + debug("BOOT_LOGO: size=", needed, ", flags=", info->flags, ", offset=", info->bitmap_offset, ", crc=0x", std::hex, hash); switch (hash) { case 0x110350C5: return core::add(brands::QEMU); // TianoCore EDK2 @@ -9813,9 +9805,9 @@ struct VM { constexpr auto DIRECTORY_QUERY = 0x0001; constexpr NTSTATUS STATUS_NO_MORE_ENTRIES = 0x8000001A; - HANDLE hDir = nullptr; - OBJECT_ATTRIBUTES objAttr{}; - UNICODE_STRING dirName{}; + HANDLE dir = nullptr; + OBJECT_ATTRIBUTES object_attributes{}; + UNICODE_STRING dir_name{}; NTSTATUS status; const HMODULE ntdll = util::get_ntdll(); @@ -9825,23 +9817,23 @@ struct VM { void* funcs[ARRAYSIZE(names)] = {}; util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - const auto pNtOpenDirectoryObject = reinterpret_cast(funcs[0]); - const auto pNtQueryDirectoryObject = reinterpret_cast(funcs[1]); - const auto pNtClose = reinterpret_cast(funcs[2]); + const auto nt_open_directory_object = reinterpret_cast(funcs[0]); + const auto nt_query_directory_object = reinterpret_cast(funcs[1]); + const auto nt_close = reinterpret_cast(funcs[2]); - if (!pNtOpenDirectoryObject || !pNtQueryDirectoryObject || !pNtClose) return false; + if (!nt_open_directory_object || !nt_query_directory_object || !nt_close) return false; // Prepare to open the root "\Device" directory in the Object Manager namespace // This is different from the file system and we are looking for kernel objects created by drivers - const wchar_t* deviceDirPath = L"\\Device"; - dirName.Buffer = (PWSTR)deviceDirPath; - dirName.Length = (USHORT)(wcslen(deviceDirPath) * sizeof(wchar_t)); - dirName.MaximumLength = dirName.Length + sizeof(wchar_t); + const wchar_t* device_dir_path = 
L"\\Device"; + dir_name.Buffer = (PWSTR)device_dir_path; + dir_name.Length = (USHORT)(wcslen(device_dir_path) * sizeof(wchar_t)); + dir_name.MaximumLength = dir_name.Length + sizeof(wchar_t); - InitializeObjectAttributes(&objAttr, &dirName, OBJ_CASE_INSENSITIVE, nullptr, nullptr); + InitializeObjectAttributes(&object_attributes, &dir_name, OBJ_CASE_INSENSITIVE, nullptr, nullptr); // Open the directory object so we can enumerate its contents - status = pNtOpenDirectoryObject(&hDir, DIRECTORY_QUERY, &objAttr); + status = nt_open_directory_object(&dir, DIRECTORY_QUERY, &object_attributes); if (!NT_SUCCESS(status)) { return false; @@ -9852,19 +9844,19 @@ struct VM { std::vector buffer(4096); constexpr size_t MAX_DIR_BUFFER = 64 * 1024; ULONG context = 0; - ULONG returnedLength = 0; + ULONG returned_length = 0; while (true) { // Query the next single object in the directory // 'ReturnSingleEntry' is TRUE to simplify buffer parsing logic - status = pNtQueryDirectoryObject( - hDir, + status = nt_query_directory_object( + dir, buffer.data(), static_cast(buffer.size()), TRUE, FALSE, &context, - &returnedLength + &returned_length ); // Stop if we have iterated through all objects @@ -9875,110 +9867,114 @@ struct VM { // Handle buffer sizing. If the buffer is too small, the kernel tells us how much it needs // We resize and retry, but impose a sanity cap to prevent memory issues if (!NT_SUCCESS(status)) { - if (returnedLength > buffer.size()) { - size_t newSize = static_cast(returnedLength); - if (newSize > MAX_DIR_BUFFER) newSize = MAX_DIR_BUFFER; - if (newSize <= buffer.size()) { - pNtClose(hDir); + if (returned_length > buffer.size()) { + size_t new_size = static_cast(returned_length); + if (new_size > MAX_DIR_BUFFER) new_size = MAX_DIR_BUFFER; + if (new_size <= buffer.size()) { + nt_close(dir); return false; } try { - buffer.resize(newSize); + buffer.resize(new_size); } catch (...) 
{ - pNtClose(hDir); + nt_close(dir); return false; } continue; } - pNtClose(hDir); + nt_close(dir); return false; } // Validate the returned data length to ensure we don't read out of bounds - const size_t usedLen = (returnedLength == 0) ? buffer.size() : static_cast(returnedLength); - if (usedLen < sizeof(OBJECT_DIRECTORY_INFORMATION) || usedLen > buffer.size()) { - pNtClose(hDir); + const size_t used_len = (returned_length == 0) ? buffer.size() : static_cast(returned_length); + if (used_len < sizeof(OBJECT_DIRECTORY_INFORMATION) || used_len > buffer.size()) { + nt_close(dir); return false; } - const POBJECT_DIRECTORY_INFORMATION pOdi = reinterpret_cast(buffer.data()); + const POBJECT_DIRECTORY_INFORMATION object_directory_information = reinterpret_cast(buffer.data()); // memory boundaries just for safe pointer arithmetic - const uintptr_t bufBase = reinterpret_cast(buffer.data()); - const uintptr_t bufEnd = bufBase + usedLen; + const uintptr_t buf_base = reinterpret_cast(buffer.data()); + const uintptr_t buf_end = buf_base + used_len; - std::wstring objectName; - bool gotName = false; + std::wstring object_name; + bool found_name = false; // Extract the name using the explicit Name pointer in the structure // We strictly validate that the pointer falls within our allocated buffer to prevent crashes - const size_t nameBytes = static_cast(pOdi->Name.Length); - const uintptr_t namePtr = reinterpret_cast(pOdi->Name.Buffer); + const size_t nameBytes = static_cast(object_directory_information->Name.Length); + const uintptr_t name_ptr = reinterpret_cast(object_directory_information->Name.Buffer); if (nameBytes > 0 && (nameBytes % sizeof(wchar_t) == 0)) { - const uintptr_t minValidPtr = bufBase + sizeof(OBJECT_DIRECTORY_INFORMATION); - if (namePtr >= minValidPtr && (namePtr + nameBytes) <= bufEnd && (namePtr % sizeof(wchar_t) == 0)) { - const wchar_t* wname = reinterpret_cast(namePtr); + const uintptr_t min_valid_ptr = buf_base + sizeof(OBJECT_DIRECTORY_INFORMATION); + 
if (name_ptr >= min_valid_ptr && (name_ptr + nameBytes) <= buf_end && (name_ptr % sizeof(wchar_t) == 0)) { + const wchar_t* wname = reinterpret_cast(name_ptr); const size_t wlen = nameBytes / sizeof(wchar_t); - bool foundTerm = false; + bool found_term = false; // scan for null terminator just in case for (size_t i = 0; i < wlen; ++i) { - if (wname[i] == L'\0') { objectName.assign(wname, i); foundTerm = true; break; } + if (wname[i] == L'\0') { + object_name.assign(wname, i); + found_term = true; + break; + } } - if (!foundTerm) { - objectName.assign(wname, wlen); + if (!found_term) { + object_name.assign(wname, wlen); } - gotName = true; + found_name = true; } } // If the explicit pointer was invalid, assume the string data immediately follows the structure - if (!gotName) { - const uintptr_t altStart = bufBase + sizeof(OBJECT_DIRECTORY_INFORMATION); - if (altStart >= bufEnd) { - pNtClose(hDir); + if (!found_name) { + const uintptr_t altStart = buf_base + sizeof(OBJECT_DIRECTORY_INFORMATION); + if (altStart >= buf_end) { + nt_close(dir); return false; } - const size_t maxBytes = bufEnd - altStart; + const size_t maxBytes = buf_end - altStart; if (maxBytes < sizeof(wchar_t)) { - pNtClose(hDir); + nt_close(dir); return false; } - const wchar_t* altPtr = reinterpret_cast(buffer.data() + (altStart - bufBase)); - const size_t maxChars = maxBytes / sizeof(wchar_t); + const wchar_t* alt_ptr = reinterpret_cast(buffer.data() + (altStart - buf_base)); + const size_t max_chars = maxBytes / sizeof(wchar_t); size_t realChars = 0; - for (; realChars < maxChars; ++realChars) { - if (altPtr[realChars] == L'\0') break; + for (; realChars < max_chars; ++realChars) { + if (alt_ptr[realChars] == L'\0') break; } - if (realChars == maxChars) { - pNtClose(hDir); + if (realChars == max_chars) { + nt_close(dir); return false; } - objectName.assign(altPtr, realChars); - gotName = true; + object_name.assign(alt_ptr, realChars); + found_name = true; } - if (!gotName) { - pNtClose(hDir); + if 
(!found_name) { + nt_close(dir); return false; } // "VmGenerationCounter" and "VmGid" are created by the Hyper-V VM Bus provider - if (objectName == L"VmGenerationCounter") { - pNtClose(hDir); + if (object_name == L"VmGenerationCounter") { + nt_close(dir); debug("KERNEL_OBJECTS: Detected VmGenerationCounter"); return core::add(brands::HYPERV); } - if (objectName == L"VmGid") { - pNtClose(hDir); + if (object_name == L"VmGid") { + nt_close(dir); debug("KERNEL_OBJECTS: Detected VmGid"); return core::add(brands::HYPERV); } } - pNtClose(hDir); + nt_close(dir); return false; } @@ -10676,22 +10672,22 @@ struct VM { // Initiate a query for all "Monitor" class devices present in the system. // We target monitors because VMs often emulate generic displays (e.g., "Generic Non-PnP Monitor") // or specific virtual hardware signatures in their EDID data. - const HDEVINFO devInfo = SetupDiGetClassDevs(&GUID_DEVCLASS_MONITOR, nullptr, nullptr, DIGCF_PRESENT); - if (devInfo == INVALID_HANDLE_VALUE) return false; + const HDEVINFO dev_info = SetupDiGetClassDevs(&GUID_DEVCLASS_MONITOR, nullptr, nullptr, DIGCF_PRESENT); + if (dev_info == INVALID_HANDLE_VALUE) return false; - SP_DEVINFO_DATA devData{}; - devData.cbSize = sizeof(devData); + SP_DEVINFO_DATA dev_data{}; + dev_data.cbSize = sizeof(dev_data); const int threshold = 3; // Iterate through every enumerated monitor to inspect its hardware details - for (DWORD index = 0; SetupDiEnumDeviceInfo(devInfo, index, &devData); ++index) { + for (DWORD index = 0; SetupDiEnumDeviceInfo(dev_info, index, &dev_data); ++index) { // Open the "Hardware" registry key for the specific device instance // This is where the driver stores low-level configuration, including the EDID - const HKEY hDevKey = SetupDiOpenDevRegKey(devInfo, &devData, DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ); - if (hDevKey == INVALID_HANDLE_VALUE) { - devData = {}; - devData.cbSize = sizeof(devData); + const HKEY handle_dev_key = SetupDiOpenDevRegKey(dev_info, &dev_data, 
DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ); + if (handle_dev_key == INVALID_HANDLE_VALUE) { + dev_data = {}; + dev_data.cbSize = sizeof(dev_data); continue; } @@ -10699,34 +10695,34 @@ struct VM { // EDID is a standard data structure containing the display's manufacturer ID, // serial number, and capabilities BYTE edid_stack[256]; - DWORD bufSize = static_cast(sizeof(edid_stack)); - const LONG rc = RegQueryValueExA(hDevKey, "EDID", nullptr, nullptr, edid_stack, &bufSize); - RegCloseKey(hDevKey); + DWORD buffer_size = static_cast(sizeof(edid_stack)); + const LONG rc = RegQueryValueExA(handle_dev_key, "EDID", nullptr, nullptr, edid_stack, &buffer_size); + RegCloseKey(handle_dev_key); BYTE* edid = nullptr; bool used_heap = false; BYTE* heap_buf = nullptr; // standard EDID is 128 bytes so it should fit in stack - if (rc == ERROR_SUCCESS && bufSize >= 128) { + if (rc == ERROR_SUCCESS && buffer_size >= 128) { edid = edid_stack; } // If for some reason the EDID contains extension blocks (making it larger than our stack buffer) // allocate a heap buffer dynamically to capture the full data else if (rc == ERROR_MORE_DATA) { - if (bufSize > 0 && bufSize < 65536) { - heap_buf = static_cast(LocalAlloc(LMEM_FIXED, bufSize)); + if (buffer_size > 0 && buffer_size < 65536) { + heap_buf = static_cast(LocalAlloc(LMEM_FIXED, buffer_size)); if (heap_buf) { - DWORD bufSize2 = bufSize; + DWORD extra_buffer_size = buffer_size; // Re-open the key to read the full data into the new buffer - const HKEY hDevKey2 = SetupDiOpenDevRegKey(devInfo, &devData, DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ); - if (hDevKey2 != INVALID_HANDLE_VALUE) { - if (RegQueryValueExA(hDevKey2, "EDID", nullptr, nullptr, heap_buf, &bufSize2) == ERROR_SUCCESS && bufSize2 >= 128) { + const HKEY extra_dev_key = SetupDiOpenDevRegKey(dev_info, &dev_data, DICS_FLAG_GLOBAL, 0, DIREG_DEV, KEY_READ); + if (extra_dev_key != INVALID_HANDLE_VALUE) { + if (RegQueryValueExA(extra_dev_key, "EDID", nullptr, nullptr, heap_buf, 
&extra_buffer_size) == ERROR_SUCCESS && extra_buffer_size >= 128) { edid = heap_buf; used_heap = true; - bufSize = bufSize2; + buffer_size = extra_buffer_size; } - RegCloseKey(hDevKey2); + RegCloseKey(extra_dev_key); } if (!edid) { LocalFree(heap_buf); @@ -10737,8 +10733,8 @@ struct VM { } if (!edid) { - devData = {}; - devData.cbSize = sizeof(devData); + dev_data = {}; + dev_data.cbSize = sizeof(dev_data); continue; } @@ -10746,12 +10742,12 @@ struct VM { if (!(edid[0] == 0x00 && edid[1] == 0xFF && edid[2] == 0xFF && edid[3] == 0xFF && edid[4] == 0xFF && edid[5] == 0xFF && edid[6] == 0xFF && edid[7] == 0x00)) { if (used_heap) LocalFree(heap_buf); - devData = {}; - devData.cbSize = sizeof(devData); + dev_data = {}; + dev_data.cbSize = sizeof(dev_data); continue; } - const bool checksum_ok = edid_checksum_valid(edid, bufSize); + const bool checksum_ok = edid_checksum_valid(edid, buffer_size); char manu[4]; decode_manufacturer(edid, manu); @@ -10761,11 +10757,11 @@ struct VM { const u32 serial = static_cast(edid[12] | (edid[13] << 8) | (edid[14] << 16) | (edid[15] << 24)); char monname[32]; - const bool hasName = extract_monitor_name(edid, bufSize, monname); + const bool has_name = extract_monitor_name(edid, buffer_size, monname); - char propBuf[512]; - const bool haveFriendly = get_device_property(devInfo, devData, SPDRP_FRIENDLYNAME, propBuf, sizeof(propBuf)); // friendly_name is often empty, like in Digital-Flachbildschirm monitors - const bool haveDevDesc = get_device_property(devInfo, devData, SPDRP_DEVICEDESC, propBuf, sizeof(propBuf)); + char prop_buf[512]; + const bool have_friendly = get_device_property(dev_info, dev_data, SPDRP_FRIENDLYNAME, prop_buf, sizeof(prop_buf)); // friendly_name is often empty, like in Digital-Flachbildschirm monitors + const bool have_dev_desc = get_device_property(dev_info, dev_data, SPDRP_DEVICEDESC, prop_buf, sizeof(prop_buf)); int score = 0; @@ -10779,22 +10775,22 @@ struct VM { if (score > 0) score += 1; } - if (!hasName && 
score > 0) score += 1; + if (!has_name && score > 0) score += 1; - if (!haveFriendly && !haveDevDesc) score += 1; + if (!have_friendly && !have_dev_desc) score += 1; if (used_heap) LocalFree(heap_buf); if (score >= threshold) { - SetupDiDestroyDeviceInfoList(devInfo); + SetupDiDestroyDeviceInfoList(dev_info); return true; } - devData = {}; - devData.cbSize = sizeof(devData); + dev_data = {}; + dev_data.cbSize = sizeof(dev_data); } - SetupDiDestroyDeviceInfoList(devInfo); + SetupDiDestroyDeviceInfoList(dev_info); return false; } @@ -10938,8 +10934,8 @@ struct VM { LPVOID amd_target_mem = nullptr; LPVOID exec_mem = nullptr; - PVOID freeBase = nullptr; - SIZE_T freeSize = 0; + PVOID free_base = nullptr; + SIZE_T free_size = 0; const bool claimed_amd = cpu::is_amd(); const bool claimed_intel = cpu::is_intel(); @@ -10966,7 +10962,7 @@ struct VM { if (claimed_intel || !claimed_amd) exception = true; // should generate an exception rather than be treated as a NOP, but we will check its side effects anyways // one cache line = 64 bytes - const SIZE_T targetSize = 64; + const SIZE_T target_size = 64; const HMODULE ntdll = util::get_ntdll(); if (!ntdll) return false; @@ -10979,28 +10975,28 @@ struct VM { using NtProtectVirtualMemory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG); using NtFreeVirtualMemory_t = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG); using NtFlushInstructionCache_t = NTSTATUS(__stdcall*)(HANDLE, PVOID, SIZE_T); - const auto pNtAllocateVirtualMemory = reinterpret_cast(funcs[0]); - const auto pNtProtectVirtualMemory = reinterpret_cast(funcs[1]); - const auto pNtFlushInstructionCache = reinterpret_cast(funcs[2]); - const auto pNtFreeVirtualMemory = reinterpret_cast(funcs[3]); + const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[0]); + const auto nt_protect_virtual_memory = reinterpret_cast(funcs[1]); + const auto nt_flush_instruction_cache = reinterpret_cast(funcs[2]); + const auto nt_free_virtual_memory = 
reinterpret_cast(funcs[3]); - if (!pNtAllocateVirtualMemory || !pNtProtectVirtualMemory || !pNtFlushInstructionCache || !pNtFreeVirtualMemory) { + if (!nt_allocate_virtual_memory || !nt_protect_virtual_memory || !nt_flush_instruction_cache || !nt_free_virtual_memory) { return false; } - const HANDLE hCurrentProcess = reinterpret_cast(-1LL); + const HANDLE current_process = reinterpret_cast(-1LL); { PVOID base = nullptr; - SIZE_T sz = targetSize; - NTSTATUS st2 = pNtAllocateVirtualMemory(hCurrentProcess, &base, 0, &sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + SIZE_T sz = target_size; + NTSTATUS st2 = nt_allocate_virtual_memory(current_process, &base, 0, &sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); if (!NT_SUCCESS(st2) || base == nullptr) { proceed = false; } else { amd_target_mem = base; // fill target with a recognizable non-zero pattern so we can detect CLZERO's effect (in case some obscure Intel CPU treat our instruction as a NOP) - memset(amd_target_mem, 0xA5, targetSize); + memset(amd_target_mem, 0xA5, target_size); const std::uintptr_t paddr = reinterpret_cast(amd_target_mem); // to avoid sign-extension, 32-bit compatible const u64 addr = static_cast(paddr); @@ -11015,22 +11011,22 @@ struct VM { if (proceed) { PVOID base = nullptr; SIZE_T sz = codeSize; - NTSTATUS st2 = pNtAllocateVirtualMemory(hCurrentProcess, &base, 0, &sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + NTSTATUS st2 = nt_allocate_virtual_memory(current_process, &base, 0, &sz, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); if (NT_SUCCESS(st2) && base != nullptr) { exec_mem = base; memcpy(exec_mem, bytes, codeSize); // change to RX ULONG oldProt = 0; - PVOID tmpBase = exec_mem; - SIZE_T tmpSz = codeSize; - st2 = pNtProtectVirtualMemory(hCurrentProcess, &tmpBase, &tmpSz, PAGE_EXECUTE_READ, &oldProt); + PVOID tmp_base = exec_mem; + SIZE_T tmp_sz = codeSize; + st2 = nt_protect_virtual_memory(current_process, &tmp_base, &tmp_sz, PAGE_EXECUTE_READ, &oldProt); if (NT_SUCCESS(st2)) { - 
pNtFlushInstructionCache(hCurrentProcess, exec_mem, codeSize); + nt_flush_instruction_cache(current_process, exec_mem, codeSize); - using CodeFunc = void(*)(); - using RunnerFn = u8(*)(CodeFunc); - RunnerFn runner = +[](CodeFunc func) -> u8 { + using code_func = void(*)(); + using runner_func = u8(*)(code_func); + runner_func runner = +[](code_func func) -> u8 { __try { func(); return 0; @@ -11040,14 +11036,14 @@ struct VM { } }; - const u8 runner_rc = runner(reinterpret_cast(exec_mem)); + const u8 runner_rc = runner(reinterpret_cast(exec_mem)); // check if the target buffer was written to zero by CLZERO bool memory_all_zero = false; if (amd_target_mem) { volatile u8* p = reinterpret_cast(amd_target_mem); memory_all_zero = true; - for (SIZE_T i = 0; i < targetSize; ++i) { + for (SIZE_T i = 0; i < target_size; ++i) { if (p[i] != 0) { memory_all_zero = false; break; } } } @@ -11077,13 +11073,13 @@ struct VM { } if (exec_mem) { - freeBase = exec_mem; freeSize = codeSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + free_base = exec_mem; free_size = codeSize; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); exec_mem = nullptr; } if (amd_target_mem) { - freeBase = amd_target_mem; freeSize = targetSize; - pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE); + free_base = amd_target_mem; free_size = target_size; + nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE); amd_target_mem = nullptr; } @@ -11172,52 +11168,52 @@ struct VM { std::vector heap_buf; // fallback for rare huge strings auto scan_devices = [&](const GUID* classGuid, DWORD flags) noexcept { - HDEVINFO hDevInfo = SetupDiGetClassDevsW(classGuid, nullptr, nullptr, flags); - if (hDevInfo == INVALID_HANDLE_VALUE) return; + HDEVINFO handle_dev_info = SetupDiGetClassDevsW(classGuid, nullptr, nullptr, flags); + if (handle_dev_info == INVALID_HANDLE_VALUE) return; - SP_DEVINFO_DATA devInfoData{}; - 
devInfoData.cbSize = sizeof(SP_DEVINFO_DATA); + SP_DEVINFO_DATA dev_info_data{}; + dev_info_data.cbSize = sizeof(SP_DEVINFO_DATA); - for (DWORD i = 0; SetupDiEnumDeviceInfo(hDevInfo, i, &devInfoData); ++i) { + for (DWORD i = 0; SetupDiEnumDeviceInfo(handle_dev_info, i, &dev_info_data); ++i) { - const wchar_t* wDesc = nullptr; - DWORD reqSize = 0; - DWORD propType = 0; + const wchar_t* w_desc = nullptr; + DWORD req_size = 0; + DWORD prop_type = 0; - if (SetupDiGetDeviceRegistryPropertyW(hDevInfo, &devInfoData, SPDRP_DEVICEDESC, &propType, reinterpret_cast(stack_buf), sizeof(stack_buf), &reqSize)) { - wDesc = stack_buf; + if (SetupDiGetDeviceRegistryPropertyW(handle_dev_info, &dev_info_data, SPDRP_DEVICEDESC, &prop_type, reinterpret_cast(stack_buf), sizeof(stack_buf), &req_size)) { + w_desc = stack_buf; } else if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) { - if (heap_buf.size() < reqSize) heap_buf.resize(reqSize); - if (SetupDiGetDeviceRegistryPropertyW(hDevInfo, &devInfoData, SPDRP_DEVICEDESC, &propType, heap_buf.data(), reqSize, nullptr)) { - wDesc = reinterpret_cast(heap_buf.data()); + if (heap_buf.size() < req_size) heap_buf.resize(req_size); + if (SetupDiGetDeviceRegistryPropertyW(handle_dev_info, &dev_info_data, SPDRP_DEVICEDESC, &prop_type, heap_buf.data(), req_size, nullptr)) { + w_desc = reinterpret_cast(heap_buf.data()); } } // check if the description contains any interesting stuff - if (wDesc && contains_token(wDesc)) { + if (w_desc && contains_token(w_desc)) { // if interesting get hwid to get vendor - const wchar_t* wHwId = nullptr; + const wchar_t* w_hardware_id = nullptr; - if (SetupDiGetDeviceRegistryPropertyW(hDevInfo, &devInfoData, SPDRP_HARDWAREID, &propType, reinterpret_cast(stack_buf), sizeof(stack_buf), &reqSize)) { - wHwId = stack_buf; + if (SetupDiGetDeviceRegistryPropertyW(handle_dev_info, &dev_info_data, SPDRP_HARDWAREID, &prop_type, reinterpret_cast(stack_buf), sizeof(stack_buf), &req_size)) { + w_hardware_id = stack_buf; } else if 
(GetLastError() == ERROR_INSUFFICIENT_BUFFER) { - if (heap_buf.size() < reqSize) heap_buf.resize(reqSize); - if (SetupDiGetDeviceRegistryPropertyW(hDevInfo, &devInfoData, SPDRP_HARDWAREID, &propType, heap_buf.data(), reqSize, nullptr)) { - wHwId = reinterpret_cast(heap_buf.data()); + if (heap_buf.size() < req_size) heap_buf.resize(req_size); + if (SetupDiGetDeviceRegistryPropertyW(handle_dev_info, &dev_info_data, SPDRP_HARDWAREID, &prop_type, heap_buf.data(), req_size, nullptr)) { + w_hardware_id = reinterpret_cast(heap_buf.data()); } } - if (wHwId) { - const u32 vid = find_vendor_hex(wHwId); + if (w_hardware_id) { + const u32 vid = find_vendor_hex(w_hardware_id); if (vid == VID_INTEL) intel_hits++; else if (vid == VID_AMD_ATI || vid == VID_AMD_MICRO) amd_hits++; } } } - SetupDiDestroyDeviceInfoList(hDevInfo); + SetupDiDestroyDeviceInfoList(handle_dev_info); }; // GUID_DEVCLASS_SYSTEM covers Host Bridges, LPC, PCI bridges Chipset/CPU etc @@ -11352,38 +11348,38 @@ struct VM { const HDEVINFO devs = SetupDiGetClassDevsW(nullptr, nullptr, nullptr, DIGCF_PRESENT | DIGCF_ALLCLASSES); if (devs == INVALID_HANDLE_VALUE) return false; - SP_DEVINFO_DATA devInfo{}; - devInfo.cbSize = sizeof(SP_DEVINFO_DATA); + SP_DEVINFO_DATA dev_info{}; + dev_info.cbSize = sizeof(SP_DEVINFO_DATA); - DWORD bufBytes = 4096; - BYTE* buffer = static_cast(malloc(bufBytes)); + DWORD buf_bytes = 4096; + BYTE* buffer = static_cast(malloc(buf_bytes)); if (!buffer) { SetupDiDestroyDeviceInfoList(devs); return false; } bool found = false; - for (DWORD idx = 0; SetupDiEnumDeviceInfo(devs, idx, &devInfo); ++idx) { - DWORD propertyType = 0; - if (!SetupDiGetDeviceRegistryPropertyW(devs, &devInfo, SPDRP_HARDWAREID, - &propertyType, buffer, bufBytes, nullptr)) + for (DWORD idx = 0; SetupDiEnumDeviceInfo(devs, idx, &dev_info); ++idx) { + DWORD property_type = 0; + if (!SetupDiGetDeviceRegistryPropertyW(devs, &dev_info, SPDRP_HARDWAREID, + &property_type, buffer, buf_bytes, nullptr)) { const DWORD err = 
GetLastError(); if (err == ERROR_INSUFFICIENT_BUFFER) { DWORD required = 0; - SetupDiGetDeviceRegistryPropertyW(devs, &devInfo, SPDRP_HARDWAREID, - &propertyType, nullptr, 0, &required); - if (required > bufBytes) { - BYTE* newBuf = static_cast(realloc(buffer, required)); - if (!newBuf) { + SetupDiGetDeviceRegistryPropertyW(devs, &dev_info, SPDRP_HARDWAREID, + &property_type, nullptr, 0, &required); + if (required > buf_bytes) { + BYTE* new_buffer = static_cast(realloc(buffer, required)); + if (!new_buffer) { found = false; break; } - buffer = newBuf; - bufBytes = required; + buffer = new_buffer; + buf_bytes = required; } - if (!SetupDiGetDeviceRegistryPropertyW(devs, &devInfo, SPDRP_HARDWAREID, - &propertyType, buffer, bufBytes, nullptr)) { + if (!SetupDiGetDeviceRegistryPropertyW(devs, &dev_info, SPDRP_HARDWAREID, + &property_type, buffer, buf_bytes, nullptr)) { continue; } } @@ -11392,7 +11388,7 @@ struct VM { } } - if (propertyType != REG_MULTI_SZ) continue; + if (property_type != REG_MULTI_SZ) continue; wchar_t* cur = reinterpret_cast(buffer); while (*cur) { From 5d32ca5f6633dde59c30e5b6576b08cf678ca9b4 Mon Sep 17 00:00:00 2001 From: Requiem Date: Wed, 18 Feb 2026 20:38:33 +0100 Subject: [PATCH 3/3] feat: better timing checks --- src/vmaware.hpp | 613 ++++++++++++++++++------------------------------ 1 file changed, 223 insertions(+), 390 deletions(-) diff --git a/src/vmaware.hpp b/src/vmaware.hpp index ba92a95c..12e403c9 100644 --- a/src/vmaware.hpp +++ b/src/vmaware.hpp @@ -390,7 +390,6 @@ #pragma comment(lib, "setupapi.lib") #pragma comment(lib, "powrprof.lib") - #pragma comment(lib, "mincore.lib") #pragma comment(lib, "wevtapi.lib") #elif (LINUX) #if (x86) @@ -4607,78 +4606,6 @@ struct VM { cycle_threshold = 3250; // if we're running under Hyper-V, make VMAware detect nested virtualization } - #if (WINDOWS) - const HMODULE ntdll = util::get_ntdll(); - if (!ntdll) { - return true; - } - - const char* names[] = { "NtQueryInformationThread", 
"NtSetInformationThread" }; - void* funcs[ARRAYSIZE(names)] = {}; - util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names)); - - using NtQueryInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG, PULONG); - using NtSetInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG); - - const auto nt_query_information_thread = reinterpret_cast(funcs[0]); - const auto nt_set_information_thread = reinterpret_cast(funcs[1]); - if (!nt_query_information_thread || !nt_set_information_thread) { - return true; - } - - constexpr int thread_basic_information = 0; - constexpr int thread_affinity_mask = 4; - - struct CLIENT_ID { - ULONG_PTR UniqueProcess; - ULONG_PTR UniqueThread; - }; - struct THREAD_BASIC_INFORMATION { - NTSTATUS ExitStatus; - PVOID TebBaseAddress; - CLIENT_ID ClientId; - ULONG_PTR AffinityMask; - LONG Priority; - LONG BasePriority; - } tbi; - const HANDLE current_thread = reinterpret_cast(-2LL); - - // current affinity - memset(&tbi, 0, sizeof(tbi)); - NTSTATUS status = nt_query_information_thread( - current_thread, - thread_basic_information, - &tbi, - sizeof(tbi), - nullptr - ); - - if (status < 0) { - return false; - } - - const ULONG_PTR original_affinity = tbi.AffinityMask; - - // new affinity - const DWORD_PTR wanted_mask = static_cast(1); - status = nt_set_information_thread( - current_thread, - thread_affinity_mask, - reinterpret_cast(const_cast(&wanted_mask)), - static_cast(sizeof(wanted_mask)) - ); - - // setting a higher priority for the current thread actually makes the ration between rdtsc and other timers like QIT vary much more - // contrary to what someone might think about preempting reschedule - DWORD_PTR previous_mask = 0; - if (status >= 0) { - previous_mask = original_affinity; // emulate SetThreadAffinityMask return - } - else { - previous_mask = 0; - } - #endif - // check for RDTSCP support, we will use it later int regs[4] = { 0 }; cpu::cpuid(regs, 0x80000001); @@ -4688,137 +4615,52 @@ struct VM { 
return true; } - // ================ START OF TIMING ATTACKS ================ - #if (WINDOWS) - /* TSC offseting detection */ - // This detection uses two clocks and two loops, a loop and a timer that the hypervisor can spoof and a second loop/timer that the hypervisor cannot - // When the TSC is "hooked", the hypervisor usually downscales the result to hide the time passed or doesnt let TSC advance for the time it was vm-exiting - // However, the hypervisor have absolutely no way to downscale time for the second loop because it runs natively on the CPU without exiting - // This creates a massive discrepancy in the ratio of both loops, contrary to the very small ratio if both timers were to run normally - // The hypervisor cannot easily rewind the system wall clock (second loop, QIT/KUSER_SHARED_DATA) without causing system instability (network timeouts, audio lag, etc) - static thread_local volatile u64 g_sink = 0; // thread_local volatile so that it doesnt need to be captured by the lambda - - // the reason why we use CPUID rather than RDTSC is because RDTSC is a conditionally exiting instruction, and you can modify the guest TSC without trapping it - auto vm_exit = []() noexcept -> u64 { - volatile int regs[4] = { 0 }; // doesn't need to be as elaborated as the next cpuid_lambda we will use to calculate the real latency - __cpuid((int*)regs, 0); // unconditional vmexit - return (u64)regs[0]; // dependency to avoid /O2 builds, so that the CPU cannot start the next iteration of the loop until the current __cpuid writes to regs - }; - - auto xor_lambda = []() noexcept -> u64 { - volatile u64 a = 0xDEADBEEFDEADBEEFull; // can be replaced with NOPs, etc, the core idea is to use a non-trappable instruction that the hv cannot virtualize - volatile u64 b = 0x1234567890ABCDEFull; - u64 v = a ^ b; - g_sink ^= v; - return v; - }; - - using fn_t = u64(*)(); - - // make the pointer volatile so the compiler treats the call as opaque/indirect - volatile fn_t cp_ptr = +vm_exit; 
// +lambda forces conversion to function ptr, so it won't be inlined, we need to prevent the compiler from inlining this - volatile fn_t xor_ptr = +xor_lambda; - volatile u64 dummy = 0; - - // 6 ticks * 15.6ms ~= 100ms - auto accumulate_and_measure = [&](volatile fn_t func_ptr) -> u64 { - u64 total_tsc = 0; - u64 total_qit = 0; - u64 ticks_captured = 0; - constexpr u64 TARGET_TICKS = 6; - - // We continue until we have captured enough full tick windows - while (ticks_captured < TARGET_TICKS) { - u64 start_wait, now_wait; - - // Wait for QIT tick edge to avoid granularity errors - // syncing ensures we always start the measurement at the exact edge of a QIT update, eliminating jitter - QueryInterruptTime(&start_wait); - do { - _mm_pause(); // hint to CPU we-re spin-waiting - QueryInterruptTime(&now_wait); // never touches RDTSC/RDTSCP or transitions to kernel-mode, just reads from KUSER_SHARED_DATA - } while (now_wait == start_wait); - - // start of a new tick window - const u64 qit_start = now_wait; - const u64 tsc_start = __rdtsc(); - - u64 qit_current; - // run until the tick updates again - do { - // unroll slightly to reduce overhead - dummy += func_ptr(); dummy += func_ptr(); - dummy += func_ptr(); dummy += func_ptr(); - dummy += func_ptr(); dummy += func_ptr(); - - QueryInterruptTime(&qit_current); - } while (qit_current == qit_start); - - // end of tick window - const u64 tsc_end = __rdtsc(); - - const u64 delta_qit = qit_current - qit_start; - const u64 delta_tsc = tsc_end - tsc_start; - - // we need to accumulate results, the more we do it, the more the hypervisor will downclock the TSC - if (delta_qit > 0) { - total_qit += delta_qit; - total_tsc += delta_tsc; - ticks_captured++; - } - } + const u64 ITER_XOR = 50000000ULL; + const size_t CPUID_ITER = 100; // per leaf + const unsigned int leaves[] = { + 0xB, 0xD, 0x4, 0x1, 0x7, 0xA, 0x12, 0x5, 0x40000000u, 0x80000008u, 0x0 + }; + const size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]); + const size_t 
samples_expected = n_leaves * CPUID_ITER; - // Total TSC Cycles / Total QIT Units - if (total_qit == 0) return 0; - return total_tsc / total_qit; - }; + unsigned hw = std::thread::hardware_concurrency(); + if (hw == 0) hw = 1; - // first measurement (CPUID / VMEXIT) - const ULONG64 first_ratio = accumulate_and_measure(cp_ptr); + std::atomic ready_count(0); + std::atomic state(0); - // second measurement (XOR / ALU) - const ULONG64 second_ratio = accumulate_and_measure(xor_ptr); + std::atomic t1_start(0), t1_end(0); + std::atomic t2_start(0), t2_end(0); + std::atomic t2_accum(0); - VMAWARE_UNUSED(dummy); - - /* branchless absolute difference is like: - mask = -(uint64_t)(firstRatio < secondRatio) -> 0 or 0xFFFFFFFFFFFFFFFF - diff = firstRatio - secondRatio - abs = (diff ^ mask) - mask - */ - const ULONG64 diff_mask = (ULONG64)0 - (ULONG64)(first_ratio < second_ratio); // all-ones if first ", first_ratio, ", Interrupt -> ", second_ratio, ", Ratio: ", difference); - - if (previous_mask != 0) { - nt_set_information_thread( - current_thread, - thread_affinity_mask, - reinterpret_cast(const_cast(&original_affinity)), - static_cast(sizeof(original_affinity)) - ); - } + std::vector samples; + samples.resize(samples_expected); + for (size_t i = 0; i < samples.size(); ++i) samples[i] = 0; - // QIT is updated in intervals of 100 nanoseconds - // contrary to what someone could think, under heavy load the ratio will be more close to 0, it will also be closer to 0 if we assign CPUs to a VM in our host machine - // it will increase if the BIOS/UEFI is configured to run the TSC by "core usage", which is why we use this threshold check based on a lot of empirical data - // it increases because the CPUID instruction forces the CPU pipeline to drain and serialize (heavy workload), while the XOR loop is a tight arithmetic loop (throughput workload). 
- // CPUs will boost to different frequencies for these two scenarios - // A difference of 5-10% in ratio (15-30 points) or even more is normal behavior on bare metal - if (difference >= 100) { - debug("TIMER: An hypervisor has been detected intercepting TSC"); - return true; // both ratios will always differ if TSC is downscaled, since the hypervisor can't account for the XOR/NOP loop - } + auto rdtsc = []() -> u64 { + #if (MSVC) + return static_cast(__rdtsc()); + #else + return static_cast(__rdtsc()); #endif + }; - // An hypervisor might detect that VMAware was spamming instructions to detect rdtsc hooks, and disable interception temporarily or include vm-exit latency in guest TSC - // which is why we run the classic vm-exit latency check immediately after - // to ensure a kernel developer does not hardcode the number of iterations our detector do to change behavior depending on which test we're running (tsc freeze/downscale vs tsc aggregation) - // we used a rng before running the traditional rdtsc-cpuid-rdtsc trick + // best-effort affinity as a local lambda; on macOS it's a no-op + auto try_set_affinity = [](std::thread& t, unsigned core) { + #if (WINDOWS) + HANDLE h = static_cast(t.native_handle()); + DWORD_PTR mask = static_cast(1ULL) << core; + (void)SetThreadAffinityMask(h, mask); + #elif (LINUX) + cpu_set_t cp; + CPU_ZERO(&cp); + CPU_SET(core, &cp); + (void)pthread_setaffinity_np(t.native_handle(), sizeof(cp), &cp); + #else + (void)t; (void)core; + #endif + }; - // sometimes not intercepted in some hvs (like VirtualBox) under compat mode thread_local u32 aux = 0; auto cpuid = [&](unsigned int leaf) noexcept -> u64 { #if (MSVC) @@ -4876,21 +4718,16 @@ struct VM { #endif }; + // calculate_latency (kept as provided, minimal adaptations) auto calculate_latency = [&](const std::vector& samples_in) -> u64 { if (samples_in.empty()) return 0; const size_t N = samples_in.size(); if (N == 1) return samples_in[0]; - - // local sorted copy std::vector s = samples_in; 
- std::sort(s.begin(), s.end()); // ascending - - // tiny-sample short-circuits + std::sort(s.begin(), s.end()); if (N <= 4) return s.front(); - // median (and works for sorted input) auto median_of_sorted = [](const std::vector& v, size_t lo, size_t hi) -> u64 { - // this is the median of v[lo..hi-1], requires 0 <= lo < hi const size_t len = hi - lo; if (len == 0) return 0; const size_t mid = lo + (len / 2); @@ -4898,7 +4735,6 @@ struct VM { return (v[mid - 1] + v[mid]) / 2; }; - // the robust center: median M and MAD -> approximate sigma const u64 M = median_of_sorted(s, 0, s.size()); std::vector absdev; absdev.reserve(N); @@ -4908,253 +4744,261 @@ struct VM { } std::sort(absdev.begin(), absdev.end()); const u64 MAD = median_of_sorted(absdev, 0, absdev.size()); - // convert MAD to an approximate standard-deviation-like measure - const long double kmad_to_sigma = 1.4826L; // consistent for normal approx + const long double kmad_to_sigma = 1.4826L; const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kmad_to_sigma); - // find the densest small-valued cluster by sliding a fixed-count window - // this locates the most concentrated group of samples (likely it would be the true VMEXIT cluster) - // const size_t frac_win = (N * 8 + 99) / 100; // ceil(N * 0.08) - // const size_t win = std::min(N, std::max(MIN_WIN, frac_win)); const size_t MIN_WIN = 10; - const size_t win = std::min( - N, - std::max( - MIN_WIN, - static_cast(std::ceil(static_cast(N) * 0.08)) - ) - ); + const size_t frac_win = static_cast(std::ceil(static_cast(N) * 0.08)); + size_t inner_win = frac_win; + if (inner_win < MIN_WIN) inner_win = MIN_WIN; + const size_t win = (N < inner_win) ? 
N : inner_win; size_t best_i = 0; - u64 best_span = (s.back() - s.front()) + 1; // large initial + u64 best_span = (s.back() - s.front()) + 1; for (size_t i = 0; i + win <= N; ++i) { const u64 span = s[i + win - 1] - s[i]; - if (span < best_span) { - best_span = span; - best_i = i; - } + if (span < best_span) { best_span = span; best_i = i; } } - // expand the initial window greedily while staying "tight" - // allow expansion while adding samples does not more than multiply the span by EXPAND_FACTOR constexpr long double EXPAND_FACTOR = 1.5L; size_t cluster_lo = best_i; - size_t cluster_hi = best_i + win; // exclusive - // expand left + size_t cluster_hi = best_i + win; while (cluster_lo > 0) { const u64 new_span = s[cluster_hi - 1] - s[cluster_lo - 1]; if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) || (s[cluster_hi - 1] <= (s[cluster_lo - 1] + static_cast(std::ceil(3.0L * sigma))))) { --cluster_lo; - best_span = std::min(best_span, new_span); + if (new_span < best_span) best_span = new_span; } else break; } - // expand right while (cluster_hi < N) { const u64 new_span = s[cluster_hi] - s[cluster_lo]; if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) || (s[cluster_hi] <= (s[cluster_lo] + static_cast(std::ceil(3.0L * sigma))))) { ++cluster_hi; - best_span = std::min(best_span, new_span); + if (new_span < best_span) best_span = new_span; } else break; } const size_t cluster_size = (cluster_hi > cluster_lo) ? (cluster_hi - cluster_lo) : 0; - - // cluster must be reasonably dense and cover a non-negligible portion of samples, so this is pure sanity checks const double fraction_in_cluster = static_cast(cluster_size) / static_cast(N); - const size_t MIN_CLUSTER = std::min(static_cast(std::max(5, static_cast(N / 50))), N); // at least 2% or 5 elements + size_t threshold = N / 50; + if (threshold < 5) threshold = 5; + const size_t MIN_CLUSTER = (threshold < N) ? 
threshold : N; if (cluster_size < MIN_CLUSTER || fraction_in_cluster < 0.02) { - // low-percentile (10th) trimmed median - const size_t fallback_count = std::max(1, static_cast(std::floor(static_cast(N) * 0.10))); - // median of lowest fallback_count elements (if fallback_count==1 that's smallest) + size_t fallback_count = static_cast(std::floor(static_cast(N) * 0.10)); + if (fallback_count < 1) fallback_count = 1; if (fallback_count == 1) return s.front(); const size_t mid = fallback_count / 2; if (fallback_count & 1) return s[mid]; return (s[mid - 1] + s[mid]) / 2; } - // now we try to get a robust estimate inside the cluster, trimmed mean (10% trim) centered on cluster const size_t trim_count = static_cast(std::floor(static_cast(cluster_size) * 0.10)); size_t lo = cluster_lo + trim_count; - size_t hi = cluster_hi - trim_count; // exclusive + size_t hi = cluster_hi - trim_count; if (hi <= lo) { - // degenerate -> median of cluster return median_of_sorted(s, cluster_lo, cluster_hi); } - // sum with long double to avoid overflow and better rounding long double sum = 0.0L; for (size_t i = lo; i < hi; ++i) sum += static_cast(s[i]); const long double avg = sum / static_cast(hi - lo); u64 result = static_cast(std::llround(avg)); - - // final sanity adjustments: - // if the computed result is suspiciously far from the global median (e.g., > +6*sigma) - // clamp toward the median to avoid choosing a high noisy cluster by mistake const long double diff_from_med = static_cast(result) - static_cast(M); if (diff_from_med > 0 && diff_from_med > (6.0L * sigma)) { - // clamp to median + 4*sigma (conservative) result = static_cast(std::llround(static_cast(M) + 4.0L * sigma)); } - - // Also, if result is zero (shouldn't be) or extremely small, return a smallest observed sample if (result == 0) result = s.front(); - return result; }; - // First we start by randomizing counts WITHOUT syscalls and WITHOUT using instructions that can be trapped by hypervisors, this was a hard task - 
struct entropy_provider { - // prevent inlining so optimizer can't fold this easily - #if (MSVC && !CLANG) - __declspec(noinline) - #else - __attribute__((noinline)) - #endif - u64 operator()() const noexcept { - // TO prevent hoisting across this call - std::atomic_signal_fence(std::memory_order_seq_cst); - - // start state (golden ratio) - volatile u64 v = UINT64_C(0x9E3779B97F4A7C15); - - // mix in addresses (ASLR gives entropy but if ASLR disabled or bypassed we have some tricks still) - // Take addresses of various locals/statics and mark some volatile so they cannot be optimized away - volatile int local_static = 0; // local volatile (stack-like) - static volatile int module_static = 0; // static in function scope (image address) - auto probe_lambda = []() noexcept {}; // stack-local lambda object - std::uintptr_t pa = reinterpret_cast(&v); - std::uintptr_t pb = reinterpret_cast(&local_static); - std::uintptr_t pc = reinterpret_cast(&module_static); - std::uintptr_t pd = reinterpret_cast(&probe_lambda); - - v ^= static_cast(pa) + UINT64_C(0x9E3779B97F4A7C15) + (v << 6) + (v >> 2); - v ^= static_cast(pb) + (v << 7); - v ^= static_cast(pc) + (v >> 11); - v ^= static_cast(pd) + UINT64_C(0xBF58476D1CE4E5B9); - - // dependent operations on volatile locals to prevent elimination - for (int i = 0; i < 24; ++i) { - volatile int stack_local = i ^ static_cast(v); - // take address each iteration and fold it in - std::uintptr_t la = reinterpret_cast(&stack_local); - v ^= (static_cast(la) + (static_cast(i) * UINT64_C(0x9E3779B97F4A7C))); - // dependent shifts to spread any small differences - v ^= (v << ((i & 31))); - v ^= (v >> (((i + 13) & 31))); - // so compiler can't remove the local entirely - std::atomic_signal_fence(std::memory_order_seq_cst); - } - - // final avalanche! 
(as said before, just in case ASLR can be folded) - v ^= (v << 13); - v ^= (v >> 7); - v ^= (v << 17); - v *= UINT64_C(0x2545F4914F6CDD1D); - v ^= (v >> 33); - - // another compiler fence to prevent hoisting results - std::atomic_signal_fence(std::memory_order_seq_cst); - - return static_cast(v); - } - }; + // to touch pages and exercise cpuid paths + for (int w = 0; w < 128; ++w) { + volatile u64 tmp = cpuid(leaves[w % n_leaves]); + VMAWARE_UNUSED(tmp); + } - // rejection sampling as before to avoid modulo bias - auto rng = [](u64 min, u64 max, auto getrand) noexcept -> u64 { - const u64 range = max - min + 1; - const u64 max_val = std::numeric_limits::max(); - const u64 limit = max_val - (max_val % range); - for (;;) { - const u64 r = getrand(); - if (r < limit) return min + (r % range); - // small local mix to change subsequent outputs (still in user-mode and not a syscall) - volatile u64 scrub = r; - scrub ^= (scrub << 11); - scrub ^= (scrub >> 9); - (void)scrub; + // Thread 1: start near same cycle, do XOR work, set end + std::thread th1([&]() { + ready_count.fetch_add(1, std::memory_order_acq_rel); + while (ready_count.load(std::memory_order_acquire) < 2) { /* spin */ } + + u64 s = rdtsc(); + t1_start.store(s, std::memory_order_release); + state.store(1, std::memory_order_release); + + volatile u64 x = 0xDEADBEEFCAFEBABEULL; + for (u64 i = 0; i < ITER_XOR; ++i) { + x ^= i; + x = (x << 1) ^ (x >> 3); + } + VMAWARE_UNUSED(x); + + u64 e = rdtsc(); + t1_end.store(e, std::memory_order_release); + state.store(2, std::memory_order_release); + }); + + // Thread 2: barrier, sample start, perform cpuid sampling and keep accumulating rdtsc deltas + std::thread th2([&]() { + ready_count.fetch_add(1, std::memory_order_acq_rel); + while (ready_count.load(std::memory_order_acquire) < 2) { /* spin */ } + + u64 last = rdtsc(); + t2_start.store(last, std::memory_order_release); + + // local accumulator (fast) and local index into samples + u64 acc = 0; + size_t idx = 0; + + // 
per-leaf sampling but do not stop entirely if thread1 is still running after completing planned samples + for (size_t li = 0; li < n_leaves; ++li) { + const unsigned int leaf = leaves[li]; + for (unsigned i = 0; i < CPUID_ITER; ++i) { + // accumulate rdtsc delta up to now (this includes time since last sample and includes previous cpuid) + u64 now = rdtsc(); + acc += (now >= last) ? (now - last) : (u64)((u64)0 - last + now); + last = now; + + // run cpuid and store latency + if (idx < samples.size()) samples[idx] = cpuid(leaf); + ++idx; + + // if thread1 finished, capture a final rdtsc and exit sampling loops + if (state.load(std::memory_order_acquire) == 2) { + u64 final_now = rdtsc(); + acc += (final_now >= last) ? (final_now - last) : (u64)((u64)0 - last + final_now); + last = final_now; + t2_end.store(final_now, std::memory_order_release); + t2_accum.store(acc, std::memory_order_release); + return; + } + } } - }; - - const entropy_provider entropy_prov{}; - - // Intel leaves on an AMD CPU and viceversa will still work for this probe - // for leafs like 0 that just returns static data, like "AuthenticAMD" or "GenuineIntel", a fast exit path could be made - // for other leaves like the extended state that rely on dynamic system states like APIC IDs and XState, kernel data locks are required - // we try different leaves so that is not worth to just create a "fast" exit path, forcing guest TSC manipulation - // the vmexit itself has a latency of around 800 cycles, combined with the registers save and the cpuid information we require, it costs 1000+ cycles - constexpr unsigned int leaves[] = { - 0xB, // topology - 0xD, // xsave/xstate - 0x4, // deterministic cache params - 0x1, // basic features - 0x7, // extended features - 0xA, // architectural performance monitoring - 0x12, // SGX/enclave - 0x5, // MONITOR/MWAIT - 0x40000000u, // hypervisor range start - 0x80000008u, // extended address limits (amd/intel ext) - 0x0 // fallback to leaf 0 occasionally, the easiest 
to patch - }; - constexpr size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]); - const size_t iterations = static_cast(rng(100, 200, [&entropy_prov]() noexcept { return entropy_prov(); })); + // If we reach here, we completed planned samples but thread1 might still be running, so continue spamming + while (state.load(std::memory_order_acquire) != 2) { + u64 now = rdtsc(); + acc += (now >= last) ? (now - last) : (u64)((u64)0 - last + now); + last = now; + } - // pre-allocate sample buffer and touch pages to avoid page faults by MMU during measurement - std::vector samples; - samples.resize(n_leaves * iterations); - for (size_t i = 0; i < samples.size(); ++i) samples[i] = 0; // or RtlSecureZeroMemory (memset) if Windows + // final sample after seeing finished + u64 final_now = rdtsc(); + acc += (final_now >= last) ? (final_now - last) : (u64)((u64)0 - last + final_now); + last = final_now; + t2_end.store(final_now, std::memory_order_release); + t2_accum.store(acc, std::memory_order_release); + }); - /* - * We want to move our thread from the Running state to the Waiting state - * When the sleep expires (at the next timer tick), the kernel moves VMAware's thread to the Ready state - * When it picks us up again, it grants VMAware a fresh quantum, typically varying between 2 ticks (30ms) and 6 ticks (90ms) on Windows Client editions - * The default resolution of the Windows clock we're using is 64Hz - * Because we're calling NtDelayExecution with only 1ms, the kernel interprets this as "Sleep for at least 1ms" - * Since the hardware interrupt (tick) only fires every 15.6ms and we're not using timeBeginPeriod, the kernel cannot wake us after exactly 1ms - * So instead, it does what we want and wakes us up at the very next timer interrupt - * That's the reason why it's only 1ms and we're not using CreateWaitableTimerEx / SetWaitableTimerEx - * Sleep(0) would return instantly in some circumstances - * This gives us more time for sampling before we're rescheduled again - 
*/ + // Try to pin to different cores + if (hw >= 2) { + try_set_affinity(th1, 0); + try_set_affinity(th2, 1); + } - #if (WINDOWS) - // voluntary context switch to get a fresh quantum - SleepEx(1, FALSE); - #else - // should work similarly in Unix-like operating systems - std::this_thread::sleep_for(std::chrono::milliseconds(1)); - #endif + th1.join(); + th2.join(); - // warm up but rotating through leaves to exercise different cpuid paths - for (int w = 0; w < 128; ++w) { - volatile u64 tmp = cpuid(leaves[w % n_leaves]); - VMAWARE_UNUSED(tmp); - } + const u64 a = t1_start.load(std::memory_order_acquire); + const u64 b = t1_end.load(std::memory_order_acquire); + const u64 c = t2_start.load(std::memory_order_acquire); + const u64 d = t2_end.load(std::memory_order_acquire); + const u64 acc = t2_accum.load(std::memory_order_acquire); - // 100 iterations per leaf, store contiguously per-leaf, so 1100 runs in total - for (size_t li = 0; li < n_leaves; ++li) { - const unsigned int leaf = leaves[li]; - for (unsigned i = 0; i < iterations; ++i) { - samples[li * iterations + i] = cpuid(leaf); - } - } + const u64 t1_delta = (b > a) ? 
(b - a) : 0; + const u64 t2_delta = acc; - const u64 cpuid_latency = calculate_latency(samples); + std::vector used; + used.reserve(samples_expected); + for (size_t i = 0; i < samples.size(); ++i) + if (samples[i] != 0) + used.push_back(samples[i]); + const u64 cpuid_latency = calculate_latency(used); - debug("TIMER: VMEXIT latency -> ", cpuid_latency); + debug("TIMER: thread1 cycles: start=", a, " end=", b, " delta=", t1_delta); + debug("TIMER: thread2 cycles: start=", c, " end=", d, " acc=", t2_delta); + debug("TIMER: vmexit latency: ", cpuid_latency); if (cpuid_latency >= cycle_threshold) { return true; } - else if (cpuid_latency <= 25) { + else if (cpuid_latency <= 25) { // cpuid is fully serializing, no CPU have this low average cycles in real-world scenarios // however, in patches, zero or even negative deltas can be seen oftenly return true; } - // TLB flushes or side channel cache attacks are not even tried due to how unreliable they are against stealthy hypervisors + + if (t1_delta == 0) { + return false; + } + + const double ratio = double(t2_delta) / double(t1_delta); + if (ratio < 0.95 || ratio > 1.05) { + debug("TIMER: VMAware detected an hypervisor offsetting TSC: ", ratio); + } + else { + debug("TIMER: Ratio: ", ratio); + } + + #if (WINDOWS) + typedef struct _PROCESSOR_POWER_INFORMATION { + u32 Number; + u32 MaxMhz; + u32 CurrentMhz; + u32 MhzLimit; + u32 MaxIdleState; + u32 CurrentIdleState; + } PROCESSOR_POWER_INFORMATION, * PPROCESSOR_POWER_INFORMATION; + + enum POWER_INFORMATION_LEVEL_MIN { + ProcessorInformation = 11 + }; + + HMODULE hPowr = GetModuleHandleA("powrprof.dll"); + if (!hPowr) hPowr = LoadLibraryA("powrprof.dll"); + if (!hPowr) return 0; + + const char* names[] = { "CallNtPowerInformation" }; + void* funcs[1] = { nullptr }; + util::get_function_address(hPowr, names, funcs, 1); + if (!funcs[0]) return 0; + + using CallNtPowerInformation_t = NTSTATUS(__stdcall*)(int, PVOID, ULONG, PVOID, ULONG); + CallNtPowerInformation_t 
CallNtPowerInformation = + reinterpret_cast(funcs[0]); + + SYSTEM_INFO si; + GetSystemInfo(&si); + const DWORD procCount = si.dwNumberOfProcessors; + if (procCount == 0) return 0; + + const SIZE_T bufSize = static_cast(procCount) * sizeof(PROCESSOR_POWER_INFORMATION); + void* raw = _malloca(bufSize); + if (!raw) return 0; + memset(raw, 0, bufSize); + + NTSTATUS status = CallNtPowerInformation( + ProcessorInformation, + nullptr, 0, + raw, static_cast(bufSize) + ); + + unsigned speed = 0; + if ((LONG)status >= 0) { + PROCESSOR_POWER_INFORMATION* info = reinterpret_cast(raw); + speed = static_cast(info[0].CurrentMhz); + } + + _freea(raw); + + if (speed < 800) { + debug("TIMER: VMAware detected an hypervisor offsetting TSC: ", speed); + return true; + } + #endif #endif return false; } @@ -10032,17 +9876,6 @@ struct VM { // Helper Lambdas // ------------------------------------------------------------------------- - auto ascii_string_equals_ci = [](const char* s1, const char* s2) noexcept -> bool { - if (!s1 || !s2) return false; - while (*s1 && *s2) { - char c1 = *s1; if (c1 >= 'A' && c1 <= 'Z') c1 += 32; - char c2 = *s2; if (c2 >= 'A' && c2 <= 'Z') c2 += 32; - if (c1 != c2) return false; - s1++; s2++; - } - return *s1 == *s2; - }; - auto buffer_contains_ascii_ci = [](const BYTE* data, size_t len, const char* pat) noexcept -> bool { if (!data || len == 0 || !pat) return false; const size_t plen = strlen(pat); if (len < plen) return false;