diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 313fc816..b6942570 100755
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -68,11 +68,6 @@ int main() {
}
```
-> [!WARNING]
-> ## Note from the developer:
-> It should be mentioned that not all of the codebase is formatted this way. This standard guideline has been introduced 2 years after the project has started, and the lack of any guideline has resulted in the codebase looking fragmented, inconsistent, and very different in some portions due to differing coding styles among developers. This is completely my fault, and it has accumulated technical debt over the years. Although the current state isn't formatted consistently, the guideline is meant to slowly evolve the library into a much simpler version that's approachable to anybody trying to contribute and read through the code.
-
-
## I want to add a new technique, how would I do that?
There's a few steps that should be taken:
1. Make sure to add the technique name in the enums of all the techniques in the appropriate place.
diff --git a/docs/documentation.md b/docs/documentation.md
index ab8a0a03..35eb4698 100644
--- a/docs/documentation.md
+++ b/docs/documentation.md
@@ -515,85 +515,85 @@ VMAware provides a convenient way to not only check for VMs, but also have the f
| `VM::HYPERVISOR_BIT` | Check if hypervisor feature bit in CPUID ECX bit 31 is enabled (always false for physical CPUs) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4376) |
| `VM::HYPERVISOR_STR` | Check for hypervisor brand string length (would be around 2 characters in a host machine) | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4402) |
| `VM::TIMER` | Check for timing anomalies in the system | 🐧🪟🍏 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4588) |
-| `VM::THREAD_COUNT` | Check if there are only 1 or 2 threads, which is a common pattern in VMs with default settings, nowadays physical CPUs should have at least 4 threads for modern CPUs | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7227) |
-| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5284) |
-| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6136) |
-| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5165) |
-| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5189) |
-| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5214) |
-| `VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5232) |
-| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5247) |
-| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5390) |
-| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5431) |
-| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7527) |
-| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7251) |
-| `VM::WINE` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7558) |
-| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7597) |
-| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6147) |
-| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5441) |
-| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Window product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7657) |
+| `VM::THREAD_COUNT` | Check if there are only 1 or 2 threads, which is a common pattern in VMs with default settings, nowadays physical CPUs should have at least 4 threads for modern CPUs | 🐧🪟🍏 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7228) |
+| `VM::MAC` | Check if mac address starts with certain VM designated values | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5285) |
+| `VM::TEMPERATURE` | Check for device's temperature | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6137) |
+| `VM::SYSTEMD` | Check result from systemd-detect-virt tool | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5166) |
+| `VM::CVENDOR` | Check if the chassis vendor is a VM vendor | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5190) |
+| `VM::CTYPE` | Check if the chassis type is valid (it's very often invalid in VMs) | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5215) |
+| `VM::DOCKERENV` | Check if /.dockerenv or /.dockerinit file is present | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5233) |
+| `VM::DMIDECODE` | Check if dmidecode output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5248) |
+| `VM::DMESG` | Check if dmesg output matches a VM brand | 🐧 | 55% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5391) |
+| `VM::HWMON` | Check if /sys/class/hwmon/ directory is present. If not, likely a VM | 🐧 | 35% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5432) |
+| `VM::DLL` | Check for VM-specific DLLs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7528) |
+| `VM::HWMODEL` | Check if the sysctl for the hwmodel does not contain the "Mac" string | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7252) |
+| `VM::WINE` | Check if the function "wine_get_unix_file_name" is present and if the OS booted from a VHD container | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7559) |
+| `VM::POWER_CAPABILITIES` | Check what power states are enabled | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7669) |
+| `VM::PROCESSES` | Check for any VM processes that are active | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6148) |
+| `VM::LINUX_USER_HOST` | Check for default VM username and hostname for linux | 🐧 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5442) |
+| `VM::GAMARUE` | Check for Gamarue ransomware technique which compares VM-specific Window product IDs | 🪟 | 10% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7729) |
| `VM::BOCHS_CPU` | Check for various Bochs-related emulation oversights through CPU checks | 🐧🪟🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4430) |
-| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7287) |
-| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7320) |
-| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7417) |
-| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_mm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7458) |
-| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7766) |
+| `VM::MAC_MEMSIZE` | Check if memory is too low for MacOS system | 🍏 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7288) |
+| `VM::MAC_IOKIT` | Check MacOS' IO kit registry for VM-specific strings | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7321) |
+| `VM::IOREG_GREP` | Check for VM-strings in ioreg commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7418) |
+| `VM::MAC_SIP` | Check for the status of System Integrity Protection and hv_mm_present | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7459) |
+| `VM::VPC_INVALID` | Check for official VPC method | 🪟 | 75% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7838) |
| `VM::SYSTEM_REGISTERS` | | | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L1) |
-| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5470) |
-| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5982) |
-| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5779) |
-| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5798) |
-| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7817) |
-| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7842) |
-| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7903) |
+| `VM::VMWARE_IOMEM` | Check for VMware string in /proc/iomem | 🐧 | 65% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5471) |
+| `VM::VMWARE_IOPORTS` | Check for VMware string in /proc/ioports | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5983) |
+| `VM::VMWARE_SCSI` | Check for VMware string in /proc/scsi/scsi | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5780) |
+| `VM::VMWARE_DMESG` | Check for VMware-specific device name in dmesg output | 🐧 | 65% | Admin | | Disabled by default | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5799) |
+| `VM::VMWARE_STR` | Check str assembly instruction method for VMware | 🪟 | 35% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7889) |
+| `VM::VMWARE_BACKDOOR` | Check for official VMware io port backdoor technique | 🪟 | 100% | | 32-bit | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7914) |
+| `VM::MUTEX` | Check for mutex strings of VM brands | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7975) |
| `VM::THREAD_MISMATCH` | Check if the system's thread count matches the expected thread count for the detected CPU model | 🐧🪟🍏 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4510) |
-| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7989) |
-| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8045) |
-| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6384) |
-| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8101) |
-| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8136) |
-| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5486) |
+| `VM::CUCKOO_DIR` | Check for cuckoo directory using crt and WIN API directory functions | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8061) |
+| `VM::CUCKOO_PIPE` | Check for Cuckoo specific piping mechanism | 🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8117) |
+| `VM::AZURE` | Check for default Azure hostname format (Azure uses Hyper-V as their base VM brand) | 🐧🪟 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6385) |
+| `VM::DISPLAY` | Check for display configurations commonly found in VMs | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8173) |
+| `VM::DEVICE_STRING` | Check if bogus device string would be accepted | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8208) |
+| `VM::BLUESTACKS_FOLDERS` | Check for the presence of BlueStacks-specific folders | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5487) |
| `VM::CPUID_SIGNATURE` | Check for signatures in leaf 0x40000001 in CPUID | 🐧🪟🍏 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4538) |
| `VM::KGT_SIGNATURE` | Check for Intel KGT (Trusty branch) hypervisor signature in CPUID | 🐧🪟🍏 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L4564) |
-| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5567) |
-| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5596) |
-| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5624) |
-| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5672) |
-| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5702) |
-| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5756) |
-| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5832) |
-| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5854) |
-| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5937) |
-| `VM::PODMAN_FILE` | Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5967) |
-| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5999) |
-| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8153) |
-| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8251) |
-| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8490) |
-| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8596) |
-| `VM::DEVICE_HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8634) |
-| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6027) |
-| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8737) |
-| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8767) |
-| `VM::AMD_SEV` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5509) |
-| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8835) |
-| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6444) |
-| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6057) |
-| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8929) |
-| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6084) |
-| `VM::PCI_DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6871) |
-| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9037) |
-| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9182) |
-| `VM::UD` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8929) |
-| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9457) |
-| `VM::DBVM` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9504) |
-| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9623) |
-| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7502) |
-| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9726) |
-| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9912) |
-| `VM::SMBIOS_INTEGRITY` | Check if SMBIOS is malformed/corrupted in a way that is typical for VMs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10460) |
-| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10471) |
-| `VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10727) |
-| `VM::CLOCK` | Check the presence of system timers | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11195) |
+| `VM::QEMU_VIRTUAL_DMI` | Check for presence of QEMU in the /sys/devices/virtual/dmi/id directory | 🐧 | 40% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5568) |
+| `VM::QEMU_USB` | Check for presence of QEMU in the /sys/kernel/debug/usb/devices directory | 🐧 | 20% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5597) |
+| `VM::HYPERVISOR_DIR` | Check for presence of any files in /sys/hypervisor directory | 🐧 | 20% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5625) |
+| `VM::UML_CPU` | Check for the "UML" string in the CPU brand | 🐧 | 80% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5673) |
+| `VM::KMSG` | Check for any indications of hypervisors in the kernel message logs | 🐧 | 5% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5703) |
+| `VM::VBOX_MODULE` | Check for a VBox kernel module | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5757) |
+| `VM::SYSINFO_PROC` | Check for potential VM info in /proc/sysinfo | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5833) |
+| `VM::DMI_SCAN` | Check for string matches of VM brands in the linux DMI | 🐧 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5855) |
+| `VM::SMBIOS_VM_BIT` | Check for the VM bit in the SMBIOS data | 🐧 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5938) |
+| `VM::PODMAN_FILE` | Check for podman file in /run/ | 🐧 | 5% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5968) |
+| `VM::WSL_PROC` | Check for WSL or microsoft indications in /proc/ subdirectories | 🐧 | 30% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6000) |
+| `VM::DRIVERS` | Check for VM-specific names for drivers | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8225) |
+| `VM::DISK_SERIAL` | Check for serial numbers of virtual disks | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8323) |
+| `VM::IVSHMEM` | Check for IVSHMEM device presence | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8562) |
+| `VM::GPU_CAPABILITIES` | Check for GPU capabilities related to VMs | 🪟 | 45% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8668) |
+| `VM::DEVICE_HANDLES` | Check for vm-specific devices | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8706) |
+| `VM::QEMU_FW_CFG` | Detect QEMU fw_cfg interface. This first checks the Device Tree for a fw-cfg node or hypervisor tag, then verifies the presence of the qemu_fw_cfg module and firmware directories in sysfs. | 🐧 | 70% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6028) |
+| `VM::VIRTUAL_PROCESSORS` | Check if the number of virtual and logical processors are reported correctly by the system | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8809) |
+| `VM::HYPERVISOR_QUERY` | Check if a call to NtQuerySystemInformation with the 0x9f leaf fills a _SYSTEM_HYPERVISOR_DETAIL_INFORMATION structure | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8839) |
+| `VM::AMD_SEV` | Check for AMD-SEV MSR running on the system | 🐧🍏 | 50% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L5510) |
+| `VM::VIRTUAL_REGISTRY` | Check for particular object directory which is present in Sandboxie virtual environment but not in usual host systems | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L8907) |
+| `VM::FIRMWARE` | Check for VM signatures on all firmware tables | 🐧🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6445) |
+| `VM::FILE_ACCESS_HISTORY` | Check if the number of accessed files are too low for a human-managed environment | 🐧 | 15% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6058) |
+| `VM::AUDIO` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9001) |
+| `VM::NSJAIL_PID` | Check if process status matches with nsjail patterns with PID anomalies | 🐧 | 75% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6085) |
+| `VM::PCI_DEVICES` | Check for PCI vendor and device IDs that are VM-specific | 🐧🪟 | 95% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L6872) |
+| `VM::ACPI_SIGNATURE` | Check for VM-specific ACPI device signatures | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9109) |
+| `VM::TRAP` | Check if after raising two traps at the same RIP, a hypervisor interferes with the instruction pointer delivery | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9254) |
+| `VM::UD` | Check if no waveform-audio output devices are present in the system | 🪟 | 25% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9001) |
+| `VM::BLOCKSTEP` | Check if a hypervisor does not properly restore the interruptibility state after a VM-exit in compatibility mode | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9529) |
+| `VM::DBVM` | Check if Dark Byte's VM is present | 🪟 | 150% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9576) |
+| `VM::BOOT_LOGO` | Check boot logo for known VM images | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9695) |
+| `VM::MAC_SYS` | Check for VM-strings in system profiler commands for MacOS | 🍏 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L7503) |
+| `VM::KERNEL_OBJECTS` | Check for any signs of VMs in Windows kernel object entities | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9796) |
+| `VM::NVRAM` | Check for known NVRAM signatures that are present on virtual firmware | 🪟 | 100% | Admin | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L9986) |
+| `VM::SMBIOS_INTEGRITY` | Check if SMBIOS is malformed/corrupted in a way that is typical for VMs | 🪟 | 50% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10534) |
+| `VM::EDID` | Check for non-standard EDID configurations | 🪟 | 100% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10545) |
+| `VM::CPU_HEURISTIC` | Check whether the CPU is genuine and its reported instruction capabilities are not masked | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L10801) |
+| `VM::CLOCK` | Check the presence of system timers | 🪟 | 90% | | | | [link](https://github.com/kernelwernel/VMAware/tree/main/src/vmaware.hpp#L11269) |
diff --git a/src/vmaware.hpp b/src/vmaware.hpp
index c02f8020..12e403c9 100644
--- a/src/vmaware.hpp
+++ b/src/vmaware.hpp
@@ -58,10 +58,10 @@
* - struct for internal cpu operations => line 718
* - struct for internal memoization => line 3042
* - struct for internal utility functions => line 3224
- * - struct for internal core components => line 11344
+ * - struct for internal core components => line 11418
* - start of VM detection technique list => line 4279
- * - start of public VM detection functions => line 11722
- * - start of externally defined variables => line 12742
+ * - start of public VM detection functions => line 11796
+ * - start of externally defined variables => line 12816
*
*
* ============================== EXAMPLE ===================================
@@ -90,7 +90,7 @@
* Welcome! This is just a preliminary text to lay the context of how it works,
* how it's structured, and to guide anybody who's trying to understand the whole code.
* Reading over 12k+ lines of other people's C++ code is obviously not an easy task,
- * and that's perfectly understandable. We'd struggle as well if I were in your position
+ * and that's perfectly understandable. We'd struggle as well if we were in your position
* while not even knowing where to start. So here's a more human-friendly explanation:
*
*
@@ -390,7 +390,6 @@
#pragma comment(lib, "setupapi.lib")
#pragma comment(lib, "powrprof.lib")
- #pragma comment(lib, "mincore.lib")
#pragma comment(lib, "wevtapi.lib")
#elif (LINUX)
#if (x86)
@@ -3352,8 +3351,8 @@ struct VM {
#elif (WINDOWS)
bool is_admin = false;
HANDLE hToken = nullptr;
- const HANDLE hCurrentProcess = reinterpret_cast<HANDLE>(-1LL);
- if (OpenProcessToken(hCurrentProcess, TOKEN_QUERY, &hToken)) {
+ const HANDLE current_process = reinterpret_cast<HANDLE>(-1LL);
+ if (OpenProcessToken(current_process, TOKEN_QUERY, &hToken)) {
TOKEN_ELEVATION elevation{};
DWORD dwSize;
if (GetTokenInformation(hToken, TokenElevation, &elevation, sizeof(elevation), &dwSize)) {
@@ -3464,7 +3463,7 @@ struct VM {
return util::make_unique<std::string>();
#else
#if (LINUX || APPLE)
- struct FileDeleter {
+ struct file_deleter {
void operator()(FILE* f) const noexcept {
if (f) {
pclose(f);
@@ -3472,7 +3471,7 @@ struct VM {
}
};
- std::unique_ptr pipe(popen(cmd, "r"), FileDeleter());
+ std::unique_ptr pipe(popen(cmd, "r"), file_deleter());
if (!pipe) {
return util::make_unique();
}
@@ -3481,9 +3480,9 @@ struct VM {
char* line = nullptr;
// to ensure line is freed even if string::append throws std::bad_alloc
- struct LineGuard {
+ struct line_guard {
char*& ptr;
- ~LineGuard() { if (ptr) free(ptr); }
+ ~line_guard() { if (ptr) free(ptr); }
} guard{ line };
size_t len = 0;
@@ -3581,11 +3580,11 @@ struct VM {
[[nodiscard]] static bool is_running_under_translator() {
#if (WINDOWS && _WIN32_WINNT >= _WIN32_WINNT_WIN10)
- const HANDLE hCurrentProcess = reinterpret_cast<HANDLE>(-1LL);
+ const HANDLE current_process = reinterpret_cast<HANDLE>(-1LL);
USHORT procMachine = 0, nativeMachine = 0;
const auto pIsWow64Process2 = &IsWow64Process2;
- if (pIsWow64Process2(hCurrentProcess, &procMachine, &nativeMachine)) {
+ if (pIsWow64Process2(current_process, &procMachine, &nativeMachine)) {
if (nativeMachine == IMAGE_FILE_MACHINE_ARM64 &&
(procMachine == IMAGE_FILE_MACHINE_AMD64 || procMachine == IMAGE_FILE_MACHINE_I386)) {
debug("Translator detected x64/x86 process on ARM64");
@@ -3595,7 +3594,7 @@ struct VM {
// only if we got MACHINE_UNKNOWN on process but native is ARM64
if (nativeMachine == IMAGE_FILE_MACHINE_ARM64) {
- using PGetProcessInformation = BOOL(__stdcall*)(HANDLE, PROCESS_INFORMATION_CLASS, PVOID, DWORD);
+ using get_process_information = BOOL(__stdcall*)(HANDLE, PROCESS_INFORMATION_CLASS, PVOID, DWORD);
const HMODULE ntdll = util::get_ntdll();
if (ntdll == nullptr) {
return false;
@@ -3605,15 +3604,15 @@ struct VM {
void* funcs[1] = { nullptr };
util::get_function_address(ntdll, names, funcs, 1);
- PGetProcessInformation pGetProcInfo = reinterpret_cast<PGetProcessInformation>(funcs[0]);
- if (pGetProcInfo) {
+ get_process_information get_proc_info = reinterpret_cast<get_process_information>(funcs[0]);
+ if (get_proc_info) {
struct PROCESS_MACHINE_INFORMATION {
USHORT ProcessMachine;
USHORT Res0;
DWORD MachineAttributes;
} pmInfo = {};
// ProcessMachineTypeInfo == 9 per MS Q&A
- if (pGetProcInfo(hCurrentProcess, (PROCESS_INFORMATION_CLASS)9, &pmInfo, sizeof(pmInfo))) {
+ if (get_proc_info(current_process, (PROCESS_INFORMATION_CLASS)9, &pmInfo, sizeof(pmInfo))) {
if (pmInfo.ProcessMachine == IMAGE_FILE_MACHINE_AMD64 || pmInfo.ProcessMachine == IMAGE_FILE_MACHINE_I386) {
debug("Translator detected x64/x86 process on ARM64 by fallback");
return true;
@@ -3947,8 +3946,8 @@ struct VM {
#if (WINDOWS)
// retrieves the addresses of specified functions from a loaded module using the export directory, manual implementation of GetProcAddress
static void get_function_address(const HMODULE hModule, const char* names[], void** functions, size_t count) {
- using FuncMap = std::unordered_map;
- static std::unordered_map function_cache;
+ using func_map = std::unordered_map;
+ static std::unordered_map function_cache;
for (size_t i = 0; i < count; ++i) functions[i] = nullptr;
if (!hModule) return;
@@ -4036,7 +4035,7 @@ struct VM {
const DWORD* funcRvas = reinterpret_cast(base + addr_funcs);
const WORD* ordinals = reinterpret_cast(base + addr_ord);
- FuncMap& module_cache = function_cache[hModule];
+ func_map& module_cache = function_cache[hModule];
for (size_t i = 0; i < count; ++i) {
const char* current_name = names[i];
@@ -4088,9 +4087,9 @@ struct VM {
[[nodiscard]] static HMODULE get_ntdll() {
- static HMODULE cachedNtdll = nullptr;
- if (cachedNtdll != nullptr) {
- return cachedNtdll;
+ static HMODULE cached_ntdll = nullptr;
+ if (cached_ntdll != nullptr) {
+ return cached_ntdll;
}
#ifndef _WINTERNL_
@@ -4151,15 +4150,15 @@ struct VM {
#endif
if (!peb) { // not x86 or tampered with
- const HMODULE h = GetModuleHandleW(L"ntdll.dll");
- if (h) cachedNtdll = h;
- return h;
+ const HMODULE ntdll = GetModuleHandleW(L"ntdll.dll");
+ if (ntdll) cached_ntdll = ntdll;
+ return ntdll;
}
PPEB_LDR_DATA ldr = peb->Ldr;
if (!ldr) {
const HMODULE h = GetModuleHandleW(L"ntdll.dll");
- if (h) cachedNtdll = h;
+ if (h) cached_ntdll = h;
return h;
}
@@ -4167,8 +4166,8 @@ struct VM {
#define CONTAINING_RECORD(address, type, field) ((type *)((char*)(address) - (size_t)(&((type *)0)->field)))
#endif
- constexpr WCHAR targetName[] = L"ntdll.dll";
- constexpr size_t targetLen = (std::size(targetName) - 1);
+ constexpr WCHAR target_name[] = L"ntdll.dll";
+ constexpr size_t target_length = (std::size(target_name) - 1);
LIST_ENTRY* head = &ldr->InMemoryOrderModuleList;
// static analyzers don't know that InMemoryOrderModuleList is a circular list managed by the loader
@@ -4180,35 +4179,35 @@ struct VM {
auto* fullname = &ent->FullDllName;
if (!fullname->Buffer || fullname->Length == 0) continue;
- const auto totalChars = static_cast(fullname->Length / sizeof(WCHAR));
+ const auto total_chars = static_cast(fullname->Length / sizeof(WCHAR));
- size_t start = totalChars;
+ size_t start = total_chars;
while (start > 0) {
const WCHAR c = fullname->Buffer[start - 1];
if (c == L'\\' || c == L'/') break;
--start;
}
- const size_t fileLen = totalChars - start;
- if (fileLen != targetLen) continue;
+ const size_t file_length = total_chars - start;
+ if (file_length != target_length) continue;
bool match = true;
- for (size_t i = 0; i < fileLen; ++i) {
+ for (size_t i = 0; i < file_length; ++i) {
WCHAR a = fullname->Buffer[start + i];
- WCHAR b = targetName[i];
+ WCHAR b = target_name[i];
if (a >= L'A' && a <= L'Z') a = static_cast(a + 32);
if (b >= L'A' && b <= L'Z') b = static_cast(b + 32);
if (a != b) { match = false; break; }
}
if (match) {
- cachedNtdll = reinterpret_cast(ent->DllBase);
- return cachedNtdll;
+ cached_ntdll = reinterpret_cast(ent->DllBase);
+ return cached_ntdll;
}
}
const HMODULE h = GetModuleHandleW(L"ntdll.dll");
- if (h) cachedNtdll = h;
+ if (h) cached_ntdll = h;
return h;
}
@@ -4307,15 +4306,15 @@ struct VM {
#else
const std::string& brand = cpu::get_brand();
- struct CStrView {
+ struct cstrview {
const char* data;
std::size_t size;
- constexpr CStrView(const char* d, std::size_t s) noexcept
+ constexpr cstrview(const char* d, std::size_t s) noexcept
: data(d), size(s) {
}
};
- static constexpr std::array checks{ {
+ static constexpr std::array checks{ {
{ "qemu", 4 },
{ "kvm", 3 },
{ "vbox", 4 },
@@ -4607,218 +4606,61 @@ struct VM {
cycle_threshold = 3250; // if we're running under Hyper-V, make VMAware detect nested virtualization
}
- #if (WINDOWS)
- const HMODULE ntdll = util::get_ntdll();
- if (!ntdll) {
- return true;
- }
-
- const char* names[] = { "NtQueryInformationThread", "NtSetInformationThread" };
- void* funcs[ARRAYSIZE(names)] = {};
- util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
-
- using NtQueryInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG, PULONG);
- using NtSetInformationThread_t = NTSTATUS(__stdcall*)(HANDLE, int, PVOID, ULONG);
-
- const auto pNtQueryInformationThread = reinterpret_cast(funcs[0]);
- const auto pNtSetInformationThread = reinterpret_cast(funcs[1]);
- if (!pNtQueryInformationThread || !pNtSetInformationThread) {
- return true;
- }
-
- constexpr int ThreadBasicInformation = 0;
- constexpr int ThreadAffinityMask = 4;
-
- struct CLIENT_ID {
- ULONG_PTR UniqueProcess;
- ULONG_PTR UniqueThread;
- };
- struct THREAD_BASIC_INFORMATION {
- NTSTATUS ExitStatus;
- PVOID TebBaseAddress;
- CLIENT_ID ClientId;
- ULONG_PTR AffinityMask;
- LONG Priority;
- LONG BasePriority;
- } tbi;
- const HANDLE hCurrentThread = reinterpret_cast(-2LL);
-
- // current affinity
- memset(&tbi, 0, sizeof(tbi));
- NTSTATUS status = pNtQueryInformationThread(
- hCurrentThread,
- ThreadBasicInformation,
- &tbi,
- sizeof(tbi),
- nullptr
- );
-
- if (status < 0) {
- return false;
- }
-
- const ULONG_PTR originalAffinity = tbi.AffinityMask;
-
- // new affinity
- const DWORD_PTR wantedMask = static_cast(1);
- status = pNtSetInformationThread(
- hCurrentThread,
- ThreadAffinityMask,
- reinterpret_cast(const_cast(&wantedMask)),
- static_cast(sizeof(wantedMask))
- );
-
- // setting a higher priority for the current thread actually makes the ration between rdtsc and other timers like QIT vary much more
- // contrary to what someone might think about preempting reschedule
- DWORD_PTR prevMask = 0;
- if (status >= 0) {
- prevMask = originalAffinity; // emulate SetThreadAffinityMask return
- }
- else {
- prevMask = 0;
- }
- #endif
-
// check for RDTSCP support, we will use it later
int regs[4] = { 0 };
cpu::cpuid(regs, 0x80000001);
- const bool haveRdtscp = (regs[3] & (1u << 27)) != 0;
- if (!haveRdtscp) {
+ const bool have_rdtscp = (regs[3] & (1u << 27)) != 0;
+ if (!have_rdtscp) {
debug("TIMER: RDTSCP instruction not supported"); // __rdtscp should be supported nowadays
return true;
}
- // ================ START OF TIMING ATTACKS ================
- #if (WINDOWS)
- /* TSC offseting detection */
- // This detection uses two clocks and two loops, a loop and a timer that the hypervisor can spoof and a second loop/timer that the hypervisor cannot
- // When the TSC is "hooked", the hypervisor usually downscales the result to hide the time passed or doesnt let TSC advance for the time it was vm-exiting
- // However, the hypervisor have absolutely no way to downscale time for the second loop because it runs natively on the CPU without exiting
- // This creates a massive discrepancy in the ratio of both loops, contrary to the very small ratio if both timers were to run normally
- // The hypervisor cannot easily rewind the system wall clock (second loop, QIT/KUSER_SHARED_DATA) without causing system instability (network timeouts, audio lag, etc)
- static thread_local volatile u64 g_sink = 0; // thread_local volatile so that it doesnt need to be captured by the lambda
-
- // the reason why we use CPUID rather than RDTSC is because RDTSC is a conditionally exiting instruction, and you can modify the guest TSC without trapping it
- auto vm_exit = []() noexcept -> u64 {
- volatile int regs[4] = { 0 }; // doesn't need to be as elaborated as the next cpuid_lambda we will use to calculate the real latency
- __cpuid((int*)regs, 0); // unconditional vmexit
- return (u64)regs[0]; // dependency to avoid /O2 builds, so that the CPU cannot start the next iteration of the loop until the current __cpuid writes to regs
- };
-
- auto xor_lambda = []() noexcept -> u64 {
- volatile u64 a = 0xDEADBEEFDEADBEEFull; // can be replaced with NOPs, etc, the core idea is to use a non-trappable instruction that the hv cannot virtualize
- volatile u64 b = 0x1234567890ABCDEFull;
- u64 v = a ^ b;
- g_sink ^= v;
- return v;
- };
-
- using fn_t = u64(*)();
-
- // make the pointer volatile so the compiler treats the call as opaque/indirect
- volatile fn_t cp_ptr = +vm_exit; // +lambda forces conversion to function ptr, so it won't be inlined, we need to prevent the compiler from inlining this
- volatile fn_t xor_ptr = +xor_lambda;
- volatile u64 dummy = 0;
-
- // 6 ticks * 15.6ms ~= 100ms
- auto accumulate_and_measure = [&](volatile fn_t func_ptr) -> u64 {
- u64 total_tsc = 0;
- u64 total_qit = 0;
- u64 ticks_captured = 0;
- constexpr u64 TARGET_TICKS = 6;
-
- // We continue until we have captured enough full tick windows
- while (ticks_captured < TARGET_TICKS) {
- u64 start_wait, now_wait;
-
- // Wait for QIT tick edge to avoid granularity errors
- // syncing ensures we always start the measurement at the exact edge of a QIT update, eliminating jitter
- QueryInterruptTime(&start_wait);
- do {
- _mm_pause(); // hint to CPU we-re spin-waiting
- QueryInterruptTime(&now_wait); // never touches RDTSC/RDTSCP or transitions to kernel-mode, just reads from KUSER_SHARED_DATA
- } while (now_wait == start_wait);
-
- // start of a new tick window
- const u64 qit_start = now_wait;
- const u64 tsc_start = __rdtsc();
-
- u64 qit_current;
- // run until the tick updates again
- do {
- // unroll slightly to reduce overhead
- dummy += func_ptr(); dummy += func_ptr();
- dummy += func_ptr(); dummy += func_ptr();
- dummy += func_ptr(); dummy += func_ptr();
-
- QueryInterruptTime(&qit_current);
- } while (qit_current == qit_start);
-
- // end of tick window
- const u64 tsc_end = __rdtsc();
-
- const u64 delta_qit = qit_current - qit_start;
- const u64 delta_tsc = tsc_end - tsc_start;
-
- // we need to accumulate results, the more we do it, the more the hypervisor will downclock the TSC
- if (delta_qit > 0) {
- total_qit += delta_qit;
- total_tsc += delta_tsc;
- ticks_captured++;
- }
- }
-
- // Total TSC Cycles / Total QIT Units
- if (total_qit == 0) return 0;
- return total_tsc / total_qit;
- };
+ const u64 ITER_XOR = 50000000ULL;
+ const size_t CPUID_ITER = 100; // per leaf
+ const unsigned int leaves[] = {
+ 0xB, 0xD, 0x4, 0x1, 0x7, 0xA, 0x12, 0x5, 0x40000000u, 0x80000008u, 0x0
+ };
+ const size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]);
+ const size_t samples_expected = n_leaves * CPUID_ITER;
- // first measurement (CPUID / VMEXIT)
- const ULONG64 firstRatio = accumulate_and_measure(cp_ptr);
+ unsigned hw = std::thread::hardware_concurrency();
+ if (hw == 0) hw = 1;
- // second measurement (XOR / ALU)
- const ULONG64 secondRatio = accumulate_and_measure(xor_ptr);
+ std::atomic ready_count(0);
+ std::atomic state(0);
- VMAWARE_UNUSED(dummy);
+ std::atomic t1_start(0), t1_end(0);
+ std::atomic t2_start(0), t2_end(0);
+ std::atomic t2_accum(0);
- /* branchless absolute difference is like:
- mask = -(uint64_t)(firstRatio < secondRatio) -> 0 or 0xFFFFFFFFFFFFFFFF
- diff = firstRatio - secondRatio
- abs = (diff ^ mask) - mask
- */
- const ULONG64 diffMask = (ULONG64)0 - (ULONG64)(firstRatio < secondRatio); // all-ones if firstRatio < secondRatio
- const ULONG64 diff = firstRatio - secondRatio;
- const ULONG64 difference = (diff ^ diffMask) - diffMask;
- debug("TIMER: CPUID -> ", firstRatio, ", Interrupt -> ", secondRatio, ", Ratio: ", difference);
-
- if (prevMask != 0) {
- pNtSetInformationThread(
- hCurrentThread,
- ThreadAffinityMask,
- reinterpret_cast(const_cast(&originalAffinity)),
- static_cast(sizeof(originalAffinity))
- );
- }
+ std::vector<u64> samples;
+ samples.resize(samples_expected);
+ for (size_t i = 0; i < samples.size(); ++i) samples[i] = 0;
- // QIT is updated in intervals of 100 nanoseconds
- // contrary to what someone could think, under heavy load the ratio will be more close to 0, it will also be closer to 0 if we assign CPUs to a VM in our host machine
- // it will increase if the BIOS/UEFI is configured to run the TSC by "core usage", which is why we use this threshold check based on a lot of empirical data
- // it increases because the CPUID instruction forces the CPU pipeline to drain and serialize (heavy workload), while the XOR loop is a tight arithmetic loop (throughput workload).
- // CPUs will boost to different frequencies for these two scenarios
- // A difference of 5-10% in ratio (15-30 points) or even more is normal behavior on bare metal
- if (difference >= 100) {
- debug("TIMER: An hypervisor has been detected intercepting TSC");
- return true; // both ratios will always differ if TSC is downscaled, since the hypervisor can't account for the XOR/NOP loop
- }
+ auto rdtsc = []() -> u64 {
+ #if (MSVC)
+ return static_cast<u64>(__rdtsc());
+ #else
+ return static_cast<u64>(__rdtsc());
#endif
+ };
- // An hypervisor might detect that VMAware was spamming instructions to detect rdtsc hooks, and disable interception temporarily or include vm-exit latency in guest TSC
- // which is why we run the classic vm-exit latency check immediately after
- // to ensure a kernel developer does not hardcode the number of iterations our detector do to change behavior depending on which test we're running (tsc freeze/downscale vs tsc aggregation)
- // we used a rng before running the traditional rdtsc-cpuid-rdtsc trick
+ // best-effort affinity as a local lambda; on macOS it's a no-op
+ auto try_set_affinity = [](std::thread& t, unsigned core) {
+ #if (WINDOWS)
+ HANDLE h = static_cast<HANDLE>(t.native_handle());
+ DWORD_PTR mask = static_cast<DWORD_PTR>(1ULL) << core;
+ (void)SetThreadAffinityMask(h, mask);
+ #elif (LINUX)
+ cpu_set_t cp;
+ CPU_ZERO(&cp);
+ CPU_SET(core, &cp);
+ (void)pthread_setaffinity_np(t.native_handle(), sizeof(cp), &cp);
+ #else
+ (void)t; (void)core;
+ #endif
+ };
- // sometimes not intercepted in some hvs (like VirtualBox) under compat mode
thread_local u32 aux = 0;
auto cpuid = [&](unsigned int leaf) noexcept -> u64 {
#if (MSVC)
@@ -4876,21 +4718,16 @@ struct VM {
#endif
};
+ // calculate_latency (kept as provided, minimal adaptations)
auto calculate_latency = [&](const std::vector& samples_in) -> u64 {
if (samples_in.empty()) return 0;
const size_t N = samples_in.size();
if (N == 1) return samples_in[0];
-
- // local sorted copy
std::vector s = samples_in;
- std::sort(s.begin(), s.end()); // ascending
-
- // tiny-sample short-circuits
+ std::sort(s.begin(), s.end());
if (N <= 4) return s.front();
- // median (and works for sorted input)
auto median_of_sorted = [](const std::vector& v, size_t lo, size_t hi) -> u64 {
- // this is the median of v[lo..hi-1], requires 0 <= lo < hi
const size_t len = hi - lo;
if (len == 0) return 0;
const size_t mid = lo + (len / 2);
@@ -4898,7 +4735,6 @@ struct VM {
return (v[mid - 1] + v[mid]) / 2;
};
- // the robust center: median M and MAD -> approximate sigma
const u64 M = median_of_sorted(s, 0, s.size());
std::vector absdev;
absdev.reserve(N);
@@ -4908,253 +4744,261 @@ struct VM {
}
std::sort(absdev.begin(), absdev.end());
const u64 MAD = median_of_sorted(absdev, 0, absdev.size());
- // convert MAD to an approximate standard-deviation-like measure
- const long double kMADtoSigma = 1.4826L; // consistent for normal approx
- const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kMADtoSigma);
-
- // find the densest small-valued cluster by sliding a fixed-count window
- // this locates the most concentrated group of samples (likely it would be the true VMEXIT cluster)
- // const size_t frac_win = (N * 8 + 99) / 100; // ceil(N * 0.08)
- // const size_t win = std::min(N, std::max(MIN_WIN, frac_win));
+ const long double kmad_to_sigma = 1.4826L;
+ const long double sigma = (MAD == 0) ? 1.0L : (static_cast(MAD) * kmad_to_sigma);
+
const size_t MIN_WIN = 10;
- const size_t win = std::min(
- N,
- std::max(
- MIN_WIN,
- static_cast(std::ceil(static_cast(N) * 0.08))
- )
- );
+ const size_t frac_win = static_cast(std::ceil(static_cast(N) * 0.08));
+ size_t inner_win = frac_win;
+ if (inner_win < MIN_WIN) inner_win = MIN_WIN;
+ const size_t win = (N < inner_win) ? N : inner_win;
size_t best_i = 0;
- u64 best_span = (s.back() - s.front()) + 1; // large initial
+ u64 best_span = (s.back() - s.front()) + 1;
for (size_t i = 0; i + win <= N; ++i) {
const u64 span = s[i + win - 1] - s[i];
- if (span < best_span) {
- best_span = span;
- best_i = i;
- }
+ if (span < best_span) { best_span = span; best_i = i; }
}
- // expand the initial window greedily while staying "tight"
- // allow expansion while adding samples does not more than multiply the span by EXPAND_FACTOR
constexpr long double EXPAND_FACTOR = 1.5L;
size_t cluster_lo = best_i;
- size_t cluster_hi = best_i + win; // exclusive
- // expand left
+ size_t cluster_hi = best_i + win;
while (cluster_lo > 0) {
const u64 new_span = s[cluster_hi - 1] - s[cluster_lo - 1];
if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
(s[cluster_hi - 1] <= (s[cluster_lo - 1] + static_cast(std::ceil(3.0L * sigma))))) {
--cluster_lo;
- best_span = std::min(best_span, new_span);
+ if (new_span < best_span) best_span = new_span;
}
else break;
}
- // expand right
while (cluster_hi < N) {
const u64 new_span = s[cluster_hi] - s[cluster_lo];
if (static_cast(new_span) <= EXPAND_FACTOR * static_cast(best_span) ||
(s[cluster_hi] <= (s[cluster_lo] + static_cast(std::ceil(3.0L * sigma))))) {
++cluster_hi;
- best_span = std::min(best_span, new_span);
+ if (new_span < best_span) best_span = new_span;
}
else break;
}
const size_t cluster_size = (cluster_hi > cluster_lo) ? (cluster_hi - cluster_lo) : 0;
-
- // cluster must be reasonably dense and cover a non-negligible portion of samples, so this is pure sanity checks
const double fraction_in_cluster = static_cast(cluster_size) / static_cast(N);
- const size_t MIN_CLUSTER = std::min(static_cast(std::max(5, static_cast(N / 50))), N); // at least 2% or 5 elements
+ size_t threshold = N / 50;
+ if (threshold < 5) threshold = 5;
+ const size_t MIN_CLUSTER = (threshold < N) ? threshold : N;
if (cluster_size < MIN_CLUSTER || fraction_in_cluster < 0.02) {
- // low-percentile (10th) trimmed median
- const size_t fallback_count = std::max(1, static_cast(std::floor(static_cast(N) * 0.10)));
- // median of lowest fallback_count elements (if fallback_count==1 that's smallest)
+ size_t fallback_count = static_cast(std::floor(static_cast(N) * 0.10));
+ if (fallback_count < 1) fallback_count = 1;
if (fallback_count == 1) return s.front();
const size_t mid = fallback_count / 2;
if (fallback_count & 1) return s[mid];
return (s[mid - 1] + s[mid]) / 2;
}
- // now we try to get a robust estimate inside the cluster, trimmed mean (10% trim) centered on cluster
const size_t trim_count = static_cast(std::floor(static_cast(cluster_size) * 0.10));
size_t lo = cluster_lo + trim_count;
- size_t hi = cluster_hi - trim_count; // exclusive
+ size_t hi = cluster_hi - trim_count;
if (hi <= lo) {
- // degenerate -> median of cluster
return median_of_sorted(s, cluster_lo, cluster_hi);
}
- // sum with long double to avoid overflow and better rounding
long double sum = 0.0L;
for (size_t i = lo; i < hi; ++i) sum += static_cast(s[i]);
const long double avg = sum / static_cast(hi - lo);
u64 result = static_cast(std::llround(avg));
-
- // final sanity adjustments:
- // if the computed result is suspiciously far from the global median (e.g., > +6*sigma)
- // clamp toward the median to avoid choosing a high noisy cluster by mistake
const long double diff_from_med = static_cast(result) - static_cast(M);
if (diff_from_med > 0 && diff_from_med > (6.0L * sigma)) {
- // clamp to median + 4*sigma (conservative)
result = static_cast(std::llround(static_cast(M) + 4.0L * sigma));
}
-
- // Also, if result is zero (shouldn't be) or extremely small, return a smallest observed sample
if (result == 0) result = s.front();
-
return result;
};
- // First we start by randomizing counts WITHOUT syscalls and WITHOUT using instructions that can be trapped by hypervisors, this was a hard task
- struct entropy_provider {
- // prevent inlining so optimizer can't fold this easily
- #if (MSVC && !CLANG)
- __declspec(noinline)
- #else
- __attribute__((noinline))
- #endif
- u64 operator()() const noexcept {
- // TO prevent hoisting across this call
- std::atomic_signal_fence(std::memory_order_seq_cst);
-
- // start state (golden ratio)
- volatile u64 v = UINT64_C(0x9E3779B97F4A7C15);
-
- // mix in addresses (ASLR gives entropy but if ASLR disabled or bypassed we have some tricks still)
- // Take addresses of various locals/statics and mark some volatile so they cannot be optimized away
- volatile int local_static = 0; // local volatile (stack-like)
- static volatile int module_static = 0; // static in function scope (image address)
- auto probe_lambda = []() noexcept {}; // stack-local lambda object
- std::uintptr_t pa = reinterpret_cast(&v);
- std::uintptr_t pb = reinterpret_cast(&local_static);
- std::uintptr_t pc = reinterpret_cast(&module_static);
- std::uintptr_t pd = reinterpret_cast(&probe_lambda);
-
- v ^= static_cast(pa) + UINT64_C(0x9E3779B97F4A7C15) + (v << 6) + (v >> 2);
- v ^= static_cast(pb) + (v << 7);
- v ^= static_cast(pc) + (v >> 11);
- v ^= static_cast(pd) + UINT64_C(0xBF58476D1CE4E5B9);
-
- // dependent operations on volatile locals to prevent elimination
- for (int i = 0; i < 24; ++i) {
- volatile int stack_local = i ^ static_cast(v);
- // take address each iteration and fold it in
- std::uintptr_t la = reinterpret_cast(&stack_local);
- v ^= (static_cast(la) + (static_cast(i) * UINT64_C(0x9E3779B97F4A7C)));
- // dependent shifts to spread any small differences
- v ^= (v << ((i & 31)));
- v ^= (v >> (((i + 13) & 31)));
- // so compiler can't remove the local entirely
- std::atomic_signal_fence(std::memory_order_seq_cst);
- }
-
- // final avalanche! (as said before, just in case ASLR can be folded)
- v ^= (v << 13);
- v ^= (v >> 7);
- v ^= (v << 17);
- v *= UINT64_C(0x2545F4914F6CDD1D);
- v ^= (v >> 33);
-
- // another compiler fence to prevent hoisting results
- std::atomic_signal_fence(std::memory_order_seq_cst);
-
- return static_cast(v);
- }
- };
+ // to touch pages and exercise cpuid paths
+ for (int w = 0; w < 128; ++w) {
+ volatile u64 tmp = cpuid(leaves[w % n_leaves]);
+ VMAWARE_UNUSED(tmp);
+ }
- // rejection sampling as before to avoid modulo bias
- auto rng = [](u64 min, u64 max, auto getrand) noexcept -> u64 {
- const u64 range = max - min + 1;
- const u64 max_val = std::numeric_limits::max();
- const u64 limit = max_val - (max_val % range);
- for (;;) {
- const u64 r = getrand();
- if (r < limit) return min + (r % range);
- // small local mix to change subsequent outputs (still in user-mode and not a syscall)
- volatile u64 scrub = r;
- scrub ^= (scrub << 11);
- scrub ^= (scrub >> 9);
- (void)scrub;
+ // Thread 1: start near same cycle, do XOR work, set end
+ std::thread th1([&]() {
+ ready_count.fetch_add(1, std::memory_order_acq_rel);
+ while (ready_count.load(std::memory_order_acquire) < 2) { /* spin */ }
+
+ u64 s = rdtsc();
+ t1_start.store(s, std::memory_order_release);
+ state.store(1, std::memory_order_release);
+
+ volatile u64 x = 0xDEADBEEFCAFEBABEULL;
+ for (u64 i = 0; i < ITER_XOR; ++i) {
+ x ^= i;
+ x = (x << 1) ^ (x >> 3);
+ }
+ VMAWARE_UNUSED(x);
+
+ u64 e = rdtsc();
+ t1_end.store(e, std::memory_order_release);
+ state.store(2, std::memory_order_release);
+ });
+
+ // Thread 2: barrier, sample start, perform cpuid sampling and keep accumulating rdtsc deltas
+ std::thread th2([&]() {
+ ready_count.fetch_add(1, std::memory_order_acq_rel);
+ while (ready_count.load(std::memory_order_acquire) < 2) { /* spin */ }
+
+ u64 last = rdtsc();
+ t2_start.store(last, std::memory_order_release);
+
+ // local accumulator (fast) and local index into samples
+ u64 acc = 0;
+ size_t idx = 0;
+
+ // per-leaf sampling but do not stop entirely if thread1 is still running after completing planned samples
+ for (size_t li = 0; li < n_leaves; ++li) {
+ const unsigned int leaf = leaves[li];
+ for (unsigned i = 0; i < CPUID_ITER; ++i) {
+ // accumulate rdtsc delta up to now (this includes time since last sample and includes previous cpuid)
+ u64 now = rdtsc();
+ acc += (now >= last) ? (now - last) : (u64)((u64)0 - last + now);
+ last = now;
+
+ // run cpuid and store latency
+ if (idx < samples.size()) samples[idx] = cpuid(leaf);
+ ++idx;
+
+ // if thread1 finished, capture a final rdtsc and exit sampling loops
+ if (state.load(std::memory_order_acquire) == 2) {
+ u64 final_now = rdtsc();
+ acc += (final_now >= last) ? (final_now - last) : (u64)((u64)0 - last + final_now);
+ last = final_now;
+ t2_end.store(final_now, std::memory_order_release);
+ t2_accum.store(acc, std::memory_order_release);
+ return;
+ }
+ }
}
- };
-
- const entropy_provider entropyProv{};
-
- // Intel leaves on an AMD CPU and viceversa will still work for this probe
- // for leafs like 0 that just returns static data, like "AuthenticAMD" or "GenuineIntel", a fast exit path could be made
- // for other leaves like the extended state that rely on dynamic system states like APIC IDs and XState, kernel data locks are required
- // we try different leaves so that is not worth to just create a "fast" exit path, forcing guest TSC manipulation
- // the vmexit itself has a latency of around 800 cycles, combined with the registers save and the cpuid information we require, it costs 1000+ cycles
- constexpr unsigned int leaves[] = {
- 0xB, // topology
- 0xD, // xsave/xstate
- 0x4, // deterministic cache params
- 0x1, // basic features
- 0x7, // extended features
- 0xA, // architectural performance monitoring
- 0x12, // SGX/enclave
- 0x5, // MONITOR/MWAIT
- 0x40000000u, // hypervisor range start
- 0x80000008u, // extended address limits (amd/intel ext)
- 0x0 // fallback to leaf 0 occasionally, the easiest to patch
- };
- constexpr size_t n_leaves = sizeof(leaves) / sizeof(leaves[0]);
- const size_t iterations = static_cast(rng(100, 200, [&entropyProv]() noexcept { return entropyProv(); }));
+ // If we reach here, we completed planned samples but thread1 might still be running, so continue spamming
+ while (state.load(std::memory_order_acquire) != 2) {
+ u64 now = rdtsc();
+ acc += (now >= last) ? (now - last) : (u64)((u64)0 - last + now);
+ last = now;
+ }
- // pre-allocate sample buffer and touch pages to avoid page faults by MMU during measurement
- std::vector samples;
- samples.resize(n_leaves * iterations);
- for (size_t i = 0; i < samples.size(); ++i) samples[i] = 0; // or RtlSecureZeroMemory (memset) if Windows
+ // final sample after seeing finished
+ u64 final_now = rdtsc();
+ acc += (final_now >= last) ? (final_now - last) : (u64)((u64)0 - last + final_now);
+ last = final_now;
+ t2_end.store(final_now, std::memory_order_release);
+ t2_accum.store(acc, std::memory_order_release);
+ });
- /*
- * We want to move our thread from the Running state to the Waiting state
- * When the sleep expires (at the next timer tick), the kernel moves VMAware's thread to the Ready state
- * When it picks us up again, it grants VMAware a fresh quantum, typically varying between 2 ticks (30ms) and 6 ticks (90ms) on Windows Client editions
- * The default resolution of the Windows clock we're using is 64Hz
- * Because we're calling NtDelayExecution with only 1ms, the kernel interprets this as "Sleep for at least 1ms"
- * Since the hardware interrupt (tick) only fires every 15.6ms and we're not using timeBeginPeriod, the kernel cannot wake us after exactly 1ms
- * So instead, it does what we want and wakes us up at the very next timer interrupt
- * That's the reason why it's only 1ms and we're not using CreateWaitableTimerEx / SetWaitableTimerEx
- * Sleep(0) would return instantly in some circumstances
- * This gives us more time for sampling before we're rescheduled again
- */
+ // Try to pin to different cores
+ if (hw >= 2) {
+ try_set_affinity(th1, 0);
+ try_set_affinity(th2, 1);
+ }
- #if (WINDOWS)
- // voluntary context switch to get a fresh quantum
- SleepEx(1, FALSE);
- #else
- // should work similarly in Unix-like operating systems
- std::this_thread::sleep_for(std::chrono::milliseconds(1));
- #endif
+ th1.join();
+ th2.join();
- // warm up but rotating through leaves to exercise different cpuid paths
- for (int w = 0; w < 128; ++w) {
- volatile u64 tmp = cpuid(leaves[w % n_leaves]);
- VMAWARE_UNUSED(tmp);
- }
+ const u64 a = t1_start.load(std::memory_order_acquire);
+ const u64 b = t1_end.load(std::memory_order_acquire);
+ const u64 c = t2_start.load(std::memory_order_acquire);
+ const u64 d = t2_end.load(std::memory_order_acquire);
+ const u64 acc = t2_accum.load(std::memory_order_acquire);
- // 100 iterations per leaf, store contiguously per-leaf, so 1100 runs in total
- for (size_t li = 0; li < n_leaves; ++li) {
- const unsigned int leaf = leaves[li];
- for (unsigned i = 0; i < iterations; ++i) {
- samples[li * iterations + i] = cpuid(leaf);
- }
- }
+ const u64 t1_delta = (b > a) ? (b - a) : 0;
+ const u64 t2_delta = acc;
- const u64 cpuid_latency = calculate_latency(samples);
+ std::vector<u64> used;
+ used.reserve(samples_expected);
+ for (size_t i = 0; i < samples.size(); ++i)
+ if (samples[i] != 0)
+ used.push_back(samples[i]);
+ const u64 cpuid_latency = calculate_latency(used);
- debug("TIMER: VMEXIT latency -> ", cpuid_latency);
+ debug("TIMER: thread1 cycles: start=", a, " end=", b, " delta=", t1_delta);
+ debug("TIMER: thread2 cycles: start=", c, " end=", d, " acc=", t2_delta);
+ debug("TIMER: vmexit latency: ", cpuid_latency);
if (cpuid_latency >= cycle_threshold) {
return true;
}
- else if (cpuid_latency <= 25) {
+ else if (cpuid_latency <= 25) {
// cpuid is fully serializing; no CPU has this low an average cycle count in real-world scenarios
// however, in patches, zero or even negative deltas can often be seen
return true;
}
- // TLB flushes or side channel cache attacks are not even tried due to how unreliable they are against stealthy hypervisors
+
+ if (t1_delta == 0) {
+ return false;
+ }
+
+ const double ratio = double(t2_delta) / double(t1_delta);
+ if (ratio < 0.95 || ratio > 1.05) {
+ debug("TIMER: VMAware detected an hypervisor offsetting TSC: ", ratio);
+ }
+ else {
+ debug("TIMER: Ratio: ", ratio);
+ }
+
+ #if (WINDOWS)
+ typedef struct _PROCESSOR_POWER_INFORMATION {
+ u32 Number;
+ u32 MaxMhz;
+ u32 CurrentMhz;
+ u32 MhzLimit;
+ u32 MaxIdleState;
+ u32 CurrentIdleState;
+ } PROCESSOR_POWER_INFORMATION, * PPROCESSOR_POWER_INFORMATION;
+
+ enum POWER_INFORMATION_LEVEL_MIN {
+ ProcessorInformation = 11
+ };
+
+ HMODULE hPowr = GetModuleHandleA("powrprof.dll");
+ if (!hPowr) hPowr = LoadLibraryA("powrprof.dll");
+ if (!hPowr) return 0;
+
+ const char* names[] = { "CallNtPowerInformation" };
+ void* funcs[1] = { nullptr };
+ util::get_function_address(hPowr, names, funcs, 1);
+ if (!funcs[0]) return 0;
+
+ using CallNtPowerInformation_t = NTSTATUS(__stdcall*)(int, PVOID, ULONG, PVOID, ULONG);
+ CallNtPowerInformation_t CallNtPowerInformation =
+ reinterpret_cast<CallNtPowerInformation_t>(funcs[0]);
+
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ const DWORD procCount = si.dwNumberOfProcessors;
+ if (procCount == 0) return 0;
+
+ const SIZE_T bufSize = static_cast<SIZE_T>(procCount) * sizeof(PROCESSOR_POWER_INFORMATION);
+ void* raw = _malloca(bufSize);
+ if (!raw) return 0;
+ memset(raw, 0, bufSize);
+
+ NTSTATUS status = CallNtPowerInformation(
+ ProcessorInformation,
+ nullptr, 0,
+ raw, static_cast<ULONG>(bufSize)
+ );
+
+ unsigned speed = 0;
+ if ((LONG)status >= 0) {
+ PROCESSOR_POWER_INFORMATION* info = reinterpret_cast<PROCESSOR_POWER_INFORMATION*>(raw);
+ speed = static_cast<unsigned>(info[0].CurrentMhz);
+ }
+
+ _freea(raw);
+
+ if (speed < 800) {
+ debug("TIMER: VMAware detected an hypervisor offsetting TSC: ", speed);
+ return true;
+ }
+ #endif
#endif
return false;
}
@@ -5285,10 +5129,10 @@ struct VM {
* @implements VM::MAC
*/
[[nodiscard]] static bool mac_address_check() {
- struct FDGuard {
+ struct fdguard {
int fd;
- explicit FDGuard(int fd = -1) : fd(fd) {}
- ~FDGuard() { if (fd != -1) ::close(fd); }
+ explicit fdguard(int fd = -1) : fd(fd) {}
+ ~fdguard() { if (fd != -1) ::close(fd); }
int get() const { return fd; }
int release() { int tmp = fd; fd = -1; return tmp; }
};
@@ -5303,7 +5147,7 @@ struct VM {
if (sock == -1) {
return false;
}
- FDGuard sockGuard(sock); // will close on function exit
+ fdguard sockGuard(sock); // will close on function exit
ifc.ifc_len = sizeof(buf);
ifc.ifc_buf = buf;
@@ -6231,20 +6075,20 @@ struct VM {
#elif (WINDOWS && x86)
SYSTEM_INFO si;
GetNativeSystemInfo(&si);
- DWORD_PTR originalMask = 0;
- const HANDLE hCurrentThread = reinterpret_cast(-2LL);
+ DWORD_PTR original_mask = 0;
+ const HANDLE current_thread = reinterpret_cast<HANDLE>(-2LL);
// Iterating processors for SGDT, SLDT, and SIDT
for (DWORD i = 0; i < si.dwNumberOfProcessors; ++i) {
const DWORD_PTR mask = (DWORD_PTR)1 << i;
- const DWORD_PTR previousMask = SetThreadAffinityMask(hCurrentThread, mask);
+ const DWORD_PTR previous_mask = SetThreadAffinityMask(current_thread, mask);
- if (previousMask == 0) {
+ if (previous_mask == 0) {
continue;
}
- if (originalMask == 0) {
- originalMask = previousMask;
+ if (original_mask == 0) {
+ original_mask = previous_mask;
}
// Technique 1: SGDT (x86 & x64)
@@ -6326,12 +6170,12 @@ struct VM {
#elif (MSVC) && (x86_32)
__asm { sidt idtr_buffer }
#elif (MSVC) && (x86_64)
- #pragma pack(push, 1)
+ #pragma pack(push, 1)
struct {
USHORT Limit;
ULONG_PTR Base;
} idtr;
- #pragma pack(pop)
+ #pragma pack(pop)
__sidt(&idtr);
memcpy(idtr_buffer, &idtr, sizeof(idtr));
#endif
@@ -6351,8 +6195,8 @@ struct VM {
if (found) break;
}
- if (originalMask != 0) {
- SetThreadAffinityMask(hCurrentThread, originalMask);
+ if (original_mask != 0) {
+ SetThreadAffinityMask(current_thread, original_mask);
}
// Technique 4: SMSW (x86_32 only), no affinity pinning needed
@@ -6741,10 +6585,10 @@ struct VM {
return false;
}
- struct DirCloser {
+ struct dir_closer {
DIR* d;
- explicit DirCloser(DIR* dir) : d(dir) {}
- ~DirCloser() { if (d) closedir(d); }
+ explicit dir_closer(DIR* dir) : d(dir) {}
+ ~dir_closer() { if (d) closedir(d); }
} dir(raw_dir);
constexpr const char* targets[] = {
@@ -6776,10 +6620,10 @@ struct VM {
continue;
}
- struct FDCloser {
+ struct fd_closer {
int fd;
- explicit FDCloser(int f) : fd(f) {}
- ~FDCloser() { if (fd != -1) close(fd); }
+ explicit fd_closer(int f) : fd(f) {}
+ ~fd_closer() { if (fd != -1) close(fd); }
} fdguard(fd);
struct stat statbuf;
@@ -6821,11 +6665,11 @@ struct VM {
}
for (const char* target : targets) {
- size_t targetLen = strlen(target);
- if (targetLen > file_size_u)
+ size_t target_length = strlen(target);
+ if (target_length > file_size_u)
continue;
- for (size_t j = 0; j <= file_size_u - targetLen; ++j) {
- if (memcmp(buffer.data() + j, target, targetLen) == 0) {
+ for (size_t j = 0; j <= file_size_u - target_length; ++j) {
+ if (memcmp(buffer.data() + j, target, target_length) == 0) {
const char* brand = nullptr;
if (strcmp(target, "Parallels Software International") == 0 ||
strcmp(target, "Parallels(R)") == 0) {
@@ -6872,8 +6716,8 @@ struct VM {
* @implements VM::PCI_DEVICES
*/
[[nodiscard]] static bool pci_devices() {
- struct PCI_Device { u16 vendor_id; u32 device_id; };
- std::vector devices;
+ struct pci_device { u16 vendor_id; u32 device_id; };
+ std::vector<pci_device> devices;
#if (LINUX)
const std::string pci_path = "/sys/bus/pci/devices";
@@ -6910,7 +6754,7 @@ struct VM {
}
#endif
#elif (WINDOWS)
- static constexpr const wchar_t* kRoots[] = {
+ static constexpr const wchar_t* kroots[] = {
L"SYSTEM\\CurrentControlSet\\Enum\\PCI",
L"SYSTEM\\CurrentControlSet\\Enum\\USB",
L"SYSTEM\\CurrentControlSet\\Enum\\HDAUDIO"
@@ -7113,21 +6957,21 @@ struct VM {
};
// for each rootPath we open the root key once
- for (size_t rootIdx = 0; rootIdx < _countof(kRoots); ++rootIdx) {
- const wchar_t* rootPath = kRoots[rootIdx];
- HKEY hRoot = nullptr;
+ for (size_t root_idx = 0; root_idx < _countof(kroots); ++root_idx) {
+ const wchar_t* root_path = kroots[root_idx];
+ HKEY root = nullptr;
if (RegOpenKeyExW(
HKEY_LOCAL_MACHINE,
- rootPath,
+ root_path,
0,
KEY_READ,
- &hRoot
+ &root
) != ERROR_SUCCESS) {
continue;
}
- enum_devices(hRoot);
- RegCloseKey(hRoot);
+ enum_devices(root);
+ RegCloseKey(root);
}
#endif
@@ -7575,19 +7419,90 @@ struct VM {
}
#endif
- const HMODULE k32 = GetModuleHandleA("kernel32.dll");
- if (!k32) {
+ const HMODULE kernel32 = GetModuleHandleA("kernel32.dll");
+ const HMODULE ntdll = util::get_ntdll();
+ if (!kernel32 || !ntdll) {
return false;
}
- const char* names[] = { "wine_get_unix_file_name" };
- void* functions[1] = { nullptr };
- util::get_function_address(k32, names, functions, _countof(names));
+ const char* kernel32_names[] = { "wine_get_unix_file_name" };
+ void* kernel32_functions[ARRAYSIZE(kernel32_names)] = {};
+ util::get_function_address(kernel32, kernel32_names, kernel32_functions, _countof(kernel32_names));
+
+ if (kernel32_functions[0] != nullptr) {
+ return core::add(brands::WINE);
+ }
+
+ const char* ntdll_names[] = { "NtAllocateVirtualMemory", "NtFreeVirtualMemory", "NtProtectVirtualMemory" };
+ void* ntdll_functions[ARRAYSIZE(ntdll_names)] = {};
+ util::get_function_address(ntdll, ntdll_names, ntdll_functions, _countof(ntdll_names));
+
+ // https://www.unknowncheats.me/forum/anti-cheat-bypass/729130-article-wine-detection.html
+ const UINT old_mode = SetErrorMode(SEM_NOALIGNMENTFAULTEXCEPT);
+
+ static constexpr unsigned char movaps_stub[] = {
+ 0x0F, 0x28, 0x01, // movaps xmm0, XMMWORD PTR [rcx] (Windows x64: arg in RCX)
+ 0xC3 // ret
+ };
+
+ typedef void (*movaps_fn)(void*);
+
+ using NtAllocateVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, ULONG_PTR, PSIZE_T, ULONG, ULONG);
+ using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG);
+ using NtProtectVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE, PVOID*, PSIZE_T, ULONG, PULONG);
+
+ const auto nt_allocate_virtual_memory = reinterpret_cast<NtAllocateVirtualMemoryFn>(ntdll_functions[0]);
+ const auto nt_free_virtual_memory = reinterpret_cast<NtFreeVirtualMemoryFn>(ntdll_functions[1]);
+ const auto nt_protect_virtual_memory = reinterpret_cast<NtProtectVirtualMemoryFn>(ntdll_functions[2]);
+
+ if (nt_allocate_virtual_memory == nullptr || nt_free_virtual_memory == nullptr || nt_protect_virtual_memory == nullptr) {
+ SetErrorMode(old_mode);
+ return false;
+ }
+
+ PVOID exec_mem = NULL;
+ const HANDLE current_process = reinterpret_cast<HANDLE>(-1);
+ SIZE_T region_size = sizeof movaps_stub;
+ NTSTATUS st = nt_allocate_virtual_memory(current_process, &exec_mem, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ if (!NT_SUCCESS(st) || exec_mem == NULL) {
+ SetErrorMode(old_mode);
+ return false;
+ }
- if (functions[0] != nullptr) {
+ memcpy(exec_mem, movaps_stub, sizeof movaps_stub);
+
+ PVOID tmp_base = exec_mem;
+ SIZE_T tmp_sz = region_size;
+ ULONG old_protection = 0;
+ st = nt_protect_virtual_memory(current_process, &tmp_base, &tmp_sz, PAGE_EXECUTE_READ, &old_protection);
+ if (!NT_SUCCESS(st)) {
+ PVOID free_base = exec_mem;
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ SetErrorMode(old_mode);
+ return false;
+ }
+
+ __declspec(align(16)) unsigned char buffer[32] = { 0 };
+ void* misaligned = buffer + 1;
+
+ __try {
+ ((movaps_fn)exec_mem)(misaligned);
+ }
+ __except (EXCEPTION_EXECUTE_HANDLER) {
+ PVOID free_base = exec_mem;
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+
+ SetErrorMode(old_mode);
return core::add(brands::WINE);
}
+
+ PVOID free_base = exec_mem;
+ SIZE_T free_size = 0;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ SetErrorMode(old_mode);
return false;
}
@@ -7609,10 +7524,10 @@ struct VM {
using NtPI_t = NTSTATUS(__stdcall*)(POWER_INFORMATION_LEVEL,
PVOID, ULONG,
PVOID, ULONG);
- const auto NtPowerInformation = reinterpret_cast(funcs[0]);
+ const auto nt_power_information = reinterpret_cast<NtPI_t>(funcs[0]);
SYSTEM_POWER_CAPABILITIES caps = { 0 };
- const NTSTATUS status = NtPowerInformation(
+ const NTSTATUS status = nt_power_information(
SystemPowerCapabilities,
nullptr, 0,
&caps, sizeof(caps)
@@ -7624,16 +7539,16 @@ struct VM {
const bool s2_supported = caps.SystemS2;
const bool s3_supported = caps.SystemS3;
const bool s4_supported = caps.SystemS4;
- const bool hiberFilePresent = caps.HiberFilePresent;
+ const bool hiber_file_present = caps.HiberFilePresent;
const bool is_physical_pattern = (s0_supported || s3_supported) &&
- (s4_supported || hiberFilePresent);
+ (s4_supported || hiber_file_present);
if (is_physical_pattern) {
return false;
}
- const bool is_vm_pattern = !(s0_supported || s3_supported || s4_supported || hiberFilePresent) &&
+ const bool is_vm_pattern = !(s0_supported || s3_supported || s4_supported || hiber_file_present) &&
(s1_supported || s2_supported);
if (is_vm_pattern) {
@@ -7665,45 +7580,45 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pNtOpenKey = reinterpret_cast(funcs[0]);
- const auto pNtQueryValueKey = reinterpret_cast(funcs[1]);
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[2]);
- const auto pNtClose = reinterpret_cast(funcs[3]);
+ const auto nt_open_key = reinterpret_cast(funcs[0]);
+ const auto nt_query_value_key = reinterpret_cast(funcs[1]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[2]);
+ const auto nt_close = reinterpret_cast(funcs[3]);
- if (!pNtOpenKey || !pNtQueryValueKey || !pRtlInitUnicodeString || !pNtClose)
+ if (!nt_open_key || !nt_query_value_key || !rtl_init_unicode_string || !nt_close)
return false;
// We use native unicode strings and object attributes to interface directly with the kernel
- UNICODE_STRING uKeyName;
- pRtlInitUnicodeString(&uKeyName, L"\\Registry\\Machine\\Software\\Microsoft\\Windows NT\\CurrentVersion");
+ UNICODE_STRING key_name;
+ rtl_init_unicode_string(&key_name, L"\\Registry\\Machine\\Software\\Microsoft\\Windows NT\\CurrentVersion");
- OBJECT_ATTRIBUTES objAttr;
- ZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uKeyName;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
+ OBJECT_ATTRIBUTES object_attributes;
+ ZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &key_name;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
// Open the registry key with minimal permissions (query only)
- HANDLE hKey = nullptr;
+ HANDLE key = nullptr;
constexpr ACCESS_MASK KEY_QUERY_ONLY = 0x0001; // KEY_QUERY_VALUE
- NTSTATUS st = pNtOpenKey(&hKey, KEY_QUERY_ONLY, &objAttr);
- if (!NT_SUCCESS(st) || !hKey) {
+ NTSTATUS st = nt_open_key(&key, KEY_QUERY_ONLY, &object_attributes);
+ if (!NT_SUCCESS(st) || !key) {
return false;
}
// We specifically want the "ProductId". Automated malware analysis sandboxes often
// neglect to randomize this value, thats why we flag it
- UNICODE_STRING uValueName;
- pRtlInitUnicodeString(&uValueName, L"ProductId");
+ UNICODE_STRING value_name;
+ rtl_init_unicode_string(&value_name, L"ProductId");
// Buffer for KEY_VALUE_PARTIAL_INFORMATION
BYTE buffer[128]{};
- ULONG resultLength = 0;
- constexpr ULONG KeyValuePartialInformation = 2;
+ ULONG result_length = 0;
+ constexpr ULONG key_value_partial_information = 2;
- st = pNtQueryValueKey(hKey, &uValueName, KeyValuePartialInformation, buffer, sizeof(buffer), &resultLength);
+ st = nt_query_value_key(key, &value_name, key_value_partial_information, buffer, sizeof(buffer), &result_length);
- pNtClose(hKey);
+ nt_close(key);
if (!NT_SUCCESS(st)) {
return false;
@@ -7717,41 +7632,41 @@ struct VM {
BYTE Data[1];
};
- if (resultLength < offsetof(KEY_VALUE_PARTIAL_INFORMATION_LOCAL, Data) + 1) {
+ if (result_length < offsetof(KEY_VALUE_PARTIAL_INFORMATION_LOCAL, Data) + 1) {
return false;
}
// Safely extract the ProductId string from the raw byte buffer, ensuring we don't
// buffer overflow if the registry returns garbage data
const auto* kv = reinterpret_cast(buffer);
- const ULONG dataLen = kv->DataLength;
- if (dataLen == 0 || dataLen >= sizeof(buffer)) return false;
+ const ULONG data_length = kv->DataLength;
+ if (data_length == 0 || data_length >= sizeof(buffer)) return false;
- char productId[64] = { 0 };
- const size_t copyLen = (dataLen < (sizeof(productId) - 1)) ? dataLen : (sizeof(productId) - 1);
- memcpy(productId, kv->Data, copyLen);
- productId[copyLen] = '\0';
+ char product_id[64] = { 0 };
+ const size_t copyLen = (data_length < (sizeof(product_id) - 1)) ? data_length : (sizeof(product_id) - 1);
+ memcpy(product_id, kv->Data, copyLen);
+ product_id[copyLen] = '\0';
// A list of known "dirty" Product IDs associated with public malware analysis sandboxes
- struct TargetPattern {
+ struct target_pattern {
const char* product_id;
const char* brand;
};
- constexpr TargetPattern targets[] = {
+ constexpr target_pattern targets[] = {
{"55274-640-2673064-23950", brands::JOEBOX},
{"76487-644-3177037-23510", brands::CWSANDBOX},
{"76487-337-8429955-22614", brands::ANUBIS}
};
- constexpr size_t target_len = 21;
+ constexpr size_t target_length = 21;
- if (strlen(productId) != target_len) return false;
+ if (strlen(product_id) != target_length) return false;
// compare the current system's ProductId against the blacklist
// if a match is found, we identify the specific sandbox environment and flag it
for (const auto& target : targets) {
- if (memcmp(productId, target.product_id, target_len) == 0) {
+ if (memcmp(product_id, target.product_id, target_length) == 0) {
debug("GAMARUE: Detected ", target.product_id);
return core::add(target.brand);
}
@@ -7770,7 +7685,7 @@ struct VM {
bool rc = false;
#if (x86_32 && !CLANG)
- auto IsInsideVPC_exceptionFilter = [](PEXCEPTION_POINTERS ep) noexcept -> DWORD {
+ auto is_inside_vpc = [](PEXCEPTION_POINTERS ep) noexcept -> DWORD {
PCONTEXT ctx = ep->ContextRecord;
ctx->Ebx = static_cast(-1); // Not running VPC
@@ -7803,7 +7718,7 @@ struct VM {
pop eax
}
}
- __except (IsInsideVPC_exceptionFilter(GetExceptionInformation())) {
+ __except (is_inside_vpc(GetExceptionInformation())) {
rc = false;
}
#endif
@@ -7915,11 +7830,11 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pNtOpenMutant = reinterpret_cast(funcs[0]);
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]);
- const auto pNtClose = reinterpret_cast(funcs[2]);
+ const auto nt_open_mutant = reinterpret_cast(funcs[0]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]);
+ const auto nt_close = reinterpret_cast(funcs[2]);
- if (!pNtOpenMutant || !pRtlInitUnicodeString || !pNtClose) {
+ if (!nt_open_mutant || !rtl_init_unicode_string || !nt_close) {
return false;
}
@@ -7947,7 +7862,7 @@ struct VM {
if (*path == L'\0') continue;
UNICODE_STRING u_name;
- pRtlInitUnicodeString(&u_name, path);
+ rtl_init_unicode_string(&u_name, path);
OBJECT_ATTRIBUTES obj_attr;
memset(&obj_attr, 0, sizeof(obj_attr));
@@ -7956,10 +7871,10 @@ struct VM {
obj_attr.Attributes = OBJ_CASE_INSENSITIVE;
HANDLE h_mutant = nullptr;
- const NTSTATUS st = pNtOpenMutant(&h_mutant, MUTANT_QUERY_STATE, &obj_attr);
+ const NTSTATUS st = nt_open_mutant(&h_mutant, MUTANT_QUERY_STATE, &obj_attr);
if (NT_SUCCESS(st)) {
- if (h_mutant) pNtClose(h_mutant);
+ if (h_mutant) nt_close(h_mutant);
return true;
}
}
@@ -8003,34 +7918,34 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pNtOpenFile = reinterpret_cast(funcs[0]);
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]);
- const auto pNtClose = reinterpret_cast(funcs[2]);
+ const auto nt_open_file = reinterpret_cast(funcs[0]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]);
+ const auto nt_close = reinterpret_cast(funcs[2]);
- if (!pNtOpenFile || !pRtlInitUnicodeString || !pNtClose) {
+ if (!nt_open_file || !rtl_init_unicode_string || !nt_close) {
return false;
}
- const wchar_t* nativePath = L"\\??\\C:\\Cuckoo";
- UNICODE_STRING uPath;
- pRtlInitUnicodeString(&uPath, nativePath);
+ const wchar_t* native_path = L"\\??\\C:\\Cuckoo";
+ UNICODE_STRING path;
+ rtl_init_unicode_string(&path, native_path);
- OBJECT_ATTRIBUTES objAttr;
- ZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uPath;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
+ OBJECT_ATTRIBUTES object_attributes;
+ ZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &path;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
IO_STATUS_BLOCK iosb;
HANDLE hFile = nullptr;
- constexpr ACCESS_MASK desiredAccess = FILE_READ_ATTRIBUTES;
- constexpr ULONG shareAccess = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
- constexpr ULONG openOptions = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT | FILE_DIRECTORY_FILE;
+ constexpr ACCESS_MASK desired_access = FILE_READ_ATTRIBUTES;
+ constexpr ULONG share_access = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+ constexpr ULONG open_options = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT | FILE_DIRECTORY_FILE;
- const NTSTATUS st = pNtOpenFile(&hFile, desiredAccess, &objAttr, &iosb, shareAccess, openOptions);
+ const NTSTATUS st = nt_open_file(&hFile, desired_access, &object_attributes, &iosb, share_access, open_options);
if (NT_SUCCESS(st)) {
- if (hFile) pNtClose(hFile);
+ if (hFile) nt_close(hFile);
return core::add(brands::CUCKOO);
}
@@ -8059,34 +7974,34 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pNtOpenFile = reinterpret_cast(funcs[0]);
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[1]);
- const auto pNtClose = reinterpret_cast(funcs[2]);
+ const auto nt_open_file = reinterpret_cast(funcs[0]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[1]);
+ const auto nt_close = reinterpret_cast(funcs[2]);
- if (!pNtOpenFile || !pRtlInitUnicodeString || !pNtClose) {
+ if (!nt_open_file || !rtl_init_unicode_string || !nt_close) {
return false;
}
- const wchar_t* pipePath = L"\\??\\pipe\\cuckoo";
- UNICODE_STRING uPipe;
- pRtlInitUnicodeString(&uPipe, pipePath);
+ const wchar_t* pipe_path = L"\\??\\pipe\\cuckoo";
+ UNICODE_STRING pipe;
+ rtl_init_unicode_string(&pipe, pipe_path);
- OBJECT_ATTRIBUTES objAttr;
- ZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uPipe;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
+ OBJECT_ATTRIBUTES object_attributes;
+ ZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &pipe;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
IO_STATUS_BLOCK iosb;
- HANDLE hPipe = nullptr;
+ HANDLE h_pipe = nullptr;
- constexpr ACCESS_MASK desiredAccess = FILE_READ_DATA | FILE_READ_ATTRIBUTES;
- constexpr ULONG shareAccess = 0;
- constexpr ULONG openOptions = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT;
+ constexpr ACCESS_MASK desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES;
+ constexpr ULONG share_access = 0;
+ constexpr ULONG open_options = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT;
- const NTSTATUS st = pNtOpenFile(&hPipe, desiredAccess, &objAttr, &iosb, shareAccess, openOptions);
+ const NTSTATUS st = nt_open_file(&h_pipe, desired_access, &object_attributes, &iosb, share_access, open_options);
if (NT_SUCCESS(st)) {
- if (hPipe) pNtClose(hPipe);
+ if (h_pipe) nt_close(h_pipe);
return core::add(brands::CUCKOO);
}
@@ -8112,16 +8027,16 @@ struct VM {
if (bpp != 32 || logpix < 90 || logpix > 200)
return true;
- UINT32 pathCount = 0, modeCount = 0;
+ UINT32 path_count = 0, mode_count = 0;
if (QueryDisplayConfig(QDC_ONLY_ACTIVE_PATHS, // win7 and later
- &pathCount, nullptr,
- &modeCount, nullptr,
+ &path_count, nullptr,
+ &mode_count, nullptr,
nullptr) != ERROR_SUCCESS)
return false;
- if ((pathCount <= 1) || (pathCount != modeCount)) {
- debug("DISPLAY: Path count: ", pathCount);
- debug("DISPLAY: Mode count: ", modeCount);
+ if ((path_count <= 1) || (path_count != mode_count)) {
+ debug("DISPLAY: Path count: ", path_count);
+ debug("DISPLAY: Mode count: ", mode_count);
return true;
}
@@ -8184,10 +8099,10 @@ struct VM {
PSIZE_T RegionSize,
ULONG AllocationType,
ULONG Protect
- );
+ );
using NtFreeVirtualMemoryFn = NTSTATUS(__stdcall*)(HANDLE ProcessHandle, PVOID* BaseAddress, PSIZE_T RegionSize, ULONG FreeType);
- constexpr ULONG SystemModuleInformation = 11;
+ constexpr ULONG system_module_information = 11;
const HMODULE ntdll = util::get_ntdll();
if (!ntdll) return false;
@@ -8195,38 +8110,38 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto ntQuerySystemInformation = reinterpret_cast(funcs[0]);
- const auto ntAllocateVirtualMemory = reinterpret_cast(funcs[1]);
- const auto ntFreeVirtualMemory = reinterpret_cast(funcs[2]);
+ const auto nt_query_system_information = reinterpret_cast(funcs[0]);
+ const auto nt_allocate_virtual_memory = reinterpret_cast<NtAllocateVirtualMemoryFn>(funcs[1]);
+ const auto nt_free_virtual_memory = reinterpret_cast<NtFreeVirtualMemoryFn>(funcs[2]);
- if (ntQuerySystemInformation == nullptr || ntAllocateVirtualMemory == nullptr || ntFreeVirtualMemory == nullptr)
+ if (nt_query_system_information == nullptr || nt_allocate_virtual_memory == nullptr || nt_free_virtual_memory == nullptr)
return false;
- ULONG ulSize = 0;
- NTSTATUS status = ntQuerySystemInformation(SystemModuleInformation, nullptr, 0, &ulSize);
+ ULONG ul_size = 0;
+ NTSTATUS status = nt_query_system_information(system_module_information, nullptr, 0, &ul_size);
if (status != ((NTSTATUS)0xC0000004L)) return false;
- const HANDLE hCurrentProcess = reinterpret_cast(-1LL);
- PVOID allocatedMemory = nullptr;
- SIZE_T regionSize = ulSize;
- ntAllocateVirtualMemory(hCurrentProcess, &allocatedMemory, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ const HANDLE current_process = reinterpret_cast<HANDLE>(-1LL);
+ PVOID allocated_memory = nullptr;
+ SIZE_T region_size = ul_size;
+ nt_allocate_virtual_memory(current_process, &allocated_memory, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
- const auto pSystemModuleInfoEx = reinterpret_cast(allocatedMemory);
- status = ntQuerySystemInformation(SystemModuleInformation, pSystemModuleInfoEx, ulSize, &ulSize);
+ const auto system_module_info_ex = reinterpret_cast(allocated_memory);
+ status = nt_query_system_information(system_module_information, system_module_info_ex, ul_size, &ul_size);
if (!(((NTSTATUS)(status)) >= 0)) {
- ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE);
return false;
}
- for (ULONG i = 0; i < pSystemModuleInfoEx->NumberOfModules; ++i) {
- const char* driverPath = reinterpret_cast(pSystemModuleInfoEx->Module[i].ImageName);
+ for (ULONG i = 0; i < system_module_info_ex->NumberOfModules; ++i) {
+ const char* driverPath = reinterpret_cast<const char*>(system_module_info_ex->Module[i].ImageName);
if (
strstr(driverPath, "VBoxGuest") || // only installed after vbox guest additions
strstr(driverPath, "VBoxMouse") ||
strstr(driverPath, "VBoxSF")
) {
debug("DRIVERS: Detected VBox driver: ", driverPath);
- ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE);
return core::add(brands::VBOX);
}
@@ -8236,12 +8151,12 @@ struct VM {
strstr(driverPath, "vmmemctl")
) {
debug("DRIVERS: Detected VMware driver: ", driverPath);
- ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE);
return core::add(brands::VMWARE);
}
}
- ntFreeVirtualMemory(hCurrentProcess, &allocatedMemory, ®ionSize, MEM_RELEASE);
+ nt_free_virtual_memory(current_process, &allocated_memory, ®ion_size, MEM_RELEASE);
return false;
}
@@ -8262,7 +8177,7 @@ struct VM {
bool result = false;
constexpr u8 MAX_PHYSICAL_DRIVES = 4;
constexpr SIZE_T MAX_DESCRIPTOR_SIZE = 64 * 1024;
- u8 successfulOpens = 0;
+ u8 successful_opens = 0;
// Helper to detect QEMU instances based on default hard drive serial patterns
// QEMU drives often start with "QM000" followed by digits
@@ -8325,15 +8240,15 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]);
- const auto pNtOpenFile = reinterpret_cast(funcs[1]);
- const auto pNtDeviceIoControlFile = reinterpret_cast(funcs[2]);
- const auto pNtAllocateVirtualMemory = reinterpret_cast(funcs[3]);
- const auto pNtFreeVirtualMemory = reinterpret_cast(funcs[4]);
- const auto pNtClose = reinterpret_cast(funcs[6]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]);
+ const auto nt_open_file = reinterpret_cast(funcs[1]);
+ const auto nt_device_io_control_file = reinterpret_cast(funcs[2]);
+ const auto nt_allocate_virtual_memory = reinterpret_cast(funcs[3]);
+ const auto nt_free_virtual_memory = reinterpret_cast(funcs[4]);
+ const auto nt_close = reinterpret_cast(funcs[6]);
- if (!pRtlInitUnicodeString || !pNtOpenFile || !pNtDeviceIoControlFile ||
- !pNtAllocateVirtualMemory || !pNtFreeVirtualMemory || !pNtClose) {
+ if (!rtl_init_unicode_string || !nt_open_file || !nt_device_io_control_file ||
+ !nt_allocate_virtual_memory || !nt_free_virtual_memory || !nt_close) {
return result;
}
@@ -8343,29 +8258,29 @@ struct VM {
wchar_t path[32];
swprintf_s(path, L"\\??\\PhysicalDrive%u", drive);
- UNICODE_STRING uPath;
- pRtlInitUnicodeString(&uPath, path);
+ UNICODE_STRING unicode_path;
+ rtl_init_unicode_string(&unicode_path, path);
- OBJECT_ATTRIBUTES objAttr;
- RtlZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uPath;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
- objAttr.RootDirectory = nullptr;
+ OBJECT_ATTRIBUTES object_attributes;
+ RtlZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &unicode_path;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
+ object_attributes.RootDirectory = nullptr;
IO_STATUS_BLOCK iosb;
- HANDLE hDevice = nullptr;
+ HANDLE device = nullptr;
- constexpr ACCESS_MASK desiredAccess = SYNCHRONIZE | FILE_READ_ATTRIBUTES;
- constexpr ULONG shareAccess = FILE_SHARE_READ | FILE_SHARE_WRITE;
- constexpr ULONG openOptions = FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT;
+ constexpr ACCESS_MASK desired_access = SYNCHRONIZE | FILE_READ_ATTRIBUTES;
+ constexpr ULONG share_access = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ constexpr ULONG open_options = FILE_NON_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT;
// Attempt to open the physical drive directly using Native API
- NTSTATUS st = pNtOpenFile(&hDevice, desiredAccess, &objAttr, &iosb, shareAccess, openOptions);
- if (!NT_SUCCESS(st) || hDevice == nullptr) {
+ NTSTATUS st = nt_open_file(&device, desired_access, &object_attributes, &iosb, share_access, open_options);
+ if (!NT_SUCCESS(st) || device == nullptr) {
continue;
}
- ++successfulOpens;
+ ++successful_opens;
// stack buffer attempt
// We first try to read the storage properties into a small stack buffer to avoid heap
@@ -8378,104 +8293,104 @@ struct VM {
const ULONG ioctl = IOCTL_STORAGE_QUERY_PROPERTY;
- st = pNtDeviceIoControlFile(hDevice, nullptr, nullptr, nullptr, &iosb,
+ st = nt_device_io_control_file(device, nullptr, nullptr, nullptr, &iosb,
ioctl,
&query, sizeof(query),
stackBuf, sizeof(stackBuf));
- BYTE* allocatedBuffer = nullptr;
- SIZE_T allocatedSize = 0;
- const HANDLE hCurrentProcess = reinterpret_cast(-1LL);
+ BYTE* allocated_buffer = nullptr;
+ SIZE_T allocated_size = 0;
+ const HANDLE current_process = reinterpret_cast<HANDLE>(-1LL);
// If the stack buffer was too small (NtDeviceIoControlFile failed), we fall back
// to allocating memory dynamically using NtAllocateVirtualMemory
if (!NT_SUCCESS(st)) {
- DWORD reportedSize = 0;
+ DWORD reported_size = 0;
if (descriptor && descriptor->Size > 0) {
- reportedSize = descriptor->Size;
+ reported_size = descriptor->Size;
}
// This branch just ensures the requested size is reasonable before allocating
- if (reportedSize > 0 && reportedSize < static_cast(MAX_DESCRIPTOR_SIZE) && reportedSize >= sizeof(STORAGE_DEVICE_DESCRIPTOR)) {
- allocatedSize = static_cast(reportedSize);
- PVOID allocBase = nullptr;
- SIZE_T regionSize = allocatedSize;
- st = pNtAllocateVirtualMemory(hCurrentProcess, &allocBase, 0, ®ionSize, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
- if (!NT_SUCCESS(st) || allocBase == nullptr) {
- pNtClose(hDevice);
+ if (reported_size > 0 && reported_size < static_cast<DWORD>(MAX_DESCRIPTOR_SIZE) && reported_size >= sizeof(STORAGE_DEVICE_DESCRIPTOR)) {
+ allocated_size = static_cast<SIZE_T>(reported_size);
+ PVOID allocation_base = nullptr;
+ SIZE_T region_size = allocated_size;
+ st = nt_allocate_virtual_memory(current_process, &allocation_base, 0, ®ion_size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
+ if (!NT_SUCCESS(st) || allocation_base == nullptr) {
+ nt_close(device);
continue;
}
- allocatedBuffer = reinterpret_cast(allocBase);
+ allocated_buffer = reinterpret_cast<BYTE*>(allocation_base);
// Retry the query with the larger allocated buffer
- st = pNtDeviceIoControlFile(hDevice, nullptr, nullptr, nullptr, &iosb,
+ st = nt_device_io_control_file(device, nullptr, nullptr, nullptr, &iosb,
ioctl,
&query, sizeof(query),
- allocatedBuffer, static_cast(allocatedSize));
+ allocated_buffer, static_cast<ULONG>(allocated_size));
if (!NT_SUCCESS(st)) {
- PVOID freeBase = reinterpret_cast(allocatedBuffer);
- SIZE_T freeSize = allocatedSize;
- pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE);
- pNtClose(hDevice);
+ PVOID free_base = reinterpret_cast<PVOID>(allocated_buffer);
+ SIZE_T free_size = allocated_size;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ nt_close(device);
continue;
}
- descriptor = reinterpret_cast(allocatedBuffer);
+ descriptor = reinterpret_cast<STORAGE_DEVICE_DESCRIPTOR*>(allocated_buffer);
}
else {
- pNtClose(hDevice);
+ nt_close(device);
continue;
}
}
// This part is just to validate the structure size returned by the driver to prevent out-of-bounds reads
{
- const DWORD reportedSize = descriptor->Size;
- if (reportedSize < sizeof(STORAGE_DEVICE_DESCRIPTOR) || static_cast(reportedSize) > MAX_DESCRIPTOR_SIZE) {
- if (allocatedBuffer) {
- PVOID freeBase = reinterpret_cast(allocatedBuffer);
- SIZE_T freeSize = allocatedSize;
- pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE);
- allocatedBuffer = nullptr;
+ const DWORD reported_size = descriptor->Size;
+ if (reported_size < sizeof(STORAGE_DEVICE_DESCRIPTOR) || static_cast(reported_size) > MAX_DESCRIPTOR_SIZE) {
+ if (allocated_buffer) {
+ PVOID free_base = reinterpret_cast<PVOID>(allocated_buffer);
+ SIZE_T free_size = allocated_size;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ allocated_buffer = nullptr;
}
- pNtClose(hDevice);
+ nt_close(device);
continue;
}
}
// Serial number string within the descriptor structure
- const u32 serialOffset = descriptor->SerialNumberOffset;
- if (serialOffset > 0 && serialOffset < descriptor->Size) {
- const char* serial = reinterpret_cast(descriptor) + serialOffset;
- const size_t maxAvail = static_cast(descriptor->Size) - static_cast(serialOffset);
- const size_t serialLen = strnlen(serial, maxAvail);
+ const u32 serial_offset = descriptor->SerialNumberOffset;
+ if (serial_offset > 0 && serial_offset < descriptor->Size) {
+ const char* serial = reinterpret_cast<const char*>(descriptor) + serial_offset;
+ const size_t max_avail = static_cast<size_t>(descriptor->Size) - static_cast<size_t>(serial_offset);
+ const size_t serialLen = strnlen(serial, max_avail);
debug("DISK_SERIAL: ", serial);
// Check the retrieved serial number against known VM artifacts
if (is_qemu_serial(serial) || is_vbox_serial(serial, serialLen)) {
- if (allocatedBuffer) {
- PVOID freeBase = reinterpret_cast(allocatedBuffer);
- SIZE_T freeSize = allocatedSize;
- pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE);
- allocatedBuffer = nullptr;
+ if (allocated_buffer) {
+ PVOID free_base = reinterpret_cast<PVOID>(allocated_buffer);
+ SIZE_T free_size = allocated_size;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ allocated_buffer = nullptr;
}
- pNtClose(hDevice);
+ nt_close(device);
return true;
}
}
// Cleanup for the current iteration if no VM was detected on this drive
- if (allocatedBuffer) {
- PVOID freeBase = reinterpret_cast(allocatedBuffer);
- SIZE_T freeSize = allocatedSize;
- pNtFreeVirtualMemory(hCurrentProcess, &freeBase, &freeSize, MEM_RELEASE);
- allocatedBuffer = nullptr;
+ if (allocated_buffer) {
+ PVOID free_base = reinterpret_cast<PVOID>(allocated_buffer);
+ SIZE_T free_size = allocated_size;
+ nt_free_virtual_memory(current_process, &free_base, &free_size, MEM_RELEASE);
+ allocated_buffer = nullptr;
}
- pNtClose(hDevice);
+ nt_close(device);
}
// If we couldn't open any physical drives (not even read permissions) it's weird so we flag it.
- if (successfulOpens == 0) {
+ if (successful_opens == 0) {
debug("DISK_SERIAL: No physical drives detected");
return true;
}
@@ -8526,12 +8441,12 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]);
- const auto pNtOpenKey = reinterpret_cast(funcs[1]);
- const auto pNtQueryKey = reinterpret_cast(funcs[2]);
- const auto pNtClose = reinterpret_cast(funcs[3]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]);
+ const auto nt_open_key = reinterpret_cast(funcs[1]);
+ const auto nt_query_key = reinterpret_cast(funcs[2]);
+ const auto nt_close = reinterpret_cast(funcs[3]);
- if (!pRtlInitUnicodeString || !pNtOpenKey || !pNtQueryKey || !pNtClose) {
+ if (!rtl_init_unicode_string || !nt_open_key || !nt_query_key || !nt_close) {
return false;
}
@@ -8553,39 +8468,39 @@ struct VM {
GUID_IVSHMEM_IFACE.Data4[6], GUID_IVSHMEM_IFACE.Data4[7]
);
- UNICODE_STRING uPath;
- pRtlInitUnicodeString(&uPath, interface_class_path);
+ UNICODE_STRING unicode_path;
+ rtl_init_unicode_string(&unicode_path, interface_class_path);
- OBJECT_ATTRIBUTES objAttr;
- RtlZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uPath;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
+ OBJECT_ATTRIBUTES object_attributes;
+ RtlZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &unicode_path;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
- HANDLE hKey = nullptr;
- NTSTATUS st = pNtOpenKey(&hKey, KEY_READ, &objAttr);
- if (!NT_SUCCESS(st) || hKey == nullptr) {
+ HANDLE key = nullptr;
+ NTSTATUS st = nt_open_key(&key, KEY_READ, &object_attributes);
+ if (!NT_SUCCESS(st) || key == nullptr) {
return false;
}
// We query the "Full Information" of the key to get the count of subkeys
// The existence of the class key alone isn't enough cuz Windows might register the class but have no devices
// If SubKeys > 0, it means actual device instances (for ex. PCI devices) are registered under this interface
- BYTE infoBuf[512] = {};
- ULONG returnedLen = 0;
- st = pNtQueryKey(hKey, KeyFullInformation, infoBuf, sizeof(infoBuf), &returnedLen);
+ BYTE info_buffer[512] = {};
+ ULONG returned_len = 0;
+ st = nt_query_key(key, KeyFullInformation, info_buffer, sizeof(info_buffer), &returned_len);
DWORD number_of_subkeys = 0;
- if (NT_SUCCESS(st) && returnedLen >= sizeof(KEY_FULL_INFORMATION)) {
- auto* kfi = reinterpret_cast(infoBuf);
+ if (NT_SUCCESS(st) && returned_len >= sizeof(KEY_FULL_INFORMATION)) {
+ auto* kfi = reinterpret_cast<KEY_FULL_INFORMATION*>(info_buffer);
number_of_subkeys = static_cast<DWORD>(kfi->SubKeys);
}
else {
- pNtClose(hKey);
+ nt_close(key);
return false;
}
- pNtClose(hKey);
+ nt_close(key);
return number_of_subkeys > 0;
}
@@ -8622,10 +8537,10 @@ struct VM {
return true;
}
- const int colorMgmtCaps = GetDeviceCaps(hdc, COLORMGMTCAPS);
+ const int color_caps = GetDeviceCaps(hdc, COLORMGMTCAPS);
ReleaseDC(nullptr, hdc);
- return !(colorMgmtCaps & CM_GAMMA_RAMP) || colorMgmtCaps == 0;
+ return !(color_caps & CM_GAMMA_RAMP) || color_caps == 0;
}
@@ -8642,11 +8557,11 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]);
- const auto pNtOpenFile = reinterpret_cast(funcs[1]);
- const auto pNtClose = reinterpret_cast(funcs[2]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]);
+ const auto nt_open_file = reinterpret_cast(funcs[1]);
+ const auto nt_close = reinterpret_cast(funcs[2]);
- if (!pRtlInitUnicodeString || !pNtOpenFile || !pNtClose) {
+ if (!rtl_init_unicode_string || !nt_open_file || !nt_close) {
return false;
}
@@ -8674,7 +8589,7 @@ struct VM {
constexpr ULONG share_access = FILE_SHARE_READ;
constexpr ULONG open_options = FILE_OPEN | FILE_SYNCHRONOUS_IO_NONALERT;
- const NTSTATUS st = pNtOpenFile(&h_file, desired_access, &obj_attr, &iosb, share_access, open_options);
+ const NTSTATUS st = nt_open_file(&h_file, desired_access, &obj_attr, &iosb, share_access, open_options);
if (NT_SUCCESS(st)) {
return h_file;
@@ -8707,7 +8622,7 @@ struct VM {
for (size_t i = 0; i < 4; ++i) {
if (handles[i] != INVALID_HANDLE_VALUE) {
- pNtClose(handles[i]);
+ nt_close(handles[i]);
}
}
@@ -8717,13 +8632,13 @@ struct VM {
}
if (handles[4] != INVALID_HANDLE_VALUE) {
- pNtClose(handles[4]);
+ nt_close(handles[4]);
debug("DEVICE_HANDLES: Detected VMware related device (HGFS)");
return core::add(brands::VMWARE);
}
if (handles[5] != INVALID_HANDLE_VALUE) {
- pNtClose(handles[5]);
+ nt_close(handles[5]);
debug("DEVICE_HANDLES: Detected Cuckoo related device (pipe)");
return core::add(brands::CUCKOO);
}
@@ -8805,14 +8720,14 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const FN_NtQuerySystemInformation pNtQuerySystemInformation = reinterpret_cast(funcs[0]);
- if (pNtQuerySystemInformation) {
- SYSTEM_HYPERVISOR_DETAIL_INFORMATION hvInfo = { {} };
+ const FN_NtQuerySystemInformation nt_query_system_information = reinterpret_cast<FN_NtQuerySystemInformation>(funcs[0]);
+ if (nt_query_system_information) {
+ SYSTEM_HYPERVISOR_DETAIL_INFORMATION hypervisor_information = { {} };
// Request class 0x9F (SystemHypervisorDetailInformation)
// This asks the OS kernel to fill the structure with information about the
// hypervisor layer it is running on top of
- const NTSTATUS status = pNtQuerySystemInformation(static_cast(0x9F), &hvInfo, sizeof(hvInfo), nullptr);
+ const NTSTATUS status = nt_query_system_information(static_cast(0x9F), &hypervisor_information, sizeof(hypervisor_information), nullptr);
if (status != 0) {
return false;
@@ -8820,7 +8735,7 @@ struct VM {
// If Data[0] is non-zero, it means the kernel has successfully communicated
// with a hypervisor and retrieved a vendor signature like "Micr" for Microsoft
- if (hvInfo.HvVendorAndMaxFunction.Data[0] != 0) {
+ if (hypervisor_information.HvVendorAndMaxFunction.Data[0] != 0) {
return true;
}
}
@@ -8869,23 +8784,23 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto NtOpenKey = reinterpret_cast(funcs[0]);
- const auto NtQueryObject = reinterpret_cast(funcs[1]);
- const auto pNtClose = reinterpret_cast(funcs[2]);
+ const auto nt_open_key = reinterpret_cast(funcs[0]);
+ const auto nt_query_object = reinterpret_cast(funcs[1]);
+ const auto nt_close = reinterpret_cast(funcs[2]);
- if (!NtOpenKey || !NtQueryObject || !pNtClose)
+ if (!nt_open_key || !nt_query_object || !nt_close)
return false;
// Prepare to open the root USER registry hive
- UNICODE_STRING keyPath{};
- keyPath.Buffer = const_cast(L"\\REGISTRY\\USER");
- keyPath.Length = static_cast(wcslen(keyPath.Buffer) * sizeof(WCHAR));
- keyPath.MaximumLength = keyPath.Length + sizeof(WCHAR);
+ UNICODE_STRING key_path{};
+ key_path.Buffer = const_cast<PWSTR>(L"\\REGISTRY\\USER");
+ key_path.Length = static_cast<USHORT>(wcslen(key_path.Buffer) * sizeof(WCHAR));
+ key_path.MaximumLength = key_path.Length + sizeof(WCHAR);
- OBJECT_ATTRIBUTES objAttr = {
+ OBJECT_ATTRIBUTES object_attributes = {
sizeof(OBJECT_ATTRIBUTES),
nullptr,
- &keyPath,
+ &key_path,
0x00000040L, // OBJ_CASE_INSENSITIVE
nullptr,
nullptr
@@ -8893,8 +8808,8 @@ struct VM {
// Attempt to open the key. If we are sandboxed, this open call often succeeds,
// but the underlying handle will point to a virtualized container, not the real OS path
- HANDLE hKey = nullptr;
- NTSTATUS status = NtOpenKey(&hKey, KEY_READ, reinterpret_cast(&objAttr));
+ HANDLE key = nullptr;
+ NTSTATUS status = nt_open_key(&key, KEY_READ, reinterpret_cast(&object_attributes));
if (!(((NTSTATUS)(status)) >= 0))
return false;
@@ -8903,22 +8818,22 @@ struct VM {
// While the API pretends we opened "\REGISTRY\USER", the handle might actually point to
// something like "\Device\HarddiskVolume2\Sandbox\User\DefaultBox\RegHive"
alignas(16) BYTE buffer[1024]{};
- ULONG returnedLength = 0;
- status = NtQueryObject(hKey, ObjectNameInformation, buffer, sizeof(buffer), &returnedLength);
- pNtClose(hKey);
+ ULONG returned_length = 0;
+ status = nt_query_object(key, ObjectNameInformation, buffer, sizeof(buffer), &returned_length);
+ nt_close(key);
if (!(((NTSTATUS)(status)) >= 0))
return false;
- const auto pObjectName = reinterpret_cast(buffer);
+ const auto object_name = reinterpret_cast<POBJECT_NAME_INFORMATION>(buffer);
- UNICODE_STRING expectedName{};
- expectedName.Buffer = const_cast(L"\\REGISTRY\\USER");
- expectedName.Length = static_cast(wcslen(expectedName.Buffer) * sizeof(WCHAR));
+ UNICODE_STRING expected_name{};
+ expected_name.Buffer = const_cast<PWSTR>(L"\\REGISTRY\\USER");
+ expected_name.Length = static_cast<USHORT>(wcslen(expected_name.Buffer) * sizeof(WCHAR));
// Compare the requested name vs the actual kernel object name
// If they don't match, we have been redirected, confirming the presence of Sandboxie
- const bool mismatch = (pObjectName->Name.Length != expectedName.Length) ||
- (memcmp(pObjectName->Name.Buffer, expectedName.Buffer, expectedName.Length) != 0);
+ const bool mismatch = (object_name->Name.Length != expected_name.Length) ||
+ (memcmp(object_name->Name.Buffer, expected_name.Buffer, expected_name.Length) != 0);
return mismatch ? core::add(brands::SANDBOXIE) : false;
}
@@ -8966,12 +8881,12 @@ struct VM {
void* funcs[ARRAYSIZE(names)] = {};
util::get_function_address(ntdll, names, funcs, ARRAYSIZE(names));
- const auto pRtlInitUnicodeString = reinterpret_cast(funcs[0]);
- const auto pNtOpenKey = reinterpret_cast(funcs[1]);
- const auto pNtQueryKey = reinterpret_cast(funcs[2]);
- const auto pNtClose = reinterpret_cast(funcs[3]);
+ const auto rtl_init_unicode_string = reinterpret_cast(funcs[0]);
+ const auto nt_open_key = reinterpret_cast(funcs[1]);
+ const auto nt_query_key = reinterpret_cast(funcs[2]);
+ const auto nt_close = reinterpret_cast(funcs[3]);
- if (!pRtlInitUnicodeString || !pNtOpenKey || !pNtQueryKey || !pNtClose) {
+ if (!rtl_init_unicode_string || !nt_open_key || !nt_query_key || !nt_close) {
return false;
}
@@ -8979,56 +8894,56 @@ struct VM {
// Most legitimate user PCs have speakers or headphones (audio endpoints)
// Automated sandboxes and headless servers often have no audio devices configured
// We target the MMDevices\Audio\Render key where these endpoints are registered
- const wchar_t* nativePath = L"\\Registry\\Machine\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render";
+ const wchar_t* native_path = L"\\Registry\\Machine\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\MMDevices\\Audio\\Render";
- UNICODE_STRING uPath;
- pRtlInitUnicodeString(&uPath, nativePath);
+ UNICODE_STRING unicode_path;
+ rtl_init_unicode_string(&unicode_path, native_path);
- OBJECT_ATTRIBUTES objAttr;
- RtlZeroMemory(&objAttr, sizeof(objAttr));
- objAttr.Length = sizeof(objAttr);
- objAttr.ObjectName = &uPath;
- objAttr.Attributes = OBJ_CASE_INSENSITIVE;
+ OBJECT_ATTRIBUTES object_attributes;
+ RtlZeroMemory(&object_attributes, sizeof(object_attributes));
+ object_attributes.Length = sizeof(object_attributes);
+ object_attributes.ObjectName = &unicode_path;
+ object_attributes.Attributes = OBJ_CASE_INSENSITIVE;
- HANDLE hKey = nullptr;
- const ACCESS_MASK desiredAccess = KEY_READ;
+ HANDLE key = nullptr;
+ const ACCESS_MASK desired_access = KEY_READ;
- NTSTATUS st = pNtOpenKey(&hKey, desiredAccess, &objAttr);
- if (!NT_SUCCESS(st) || hKey == nullptr) {
+ NTSTATUS st = nt_open_key(&key, desired_access, &object_attributes);
+ if (!NT_SUCCESS(st) || key == nullptr) {
return false;
}
constexpr KEY_INFORMATION_CLASS InfoClass = KeyFullInformation;
- std::vector infoBuf(512);
- ULONG returnedLen = 0;
+ std::vector<BYTE> info_buffer(512);
+ ULONG returned_len = 0;
// Query the key information. If the buffer is too small (STATUS_BUFFER_TOO_SMALL),
// resize it to the exact length required by the kernel and try again
- st = pNtQueryKey(hKey, InfoClass, infoBuf.data(), static_cast(infoBuf.size()), &returnedLen);
+ st = nt_query_key(key, InfoClass, info_buffer.data(), static_cast<ULONG>(info_buffer.size()), &returned_len);
- if (!NT_SUCCESS(st) && returnedLen > infoBuf.size()) {
- infoBuf.resize(returnedLen);
- st = pNtQueryKey(hKey, InfoClass, infoBuf.data(), static_cast(infoBuf.size()), &returnedLen);
+ if (!NT_SUCCESS(st) && returned_len > info_buffer.size()) {
+ info_buffer.resize(returned_len);
+ st = nt_query_key(key, InfoClass, info_buffer.data(), static_cast<ULONG>(info_buffer.size()), &returned_len);
}
- bool hasValues = false;
- if (NT_SUCCESS(st) && returnedLen >= sizeof(KEY_FULL_INFORMATION)) {
- auto* kfi = reinterpret_cast(infoBuf.data());
+ bool has_values = false;
+ if (NT_SUCCESS(st) && returned_len >= sizeof(KEY_FULL_INFORMATION)) {
+ const auto* kfi = reinterpret_cast<const KEY_FULL_INFORMATION*>(info_buffer.data());
// Check if the registry key has any values associated with it
// If 'Values' is 0, the audio system is likely uninitialized or barren,
// which strongly suggests a virtualized/sandbox environment
- const DWORD valueCount = static_cast(kfi->Values); // values, not subkeys
- hasValues = (valueCount > 0);
+ const DWORD value_count = static_cast<DWORD>(kfi->Values); // values, not subkeys
+ has_values = (value_count > 0);
}
else {
- pNtClose(hKey);
+ nt_close(key);
return false;
}
- pNtClose(hKey);
+ nt_close(key);
- return hasValues;
+ return has_values;
}
@@ -9043,15 +8958,15 @@ struct VM {
};
// enumerate all DISPLAY devices
- const HDEVINFO hDevInfo = SetupDiGetClassDevsW(&GUID_DEVCLASS_DISPLAY, nullptr, nullptr, DIGCF_PRESENT);
- if (hDevInfo == INVALID_HANDLE_VALUE) {
+ const HDEVINFO handle_dev_info = SetupDiGetClassDevsW(&GUID_DEVCLASS_DISPLAY, nullptr, nullptr, DIGCF_PRESENT);
+ if (handle_dev_info == INVALID_HANDLE_VALUE) {
debug("ACPI_SIGNATURE: No display device detected");
return true;
}
- SP_DEVINFO_DATA devInfo;
- ZeroMemory(&devInfo, sizeof(devInfo));
- devInfo.cbSize = sizeof(devInfo);
+ SP_DEVINFO_DATA dev_info;
+ ZeroMemory(&dev_info, sizeof(dev_info));
+ dev_info.cbSize = sizeof(dev_info);
const DEVPROPKEY key = DEVPKEY_Device_LocationPaths;
// baremetal tokens (case-sensitive to preserve handling against edge-cases)
@@ -9071,16 +8986,16 @@ struct VM {
return false;
};
- for (DWORD idx = 0; SetupDiEnumDeviceInfo(hDevInfo, idx, &devInfo); ++idx) {
- DEVPROPTYPE propType = 0;
- DWORD requiredSize = 0;
+ for (DWORD idx = 0; SetupDiEnumDeviceInfo(handle_dev_info, idx, &dev_info); ++idx) {
+ DEVPROPTYPE prop_type = 0;
+ DWORD required_size = 0;
// query required size (bytes)
- SetupDiGetDevicePropertyW(hDevInfo, &devInfo, &key, &propType, nullptr, 0, &requiredSize, 0);
- if (GetLastError() != ERROR_INSUFFICIENT_BUFFER || requiredSize == 0) {
+ SetupDiGetDevicePropertyW(handle_dev_info, &dev_info, &key, &prop_type, nullptr, 0, &required_size, 0);
+ if (GetLastError() != ERROR_INSUFFICIENT_BUFFER || required_size == 0) {
if (GetLastError() == ERROR_NOT_FOUND) {
debug("ACPI_SIGNATURE: No dedicated display/GPU detected");
- SetupDiDestroyDeviceInfoList(hDevInfo);
+ SetupDiDestroyDeviceInfoList(handle_dev_info);
return false;
}
else {
@@ -9089,16 +9004,16 @@ struct VM {
}
// fetch buffer (multi-sz)
- std::vector buffer(requiredSize);
- if (!SetupDiGetDevicePropertyW(hDevInfo, &devInfo, &key, &propType,
- buffer.data(), requiredSize, &requiredSize, 0))
+ std::vector<BYTE> buffer(required_size);
+ if (!SetupDiGetDevicePropertyW(handle_dev_info, &dev_info, &key, &prop_type,
+ buffer.data(), required_size, &required_size, 0))
{
continue;
}
const wchar_t* ptr = reinterpret_cast<const wchar_t*>(buffer.data());
// number of wchar_t slots in buffer
- const size_t total_wchars = requiredSize / sizeof(wchar_t);
+ const size_t total_wchars = required_size / sizeof(wchar_t);
const wchar_t* buf_end = ptr + (total_wchars ? total_wchars : 0);
#ifdef __VMAWARE_DEBUG__
@@ -9107,8 +9022,8 @@ struct VM {
}
#endif
- static const wchar_t acpiPrefix[] = L"#ACPI(S";
- static const wchar_t acpiParen[] = L"ACPI(";
+ static const wchar_t acpi_prefix[] = L"#ACPI(S";
+ static const wchar_t acpi_paren[] = L"ACPI(";
// First pass: QEMU-style "#ACPI(Sxx...)" and generic "ACPI(Sxx)"
for (const wchar_t* p = ptr; p < buf_end && *p; p += (wcslen(p) + 1)) {
@@ -9120,18 +9035,18 @@ struct VM {
// search for "#ACPI(S"
const wchar_t* search = p;
while (true) {
- const wchar_t* found = wcsstr(search, acpiPrefix);
+ const wchar_t* found = wcsstr(search, acpi_prefix);
if (!found) break;
// after "#ACPI(S" we expect two hex chars
- const wchar_t* hexpos = found + wcslen(acpiPrefix); // first hex char
+ const wchar_t* hexpos = found + wcslen(acpi_prefix); // first hex char
if (hexpos && hexpos[0] && hexpos[1]) {
wchar_t b = hexpos[0];
wchar_t s = hexpos[1];
if (is_hex(b) && is_hex(s)) {
const wchar_t after = hexpos[2]; // may be '_' or ')'
if (after == L'_' || after == L')') {
- SetupDiDestroyDeviceInfoList(hDevInfo);
+ SetupDiDestroyDeviceInfoList(handle_dev_info);
return core::add(brands::QEMU);
}
}
@@ -9142,12 +9057,12 @@ struct VM {
// search for "ACPI(" then check for "S" + two hex digits
search = p;
while (true) {
- const wchar_t* found = wcsstr(search, acpiParen);
+ const wchar_t* found = wcsstr(search, acpi_paren);
if (!found) break;
- const wchar_t* start = found + wcslen(acpiParen); // char after '('
+ const wchar_t* start = found + wcslen(acpi_paren); // char after '('
if (start && start[0] && start[1] && start[2]) {
if (start[0] == L'S' && is_hex(start[1]) && is_hex(start[2])) {
- SetupDiDestroyDeviceInfoList(hDevInfo);
+ SetupDiDestroyDeviceInfoList(handle_dev_info);
return core::add(brands::QEMU);
}
}
@@ -9165,14 +9080,14 @@ struct VM {
for (const wchar_t* sig : vm_signatures) {
if (wcsstr(p, sig) != nullptr) {
- SetupDiDestroyDeviceInfoList(hDevInfo);
+ SetupDiDestroyDeviceInfoList(handle_dev_info);
return core::add(brands::HYPERV);
}
}
}
}
- SetupDiDestroyDeviceInfoList(hDevInfo);
+ SetupDiDestroyDeviceInfoList(handle_dev_info);
return false;
}
@@ -9183,7 +9098,7 @@ struct VM {
* @implements VM::TRAP
*/
[[nodiscard]] static bool trap() {
- bool hypervisorCaught = false;
+ bool hypervisor_caught = false;
#if (x86_64)
// when a single - step(TF) and hardware breakpoint(DR0) collide, Intel CPUs set both DR6.BS and DR6.B0 to report both events, which help make this detection trick
// AMD CPUs prioritize the breakpoint, setting only its corresponding bit in DR6 and clearing the single-step bit, which is why this technique is not compatible with AMD
@@ -9217,7 +9132,7 @@ struct VM {
0x4C, 0x89, 0xC3, // mov rbx, r8 (restore rbx from r8) - trap happens here
0xC3 // ret
};
- SIZE_T trampSize = sizeof(trampoline);
+ SIZE_T trampoline_size = sizeof(trampoline);
const HMODULE ntdll = util::get_ntdll();
if (!ntdll) return false;
@@ -9243,84 +9158,84 @@ struct VM {
using NtSetContextThread_t = NTSTATUS(__stdcall*)(HANDLE, PCONTEXT);
// volatile ensures these are loaded from stack after SEH unwind when compiled with aggressive optimizations
- NtAllocateVirtualMemory_t volatile pNtAllocateVirtualMemory = reinterpret_cast(funcs[0]);
- NtProtectVirtualMemory_t volatile pNtProtectVirtualMemory = reinterpret_cast(funcs[1]);
- NtFreeVirtualMemory_t volatile pNtFreeVirtualMemory = reinterpret_cast