diff --git a/.gitignore b/.gitignore index 29fdffae..6578b254 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,6 @@ # macOS .DS_Store + +# Linux +*.o +*.cmd diff --git a/.travis.yml b/.travis.yml index c859993b..47e579dd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,20 @@ matrix: - cd platforms/darwin - xcodebuild -configuration Debug -sdk macosx10.14 + - name: "haxm-linux" + os: linux + dist: trusty + sudo: false + before_install: + - sudo apt-get update + - sudo apt-get install -y linux-headers-`uname -r` + - wget http://mirrors.kernel.org/ubuntu/pool/universe/n/nasm/nasm_2.13.02-0.1_amd64.deb + - sudo apt-get install -y dpkg + - sudo dpkg -i nasm_2.13.02-0.1_amd64.deb + script: + - cd platforms/linux + - make -j$(nproc) + exclude: # TODO: Currently TravisCI does not support full VS/EWDK on Windows - name: "haxm-windows" os: windows diff --git a/README.md b/README.md index c8c8fd85..9fe46301 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,7 @@ release, you can get it [here][github-haxm-latest-release]. ## Usage Detailed instructions for building and testing HAXM can be found at: +* [Manual for Linux](docs/manual-linux.md) * [Manual for macOS](docs/manual-macos.md) * [Manual for Windows](docs/manual-windows.md) diff --git a/core/cpu.c b/core/cpu.c index 470ad096..c7371836 100644 --- a/core/cpu.c +++ b/core/cpu.c @@ -520,7 +520,7 @@ uint32_t load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags) /* when wake up from sleep, we need the barrier, as vm operation * are not serialized instructions. */ - smp_mb(); + hax_smp_mb(); cpu_data = current_cpu_data(); diff --git a/core/ept.c b/core/ept.c index c544c88d..65727374 100644 --- a/core/ept.c +++ b/core/ept.c @@ -328,7 +328,7 @@ static void invept_smpfunc(struct invept_bundle *bundle) { struct per_cpu_data *cpu_data; - smp_mb(); + hax_smp_mb(); cpu_data = current_cpu_data(); cpu_data->invept_res = VMX_SUCCEED; @@ -373,7 +373,7 @@ void invept(hax_vm_t *hax_vm, uint type) bundle.type = type; bundle.desc = &desc; - smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc, + hax_smp_call_function(&cpu_online_map, (void (*)(void *))invept_smpfunc, &bundle); /* diff --git a/core/hax.c b/core/hax.c index ef122a3e..69ae9fa2 100644 --- a/core/hax.c +++ b/core/hax.c @@ -61,12 +61,12 @@ struct hax_t *hax; extern hax_atomic_t vmx_cpu_num, vmx_enabled_num; static void hax_enable_vmx(void) { - smp_call_function(&cpu_online_map, cpu_init_vmx, NULL); + hax_smp_call_function(&cpu_online_map, cpu_init_vmx, NULL); } static void hax_disable_vmx(void) { - smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL); + hax_smp_call_function(&cpu_online_map, cpu_exit_vmx, NULL); } static void free_cpu_vmxon_region(void) @@ -410,7 +410,7 @@ static void hax_pmu_init(void) int ref_cpu_id = -1; // Execute cpu_pmu_init() on each logical processor of the host CPU - smp_call_function(&cpu_online_map, cpu_pmu_init, NULL); + hax_smp_call_function(&cpu_online_map, cpu_pmu_init, NULL); // Find the common APM version supported by all host logical processors // TODO: Theoretically we should do the same for other APM parameters diff --git a/core/ia32.c b/core/ia32.c index f494d6f4..9bbf8268 100644 --- a/core/ia32.c +++ b/core/ia32.c @@ -70,7 +70,7 @@ void ia32_wrmsr(uint32_t reg, uint64_t val) #endif } -uint64_t rdtsc(void) +uint64_t ia32_rdtsc(void) { #ifdef HAX_ARCH_X86_32 struct qword_val val = { 0 }; diff --git a/core/ia32_ops.asm b/core/ia32_ops.asm index 7ca7c83d..947b5376 100644 --- a/core/ia32_ops.asm +++ b/core/ia32_ops.asm @@ -161,7 +161,7 @@ function 
__nmi, 0 int 2h ret -function __fls, 1 +function asm_fls, 1 xor reg_ret_32, reg_ret_32 bsr reg_ret_32, reg_arg1_32 ret diff --git a/core/include/cpu.h b/core/include/cpu.h index 04149053..a306113c 100644 --- a/core/include/cpu.h +++ b/core/include/cpu.h @@ -108,12 +108,12 @@ struct per_cpu_data { /* * These fields are used to record the result of certain VMX instructions - * when they are used in a function wrapped by smp_call_function(). This is + * when they are used in a function wrapped by hax_smp_call_function(). This is * because it is not safe to call hax_error(), etc. (whose underlying * implementation may use a lock) from the wrapped function to log a * failure; doing so may cause a deadlock and thus a host reboot, especially * on macOS, where mp_rendezvous_no_intrs() (the legacy Darwin API used by - * HAXM to implement smp_call_function()) is known to be prone to deadlocks: + * HAXM to implement hax_smp_call_function()) is known to be prone to deadlocks: * https://lists.apple.com/archives/darwin-kernel/2006/Dec/msg00006.html */ vmx_result_t vmxon_res; diff --git a/core/include/emulate_ops.h b/core/include/emulate_ops.h index 732b2368..68624912 100644 --- a/core/include/emulate_ops.h +++ b/core/include/emulate_ops.h @@ -39,7 +39,7 @@ (0 * FASTOP_ALIGN)) /* Instruction handlers */ -typedef void(__cdecl em_handler_t)(void); +typedef void(ASMCALL em_handler_t)(void); em_handler_t em_not; em_handler_t em_neg; em_handler_t em_inc; @@ -72,7 +72,7 @@ em_handler_t em_bextr; em_handler_t em_andn; /* Dispatch handlers */ -void __cdecl fastop_dispatch(void *handler, uint64_t *dst, +void ASMCALL fastop_dispatch(void *handler, uint64_t *dst, uint64_t *src1, uint64_t *src2, uint64_t *flags); #endif /* HAX_CORE_EMULATE_OPS_H_ */ diff --git a/core/include/ia32.h b/core/include/ia32.h index 9a3ba83c..caf8eaa4 100644 --- a/core/include/ia32.h +++ b/core/include/ia32.h @@ -78,12 +78,12 @@ void ASMCALL asm_fxrstor(mword *addr); void ASMCALL asm_cpuid(union cpuid_args_t *state); void ASMCALL __nmi(void); -uint32_t ASMCALL __fls(uint32_t bit32); +uint32_t ASMCALL asm_fls(uint32_t bit32); uint64_t ia32_rdmsr(uint32_t reg); void ia32_wrmsr(uint32_t reg, uint64_t val); -uint64_t rdtsc(void); +uint64_t ia32_rdtsc(void); void fxinit(void); void fxsave(mword *addr); diff --git a/core/include/vmx.h b/core/include/vmx.h index 7a36244c..70e350a1 100644 --- a/core/include/vmx.h +++ b/core/include/vmx.h @@ -681,7 +681,7 @@ void vmx_vmwrite(struct vcpu_t *vcpu, const char *name, vmwrite(vcpu, GUEST_##seg##_AR, tmp_ar); \ } -#elif defined(HAX_PLATFORM_DARWIN) +#else #define VMWRITE_SEG(vcpu, seg, val) ({ \ uint32_t tmp_ar = val.ar; \ if (tmp_ar == 0) \ diff --git a/core/intr_exc.c b/core/intr_exc.c index 6c2ea807..57f3a50f 100644 --- a/core/intr_exc.c +++ b/core/intr_exc.c @@ -48,7 +48,7 @@ uint32_t vcpu_get_pending_intrs(struct vcpu_t *vcpu) for (i = 7; i >= 0; i--) { if (intr_pending[i]) { - offset = __fls(intr_pending[i]); + offset = asm_fls(intr_pending[i]); break; } } diff --git a/core/memory.c b/core/memory.c index 7331a4e4..ca7ba334 100644 --- a/core/memory.c +++ b/core/memory.c @@ -342,7 +342,7 @@ int hax_vm_set_ram(struct vm_t *vm, struct hax_set_ram_info *info) hva = 0; #endif #endif - cur_va += page_size; + cur_va += HAX_PAGE_SIZE; } if (!hax_core_set_p2m(vm, gpfn, hpfn, hva, info->flags)) { diff --git a/core/vcpu.c b/core/vcpu.c index 293cac12..b89a12cd 100644 --- a/core/vcpu.c +++ b/core/vcpu.c @@ -565,7 +565,7 @@ static void vcpu_init(struct vcpu_t *vcpu) vcpu->ref_count = 1; - vcpu->tsc_offset = 0ULL 
- rdtsc(); + vcpu->tsc_offset = 0ULL - ia32_rdtsc(); // Prepare the vcpu state to Power-up state->_rflags = 2; @@ -3247,7 +3247,7 @@ static int handle_msr_read(struct vcpu_t *vcpu, uint32_t msr, uint64_t *val) switch (msr) { case IA32_TSC: { - *val = vcpu->tsc_offset + rdtsc(); + *val = vcpu->tsc_offset + ia32_rdtsc(); break; } case IA32_FEATURE_CONTROL: { @@ -3503,7 +3503,7 @@ static int handle_msr_write(struct vcpu_t *vcpu, uint32_t msr, uint64_t val) switch (msr) { case IA32_TSC: { - vcpu->tsc_offset = val - rdtsc(); + vcpu->tsc_offset = val - ia32_rdtsc(); if (vmx(vcpu, pcpu_ctls) & USE_TSC_OFFSETTING) { vmwrite(vcpu, VMX_TSC_OFFSET, vcpu->tsc_offset); } @@ -4160,9 +4160,9 @@ int vcpu_pause(struct vcpu_t *vcpu) return -1; vcpu->paused = 1; - smp_mb(); + hax_smp_mb(); if (vcpu->is_running) { - smp_call_function(&cpu_online_map, _vcpu_take_off, NULL); + hax_smp_call_function(&cpu_online_map, _vcpu_take_off, NULL); } return 0; @@ -4171,7 +4171,7 @@ int vcpu_takeoff(struct vcpu_t *vcpu) { int cpu_id; - cpumap_t targets; + hax_cpumap_t targets; // Don't change the sequence unless you are sure if (vcpu->is_running) { @@ -4179,7 +4179,7 @@ int vcpu_takeoff(struct vcpu_t *vcpu) assert(cpu_id != hax_cpuid()); targets = cpu2cpumap(cpu_id); // If not considering Windows XP, definitely we don't need this - smp_call_function(&targets, _vcpu_take_off, NULL); + hax_smp_call_function(&targets, _vcpu_take_off, NULL); } return 0; diff --git a/docs/manual-linux.md b/docs/manual-linux.md new file mode 100644 index 00000000..a00542f5 --- /dev/null +++ b/docs/manual-linux.md @@ -0,0 +1,50 @@ +## Building for Linux + +**Disclaimer: Support for Linux is experimental.** + +### Prerequisites +* Linux headers +* NASM 2.11 or later + +### Build steps +1. `cd platforms/linux/` +1. `make` + +## Testing on Linux +### System requirements +Note that these are requirements for the _test_ environment, which does not +have to be the same as the _build_ environment. + +1. Hardware requirements are the same as those for Windows. +1. Linux 4.x or later. + +### Loading and unloading the kernel module +To load the kernel module: +1. Make sure no other HAXM kernel module is loaded. If the output of +`lsmod | grep haxm` is not empty, you must unload the existing HAXM module +first: `sudo make uninstall`. +1. Run `sudo make install`. + +To unload the kernel module: +1. Run `sudo make uninstall`. + +Additionally, if you want to use HAXM as a non-privileged user, +you can enter the following command to make the current user +part of the *haxm* group (requires logging out and back in!): + +```bash +sudo adduser `id -un` haxm +``` + +Note that in recent Linux distributions, you might get a `sign-file` error +since the kernel Makefiles will attempt to sign the kernel module with +`certs/signing_key.pem`. Unless driver signature enforcement has been enabled, +you can safely ignore this error. Alternatively, you can follow +[this guide][linux-module-signing] to self-sign your drivers. + +### Viewing logs +On Linux, HAXM debug output goes to the kernel log, and can be +retrieved via `dmesg` (if supported, the `-w` flag follows new output). +You can filter these entries via `dmesg | grep haxm`.
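+ +### Example session + +For reference, the following is an illustrative end-to-end session that builds, installs and sanity-checks the module (a minimal sketch; `/dev/HAX` is the misc device node registered by the module sources in this patch, and exact names may change in future versions): + +```bash +cd platforms/linux/ +make +sudo make install +lsmod | grep haxm # the haxm module should now be listed +ls -l /dev/HAX # device node created when the module loads +dmesg | grep haxm # check the kernel log for initialization messages +```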
+ +[linux-module-signing]: https://www.kernel.org/doc/html/v4.18/admin-guide/module-signing.html diff --git a/include/darwin/hax_mac.h b/include/darwin/hax_mac.h index 53451de4..e2bf37ec 100644 --- a/include/darwin/hax_mac.h +++ b/include/darwin/hax_mac.h @@ -205,4 +205,14 @@ static inline bool cpu_is_online(int cpu) return !!(((uint64_t)1 << cpu) & cpu_online_map); } +#ifdef __cplusplus +extern "C" { +#endif + +extern int cpu_number(void); + +#ifdef __cplusplus +} +#endif + #endif // HAX_DARWIN_HAX_MAC_H_ diff --git a/include/darwin/hax_types_mac.h b/include/darwin/hax_types_mac.h index 46372bde..4f122f24 100644 --- a/include/darwin/hax_types_mac.h +++ b/include/darwin/hax_types_mac.h @@ -82,10 +82,10 @@ static hax_atomic_t hax_atomic_dec(hax_atomic_t *address) /* * According to kernel programming, the Atomic function is barrier - * Although we can write a smp_mb from scrach, this simple one can resolve our + * Although we can write a hax_smp_mb from scrach, this simple one can resolve our * issue */ -static inline void smp_mb(void) +static inline void hax_smp_mb(void) { SInt32 atom; OSAddAtomic(1, &atom); @@ -132,10 +132,10 @@ typedef struct hax_kmap_phys { typedef ulong mword; typedef mword preempt_flag; -typedef uint64_t cpumap_t; +typedef uint64_t hax_cpumap_t; typedef uint64_t HAX_VADDR_T; -static inline cpumap_t cpu2cpumap(int cpu) +static inline hax_cpumap_t cpu2cpumap(int cpu) { return (0x1UL << cpu); } diff --git a/include/hax.h b/include/hax.h index 8f30d8f5..444bcd2b 100644 --- a/include/hax.h +++ b/include/hax.h @@ -39,8 +39,6 @@ // declaration struct vcpu_t; -extern int hax_page_size; - #define HAX_CUR_VERSION 0x0004 #define HAX_COMPAT_VERSION 0x0001 @@ -207,7 +205,7 @@ void hax_set_page(phax_page page); static inline uint64_t hax_page2pa(phax_page page) { - return hax_page2pfn(page) << PAGE_SHIFT; + return hax_page2pfn(page) << HAX_PAGE_SHIFT; } #define hax_page_pa hax_page2pa @@ -232,15 +230,15 @@ static inline unsigned char *hax_page_va(struct hax_page *page) #define HAX_MAX_CPUS (sizeof(uint64_t) * 8) /* Host SMP */ -extern cpumap_t cpu_online_map; +extern hax_cpumap_t cpu_online_map; extern int max_cpus; #ifdef __cplusplus extern "C" { #endif -int smp_call_function(cpumap_t *cpus, void(*scfunc)(void *param), void *param); -extern int cpu_number(void); +int hax_smp_call_function(hax_cpumap_t *cpus, void(*scfunc)(void *param), + void *param); uint32_t hax_cpuid(void); int proc_event_pending(struct vcpu_t *vcpu); @@ -268,6 +266,9 @@ int hax_em64t_enabled(void); #ifdef HAX_PLATFORM_DARWIN #include "darwin/hax_mac.h" #endif +#ifdef HAX_PLATFORM_LINUX +#include "linux/hax_linux.h" +#endif #ifdef HAX_PLATFORM_WINDOWS #include "windows/hax_windows.h" #endif diff --git a/include/hax_interface.h b/include/hax_interface.h index cc1fcea6..00951dbe 100644 --- a/include/hax_interface.h +++ b/include/hax_interface.h @@ -42,6 +42,9 @@ #ifdef HAX_PLATFORM_DARWIN #include "darwin/hax_interface_mac.h" #endif +#ifdef HAX_PLATFORM_LINUX +#include "linux/hax_interface_linux.h" +#endif #ifdef HAX_PLATFORM_WINDOWS #include "windows/hax_interface_windows.h" #endif diff --git a/include/hax_types.h b/include/hax_types.h index e3b44471..ca1bf06a 100644 --- a/include/hax_types.h +++ b/include/hax_types.h @@ -50,6 +50,13 @@ #define HAX_COMPILER_CLANG #define PACKED __attribute__ ((packed)) #define ALIGNED(x) __attribute__ ((aligned(x))) +// GCC +#elif defined(__GNUC__) +#define HAX_COMPILER_GCC +#define PACKED __attribute__ ((packed)) +#define ALIGNED(x) __attribute__ ((aligned(x))) +#define __cdecl 
__attribute__ ((__cdecl__,regparm(0))) +#define __stdcall __attribute__ ((__stdcall__)) // MSVC #elif defined(_MSC_VER) #define HAX_COMPILER_MSVC @@ -67,6 +74,10 @@ #if defined(__MACH__) #define HAX_PLATFORM_DARWIN #include "darwin/hax_types_mac.h" +// Linux +#elif defined(__linux__) +#define HAX_PLATFORM_LINUX +#include "linux/hax_types_linux.h" // Windows #elif defined(_WIN32) #define HAX_PLATFORM_WINDOWS diff --git a/include/linux/hax_interface_linux.h b/include/linux/hax_interface_linux.h new file mode 100644 index 00000000..ecb13fd0 --- /dev/null +++ b/include/linux/hax_interface_linux.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2011 Intel Corporation + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HAX_LINUX_HAX_INTERFACE_LINUX_H_ +#define HAX_LINUX_HAX_INTERFACE_LINUX_H_ + +#include + +/* The mac specific interface to qemu because of mac's + * special handling like hax tunnel allocation etc */ +/* HAX model level ioctl */ +#define HAX_IOCTL_VERSION _IOWR(0, 0x20, struct hax_module_version) +#define HAX_IOCTL_CREATE_VM _IOWR(0, 0x21, uint32_t) +#define HAX_IOCTL_DESTROY_VM _IOW(0, 0x22, uint32_t) +#define HAX_IOCTL_CAPABILITY _IOR(0, 0x23, struct hax_capabilityinfo) +#define HAX_IOCTL_SET_MEMLIMIT _IOWR(0, 0x24, struct hax_set_memlimit) + +// Only for backward compatibility with old Qemu. 
+#define HAX_VM_IOCTL_VCPU_CREATE_ORIG _IOR(0, 0x80, int) + +#define HAX_VM_IOCTL_VCPU_CREATE _IOWR(0, 0x80, uint32_t) +#define HAX_VM_IOCTL_ALLOC_RAM _IOWR(0, 0x81, struct hax_alloc_ram_info) +#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info) +#define HAX_VM_IOCTL_VCPU_DESTROY _IOR(0, 0x83, uint32_t) +#define HAX_VM_IOCTL_ADD_RAMBLOCK _IOW(0, 0x85, struct hax_ramblock_info) +#define HAX_VM_IOCTL_SET_RAM2 _IOWR(0, 0x86, struct hax_set_ram_info2) +#define HAX_VM_IOCTL_PROTECT_RAM _IOWR(0, 0x87, struct hax_protect_ram_info) + +#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0) +#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data) +#define HAX_VCPU_IOCTL_GET_MSRS _IOWR(0, 0xc2, struct hax_msr_data) + +#define HAX_VCPU_IOCTL_SET_FPU _IOW(0, 0xc3, struct fx_layout) +#define HAX_VCPU_IOCTL_GET_FPU _IOR(0, 0xc4, struct fx_layout) + +#define HAX_VCPU_IOCTL_SETUP_TUNNEL _IOWR(0, 0xc5, struct hax_tunnel_info) +#define HAX_VCPU_IOCTL_INTERRUPT _IOWR(0, 0xc6, uint32_t) +#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t) +#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t) + +/* API 2.0 */ +#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION _IOW(0, 0x84, struct hax_qemu_version) + +#define HAX_IOCTL_VCPU_DEBUG _IOW(0, 0xc9, struct hax_debug_t) + +#define HAX_KERNEL64_CS 0x80 +#define HAX_KERNEL32_CS 0x08 + +#define is_compatible() 0 + +#endif // HAX_LINUX_HAX_INTERFACE_LINUX_H_ diff --git a/include/linux/hax_linux.h b/include/linux/hax_linux.h new file mode 100644 index 00000000..a86cfa44 --- /dev/null +++ b/include/linux/hax_linux.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2011 Intel Corporation + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef HAX_LINUX_HAX_LINUX_H_ +#define HAX_LINUX_HAX_LINUX_H_ + +#define HAX_RAM_ENTRY_SIZE 0x4000000 + +hax_spinlock *hax_spinlock_alloc_init(void); +void hax_spinlock_free(hax_spinlock *lock); +void hax_spin_lock(hax_spinlock *lock); +void hax_spin_unlock(hax_spinlock *lock); + +hax_mutex hax_mutex_alloc_init(void); +void hax_mutex_lock(hax_mutex lock); +void hax_mutex_unlock(hax_mutex lock); +void hax_mutex_free(hax_mutex lock); + +/* Return true if the bit is set already */ +int hax_test_and_set_bit(int bit, uint64_t *memory); + +/* Return true if the bit is cleared already */ +int hax_test_and_clear_bit(int bit, uint64_t *memory); + +/* Don't care for the big endian situation */ +static inline bool hax_test_bit(int bit, uint64_t *memory) +{ + int byte = bit / 8; + unsigned char *p; + int offset = bit % 8; + + p = (unsigned char *)memory + byte; + return !!(*p & (1 << offset)); +} + +// memcpy_s() is part of the optional Bounds Checking Interfaces specified in +// Annex K of the C11 standard: +// http://en.cppreference.com/w/c/string/byte/memcpy +// However, it is not implemented by Clang: +// https://stackoverflow.com/questions/40829032/how-to-install-c11-compiler-on-mac-os-with-optional-string-functions-included +// Provide a simplified implementation here so memcpy_s() can be used instead of +// memcpy() everywhere else, which helps reduce the number of Klocwork warnings. +static inline int memcpy_s(void *dest, size_t destsz, const void *src, + size_t count) +{ + char *dest_start = (char *)dest; + char *dest_end = (char *)dest + destsz; + char *src_start = (char *)src; + char *src_end = (char *)src + count; + bool overlap; + + if (count == 0) + return 0; + + if (!dest || destsz == 0) + return -EINVAL; + + overlap = src_start < dest_start + ? dest_start < src_end : src_start < dest_end; + if (!src || count > destsz || overlap) { + memset(dest, 0, destsz); + return -EINVAL; + } + + memcpy(dest, src, count); + return 0; +} + +/* Why it's a bool? Strange */ +bool hax_cmpxchg32(uint32_t old_val, uint32_t new_val, volatile uint32_t *addr); +bool hax_cmpxchg64(uint64_t old_val, uint64_t new_val, volatile uint64_t *addr); + +static inline bool cpu_is_online(int cpu) +{ + if (cpu < 0 || cpu >= max_cpus) + return 0; + return !!(((mword)1 << cpu) & cpu_online_map); +} + +int hax_notify_host_event(enum hax_notify_event event, uint32_t *param, + uint32_t size); + +extern int default_hax_log_level; + +void hax_error(char *fmt, ...); +void hax_warning(char *fmt, ...); +void hax_info(char *fmt, ...); +void hax_debug(char *fmt, ...); +void hax_log(char *fmt, ...); + +#define hax_log hax_info + +//#define hax_panic DbgPrint +#define hax_panic hax_error + +//#define assert(condition) BUG_ON(!(condition)) +void assert(bool condition); + +#endif // HAX_LINUX_HAX_LINUX_H_ diff --git a/include/linux/hax_types_linux.h b/include/linux/hax_types_linux.h new file mode 100644 index 00000000..593a7df4 --- /dev/null +++ b/include/linux/hax_types_linux.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2011 Intel Corporation + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HAX_LINUX_HAX_TYPES_LINUX_H_ +#define HAX_LINUX_HAX_TYPES_LINUX_H_ + +#include +#include +#include + +// Signed Types +typedef int8_t int8; +typedef int16_t int16; +typedef int32_t int32; +typedef int64_t int64; + +// Unsigned Types +typedef uint8_t uint8; +typedef uint16_t uint16; +typedef uint32_t uint32; +typedef uint64_t uint64; + +typedef unsigned int uint; +typedef unsigned long ulong; +typedef unsigned long ulong_t; + +#if defined(__i386__) +typedef uint32_t mword; +#endif +#if defined (__x86_64__) +typedef uint64_t mword; +#endif +typedef mword HAX_VADDR_T; + +#include "../hax_list.h" +struct hax_page { + void *kva; + struct page *page; + uint64_t pa; + uint32_t order; + uint32_t flags; + struct hax_link_list list; +}; + +typedef struct hax_memdesc_user { + int nr_pages; + struct page **pages; +} hax_memdesc_user; + +typedef struct hax_kmap_user { + void *kva; +} hax_kmap_user; + +typedef struct hax_memdesc_phys { + struct page *ppage; +} hax_memdesc_phys; + +typedef struct hax_kmap_phys { + void *kva; +} hax_kmap_phys; + +typedef struct hax_spinlock hax_spinlock; + +typedef int hax_cpumap_t; + +static inline hax_cpumap_t cpu2cpumap(int cpu) +{ + return (0x1 << cpu); +} + +/* Remove this later */ +#define is_leaf(x) 1 + +typedef mword preempt_flag; +typedef void *hax_mutex; +typedef uint32_t hax_atomic_t; + +/* Return the value before add */ +hax_atomic_t hax_atomic_add(volatile hax_atomic_t *atom, uint32_t value); + +/* Return the value before the increment */ +hax_atomic_t hax_atomic_inc(volatile hax_atomic_t *atom); + +/* Return the value before the decrement */ +hax_atomic_t hax_atomic_dec(volatile hax_atomic_t *atom); + +void hax_smp_mb(void); + +#endif // HAX_LINUX_HAX_TYPES_LINUX_H_ diff --git a/include/windows/hax_types_windows.h b/include/windows/hax_types_windows.h index 17bb457c..2c2ce73f 100644 --- a/include/windows/hax_types_windows.h +++ b/include/windows/hax_types_windows.h @@ -45,8 +45,8 @@ typedef unsigned char bool; #define is_leaf(x) 1 #endif -typedef KAFFINITY cpumap_t; -inline cpumap_t cpu2cpumap(int cpu) +typedef KAFFINITY hax_cpumap_t; +inline hax_cpumap_t cpu2cpumap(int cpu) { return ((KAFFINITY)0x1 << cpu); } @@ -134,7 +134,7 @@ typedef uint64_t mword; typedef mword HAX_VADDR_T; -static inline void smp_mb(void) +static inline void 
hax_smp_mb(void) { KeMemoryBarrier(); } diff --git a/include/windows/hax_windows.h b/include/windows/hax_windows.h index 804e7966..060e18a7 100644 --- a/include/windows/hax_windows.h +++ b/include/windows/hax_windows.h @@ -38,7 +38,7 @@ /* * According to DDK, the IoAllocateMdl can support at mos - * 64M - page_size * (sizeof(MDL)) / sizeof(ULONG_PTR), so + * 64M - PAGE_SIZE * (sizeof(MDL)) / sizeof(ULONG_PTR), so * take 32M here */ #if (NTDDI_VERSION <= NTDDI_WS03) diff --git a/platforms/darwin/com_intel_hax.c b/platforms/darwin/com_intel_hax.c index 4aa25f0f..1071bad0 100644 --- a/platforms/darwin/com_intel_hax.c +++ b/platforms/darwin/com_intel_hax.c @@ -99,7 +99,7 @@ static int lock_prim_init(void) return -1; } -cpumap_t cpu_online_map; +hax_cpumap_t cpu_online_map; int max_cpus; void get_online_map(void *param) @@ -125,7 +125,7 @@ static void init_cpu_info(void) uint64_t possible_map, omap = 0; possible_map = ~0ULL; - smp_call_function(&possible_map, get_online_map, &omap); + hax_smp_call_function(&possible_map, get_online_map, &omap); printf("possible map %llx cpu_online_map %llx\n", possible_map, omap); cpu_online_map = omap; max_cpus = real_ncpus; diff --git a/platforms/darwin/hax_wrapper.cpp b/platforms/darwin/hax_wrapper.cpp index 4edcc78a..c28a7509 100644 --- a/platforms/darwin/hax_wrapper.cpp +++ b/platforms/darwin/hax_wrapper.cpp @@ -66,18 +66,16 @@ extern "C" int hax_log_level(int level, const char *fmt, ...) struct smp_call_parameter { void (*func)(void *); void *param; - cpumap_t *cpus; + hax_cpumap_t *cpus; }; extern "C" void mp_rendezvous_no_intrs(void (*action_func)(void *), void *arg); -extern "C" int cpu_number(void); - void smp_cfunction(void *param) { int cpu_id; void (*action)(void *parap); - cpumap_t *hax_cpus; + hax_cpumap_t *hax_cpus; struct smp_call_parameter *p; p = (struct smp_call_parameter *)param; @@ -89,7 +87,7 @@ void smp_cfunction(void *param) action(p->param); } -extern "C" int smp_call_function(cpumap_t *cpus, void (*scfunc)(void *), +extern "C" int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *), void *param) { struct smp_call_parameter sp; diff --git a/platforms/linux/.gitignore b/platforms/linux/.gitignore new file mode 100644 index 00000000..e6fa4e1a --- /dev/null +++ b/platforms/linux/.gitignore @@ -0,0 +1,7 @@ +# Build +*.ko +*.mod.c +.tmp_versions +.cache.mk +modules.order +Module.symvers diff --git a/platforms/linux/Kbuild b/platforms/linux/Kbuild new file mode 100644 index 00000000..e1d5f1a7 --- /dev/null +++ b/platforms/linux/Kbuild @@ -0,0 +1,37 @@ +ccflags-y += -Wno-unused-function +obj-m := haxm.o + +# haxm +haxm-y += ../../core/chunk.o +haxm-y += ../../core/cpu.o +haxm-y += ../../core/cpuid.o +haxm-y += ../../core/dump.o +haxm-y += ../../core/emulate.o +haxm-y += ../../core/emulate_ops.o +haxm-y += ../../core/ept.o +haxm-y += ../../core/ept2.o +haxm-y += ../../core/ept_tree.o +haxm-y += ../../core/gpa_space.o +haxm-y += ../../core/hax.o +haxm-y += ../../core/ia32.o +haxm-y += ../../core/ia32_ops.o +haxm-y += ../../core/intr_exc.o +haxm-y += ../../core/memory.o +haxm-y += ../../core/memslot.o +haxm-y += ../../core/name.o +haxm-y += ../../core/page_walker.o +haxm-y += ../../core/ramblock.o +haxm-y += ../../core/vcpu.o +haxm-y += ../../core/vm.o +haxm-y += ../../core/vmx.o +haxm-y += ../../core/vmx_ops.o +haxm-y += ../../core/vtlb.o + +# haxm-linux +haxm-y += components.o +haxm-y += hax_entry.o +haxm-y += hax_event.o +haxm-y += hax_host_mem.o +haxm-y += hax_mem_alloc.o +haxm-y += hax_mm.o +haxm-y += hax_wrapper.o diff --git 
a/platforms/linux/Makefile b/platforms/linux/Makefile new file mode 100644 index 00000000..de8f1f5d --- /dev/null +++ b/platforms/linux/Makefile @@ -0,0 +1,45 @@ +KVER ?= $(shell uname -r) +KDIR ?= /lib/modules/$(KVER)/build + +MACHINE := $(shell uname -m) +ifeq ($(MACHINE),x86_64) + NASM_OPTS := -f elf64 +else + NASM_OPTS := -f elf32 +endif + +HAXM_DIR := ../../core +HAXM_ASM_SRC := $(wildcard $(HAXM_DIR)/*.asm) +HAXM_ASM_OBJ := $(patsubst $(HAXM_DIR)/%.asm, $(HAXM_DIR)/%.o, $(HAXM_ASM_SRC)) +HAXM_ASM_CMD := $(patsubst $(HAXM_DIR)/%.asm, $(HAXM_DIR)/.%.o.cmd, $(HAXM_ASM_SRC)) +HAXM_C_SRC := $(wildcard $(HAXM_DIR)/*.c) +HAXM_C_OBJ := $(patsubst $(HAXM_DIR)/%.c, $(HAXM_DIR)/%.o, $(HAXM_C_SRC)) +HAXM_C_CMD := $(patsubst $(HAXM_DIR)/%.c, $(HAXM_DIR)/.%.o.cmd, $(HAXM_C_SRC)) + +.PHONY: all modules modules_install install uninstall clean + +%.o: %.asm + $(eval CMD := $(join $(dir $@), .$(notdir $@).cmd)) + nasm $(NASM_OPTS) -o $@ $^ + echo "" > $(CMD) + +all: modules + +modules: $(HAXM_ASM_OBJ) + $(MAKE) -C $(KDIR) M=$$PWD modules + +modules_install: + $(MAKE) -C $(KDIR) M=$$PWD modules_install + +install: modules_install + ./haxm-install.sh + +uninstall: + ./haxm-uninstall.sh + +clean: + $(MAKE) -C $(KDIR) M=$$PWD clean + rm -f $(HAXM_ASM_OBJ) + rm -f $(HAXM_ASM_CMD) + rm -f $(HAXM_C_OBJ) + rm -f $(HAXM_C_CMD) diff --git a/platforms/linux/components.c b/platforms/linux/components.c new file mode 100644 index 00000000..c6bf7ba8 --- /dev/null +++ b/platforms/linux/components.c @@ -0,0 +1,617 @@ +/* + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "../../core/include/hax_core_interface.h" + +#define HAX_VM_DEVFS_FMT "hax_vm/vm%02d" +#define HAX_VCPU_DEVFS_FMT "hax_vm%02d/vcpu%02d" + +typedef struct hax_vm_linux_t { + struct vm_t *cvm; + int id; + struct miscdevice dev; + char *devname; +} hax_vm_linux_t; + +typedef struct hax_vcpu_linux_t { + struct vcpu_t *cvcpu; + struct hax_vm_linux_t *vm; + int id; + struct miscdevice dev; + char *devname; +} hax_vcpu_linux_t; + +static int hax_vm_open(struct inode *inodep, struct file *filep); +static int hax_vm_release(struct inode *inodep, struct file *filep); +static long hax_vm_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +static int hax_vcpu_open(struct inode *inodep, struct file *filep); +static int hax_vcpu_release(struct inode *inodep, struct file *filep); +static long hax_vcpu_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +static struct file_operations hax_vm_fops = { + .owner = THIS_MODULE, + .open = hax_vm_open, + .release = hax_vm_release, + .unlocked_ioctl = hax_vm_ioctl, + .compat_ioctl = hax_vm_ioctl, +}; + +static struct file_operations hax_vcpu_fops = { + .owner = THIS_MODULE, + .open = hax_vcpu_open, + .release = hax_vcpu_release, + .unlocked_ioctl = hax_vcpu_ioctl, + .compat_ioctl = hax_vcpu_ioctl, +}; + +/* Component management */ + +static void hax_component_perm(const char *devname, struct miscdevice *misc) +{ + int err; + struct path path; + struct inode *inode; + const struct cred *cred; + char devpath[DM_NAME_LEN]; + + if (!misc || !misc->this_device) + return; + + snprintf(devpath, sizeof(devpath), "/dev/%s", devname); + err = kern_path(devpath, LOOKUP_FOLLOW, &path); + if (err || !path.dentry) { + hax_error("Could not obtain device inode\n"); + return; + } + cred = get_current_cred(); + inode = path.dentry->d_inode; + inode->i_uid.val = cred->uid.val; + inode->i_gid.val = cred->gid.val; + inode->i_mode |= 0660; +} + +static hax_vcpu_linux_t* hax_vcpu_create_linux(struct vcpu_t *cvcpu, + hax_vm_linux_t *vm, int vcpu_id) +{ + hax_vcpu_linux_t *vcpu; + + if (!cvcpu || !vm) + return NULL; + + vcpu = kmalloc(sizeof(hax_vcpu_linux_t), GFP_KERNEL); + if (!vcpu) + return NULL; + + memset(vcpu, 0, sizeof(hax_vcpu_linux_t)); + vcpu->cvcpu = cvcpu; + vcpu->id = vcpu_id; + vcpu->vm = vm; + set_vcpu_host(cvcpu, vcpu); + return vcpu; +} + +static void hax_vcpu_destroy_linux(hax_vcpu_linux_t *vcpu) +{ + struct vcpu_t *cvcpu; + + if (!vcpu) + return; + + cvcpu = vcpu->cvcpu; + hax_vcpu_destroy_hax_tunnel(cvcpu); + set_vcpu_host(cvcpu, NULL); + vcpu->cvcpu = NULL; + kfree(vcpu); +} + +int hax_vcpu_create_host(struct vcpu_t *cvcpu, void *vm_host, int vm_id, + int vcpu_id) +{ + int err; + hax_vcpu_linux_t *vcpu; + hax_vm_linux_t *vm; + + vm = (hax_vm_linux_t *)vm_host; + vcpu = hax_vcpu_create_linux(cvcpu, vm, vcpu_id); + if (!vcpu) + return -1; + + vcpu->devname = kmalloc(DM_NAME_LEN, GFP_KERNEL); + snprintf(vcpu->devname, DM_NAME_LEN, HAX_VCPU_DEVFS_FMT, vm_id, vcpu_id); + vcpu->dev.minor = MISC_DYNAMIC_MINOR; + vcpu->dev.name = vcpu->devname; + vcpu->dev.fops = &hax_vcpu_fops; + + err = misc_register(&vcpu->dev); + if (err) { + hax_error("Failed to register HAXM-VCPU device\n"); + hax_vcpu_destroy_linux(vcpu); + return -1; + } + hax_component_perm(vcpu->devname, &vcpu->dev); + hax_info("Created HAXM-VCPU device with minor=%d\n", vcpu->dev.minor); + return 0; +} + +int hax_vcpu_destroy_host(struct vcpu_t *cvcpu, void *vcpu_host) +{ + hax_vcpu_linux_t 
*vcpu; + + vcpu = (hax_vcpu_linux_t *)vcpu_host; + misc_deregister(&vcpu->dev); + kfree(vcpu->devname); + + hax_vcpu_destroy_linux(vcpu); + return 0; +} + +static hax_vm_linux_t *hax_vm_create_linux(struct vm_t *cvm, int vm_id) +{ + hax_vm_linux_t *vm; + + if (!cvm) + return NULL; + + vm = kmalloc(sizeof(hax_vm_linux_t), GFP_KERNEL); + if (!vm) + return NULL; + + memset(vm, 0, sizeof(hax_vm_linux_t)); + vm->cvm = cvm; + vm->id = vm_id; + set_vm_host(cvm, vm); + return vm; +} + +static void hax_vm_destroy_linux(hax_vm_linux_t *vm) +{ + struct vm_t *cvm; + + if (!vm) + return; + + cvm = vm->cvm; + set_vm_host(cvm, NULL); + vm->cvm = NULL; + hax_vm_free_all_ram(cvm); + kfree(vm); +} + +int hax_vm_create_host(struct vm_t *cvm, int vm_id) +{ + int err; + hax_vm_linux_t *vm; + + vm = hax_vm_create_linux(cvm, vm_id); + if (!vm) + return -1; + + vm->devname = kmalloc(DM_NAME_LEN, GFP_KERNEL); + snprintf(vm->devname, DM_NAME_LEN, HAX_VM_DEVFS_FMT, vm_id); + vm->dev.minor = MISC_DYNAMIC_MINOR; + vm->dev.name = vm->devname; + vm->dev.fops = &hax_vm_fops; + + err = misc_register(&vm->dev); + if (err) { + hax_error("Failed to register HAXM-VM device\n"); + hax_vm_destroy_linux(vm); + return -1; + } + hax_component_perm(vm->devname, &vm->dev); + hax_info("Created HAXM-VM device with minor=%d\n", vm->dev.minor); + return 0; +} + +/* When coming here, all vcpus should have been destroyed already. */ +int hax_vm_destroy_host(struct vm_t *cvm, void *vm_host) +{ + hax_vm_linux_t *vm; + + vm = (hax_vm_linux_t *)vm_host; + misc_deregister(&vm->dev); + kfree(vm->devname); + + hax_vm_destroy_linux(vm); + return 0; +} + +/* No corresponding function in Linux side, it can be cleaned later. */ +int hax_destroy_host_interface(void) +{ + return 0; +} + +/* VCPU operations */ + +static int hax_vcpu_open(struct inode *inodep, struct file *filep) +{ + int ret; + struct vcpu_t *cvcpu; + struct hax_vcpu_linux_t *vcpu; + struct miscdevice *miscdev; + + miscdev = filep->private_data; + vcpu = container_of(miscdev, struct hax_vcpu_linux_t, dev); + cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); + + hax_log_level(HAX_LOGD, "HAX vcpu open called\n"); + if (!cvcpu) + return -ENODEV; + + ret = hax_vcpu_core_open(cvcpu); + if (ret) + hax_error("Failed to open core vcpu\n"); + hax_put_vcpu(cvcpu); + return ret; +} + +static int hax_vcpu_release(struct inode *inodep, struct file *filep) +{ + int ret = 0; + struct vcpu_t *cvcpu; + struct hax_vcpu_linux_t *vcpu; + struct miscdevice *miscdev; + + miscdev = filep->private_data; + vcpu = container_of(miscdev, struct hax_vcpu_linux_t, dev); + cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); + + hax_log_level(HAX_LOGD, "HAX vcpu close called\n"); + if (!cvcpu) { + hax_error("Failed to find the vcpu, is it closed already?\n"); + return 0; + } + + /* put the one for vcpu create */ + hax_put_vcpu(cvcpu); + /* put the one just held */ + hax_put_vcpu(cvcpu); + return ret; +} + +static long hax_vcpu_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + void *argp = (void *)arg; + struct vcpu_t *cvcpu; + struct hax_vcpu_linux_t *vcpu; + struct miscdevice *miscdev; + + miscdev = filp->private_data; + vcpu = container_of(miscdev, struct hax_vcpu_linux_t, dev); + cvcpu = hax_get_vcpu(vcpu->vm->id, vcpu->id, 1); + if (!cvcpu) + return -ENODEV; + + switch (cmd) { + case HAX_VCPU_IOCTL_RUN: + ret = vcpu_execute(cvcpu); + break; + case HAX_VCPU_IOCTL_SETUP_TUNNEL: { + struct hax_tunnel_info info; + ret = hax_vcpu_setup_hax_tunnel(cvcpu, &info); + if (copy_to_user(argp, 
&info, sizeof(info))) { + ret = -EFAULT; + break; + } + break; + } + case HAX_VCPU_IOCTL_SET_MSRS: { + struct hax_msr_data msrs; + struct vmx_msr *msr; + int i, fail; + + if (copy_from_user(&msrs, argp, sizeof(msrs))) { + ret = -EFAULT; + break; + } + msr = msrs.entries; + /* nr_msr needs to be verified */ + if (msrs.nr_msr >= 0x20) { + hax_error("MSRS invalid!\n"); + ret = -EFAULT; + break; + } + for (i = 0; i < msrs.nr_msr; i++, msr++) { + fail = vcpu_set_msr(cvcpu, msr->entry, msr->value); + if (fail) { + break; + } + } + msrs.done = i; + break; + } + case HAX_VCPU_IOCTL_GET_MSRS: { + struct hax_msr_data msrs; + struct vmx_msr *msr; + int i, fail; + + if (copy_from_user(&msrs, argp, sizeof(msrs))) { + ret = -EFAULT; + break; + } + msr = msrs.entries; + if(msrs.nr_msr >= 0x20) { + hax_error("MSRS invalid!\n"); + ret = -EFAULT; + break; + } + for (i = 0; i < msrs.nr_msr; i++, msr++) { + fail = vcpu_get_msr(cvcpu, msr->entry, &msr->value); + if (fail) { + break; + } + } + msrs.done = i; + if (copy_to_user(argp, &msrs, sizeof(msrs))) { + ret = -EFAULT; + break; + } + break; + } + case HAX_VCPU_IOCTL_SET_FPU: { + struct fx_layout fl; + if (copy_from_user(&fl, argp, sizeof(fl))) { + ret = -EFAULT; + break; + } + ret = vcpu_put_fpu(cvcpu, &fl); + break; + } + case HAX_VCPU_IOCTL_GET_FPU: { + struct fx_layout fl; + ret = vcpu_get_fpu(cvcpu, &fl); + if (copy_to_user(argp, &fl, sizeof(fl))) { + ret = -EFAULT; + break; + } + break; + } + case HAX_VCPU_SET_REGS: { + struct vcpu_state_t vc_state; + if (copy_from_user(&vc_state, argp, sizeof(vc_state))) { + ret = -EFAULT; + break; + } + ret = vcpu_set_regs(cvcpu, &vc_state); + break; + } + case HAX_VCPU_GET_REGS: { + struct vcpu_state_t vc_state; + ret = vcpu_get_regs(cvcpu, &vc_state); + if (copy_to_user(argp, &vc_state, sizeof(vc_state))) { + ret = -EFAULT; + break; + } + break; + } + case HAX_VCPU_IOCTL_INTERRUPT: { + uint8_t vector; + if (copy_from_user(&vector, argp, sizeof(vector))) { + ret = -EFAULT; + break; + } + vcpu_interrupt(cvcpu, vector); + break; + } + case HAX_IOCTL_VCPU_DEBUG: { + struct hax_debug_t hax_debug; + if (copy_from_user(&hax_debug, argp, sizeof(hax_debug))) { + ret = -EFAULT; + break; + } + vcpu_debug(cvcpu, &hax_debug); + break; + } + default: + // TODO: Print information about the process that sent the ioctl. 
+ hax_error("Unknown VCPU IOCTL 0x%lx\n", cmd); + ret = -ENOSYS; + break; + } + hax_put_vcpu(cvcpu); + return ret; +} + +/* VM operations */ + +static int hax_vm_open(struct inode *inodep, struct file *filep) +{ + int ret; + struct vm_t *cvm; + struct hax_vm_linux_t *vm; + struct miscdevice *miscdev; + + miscdev = filep->private_data; + vm = container_of(miscdev, struct hax_vm_linux_t, dev); + cvm = hax_get_vm(vm->id, 1); + if (!cvm) + return -ENODEV; + + ret = hax_vm_core_open(cvm); + hax_put_vm(cvm); + hax_log_level(HAX_LOGI, "Open VM\n"); + return ret; +} + +static int hax_vm_release(struct inode *inodep, struct file *filep) +{ + struct vm_t *cvm; + struct hax_vm_linux_t *vm; + struct miscdevice *miscdev; + + miscdev = filep->private_data; + vm = container_of(miscdev, struct hax_vm_linux_t, dev); + cvm = hax_get_vm(vm->id, 1); + + hax_log_level(HAX_LOGI, "Close VM\n"); + if (cvm) { + /* put the ref get just now */ + hax_put_vm(cvm); + hax_put_vm(cvm); + } + return 0; +} + +static long hax_vm_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + void *argp = (void *)arg; + struct vm_t *cvm; + struct hax_vm_linux_t *vm; + struct miscdevice *miscdev; + + miscdev = filp->private_data; + vm = container_of(miscdev, struct hax_vm_linux_t, dev); + cvm = hax_get_vm(vm->id, 1); + if (!cvm) + return -ENODEV; + + switch (cmd) { + case HAX_VM_IOCTL_VCPU_CREATE: + case HAX_VM_IOCTL_VCPU_CREATE_ORIG: { + uint32_t vcpu_id, vm_id; + struct vcpu_t *cvcpu; + + vm_id = vm->id; + if (copy_from_user(&vcpu_id, argp, sizeof(vcpu_id))) { + ret = -EFAULT; + break; + } + cvcpu = vcpu_create(cvm, vm, vcpu_id); + if (!cvcpu) { + hax_error("Failed to create vcpu %x on vm %x\n", vcpu_id, vm_id); + ret = -EINVAL; + break; + } + break; + } + case HAX_VM_IOCTL_ALLOC_RAM: { + struct hax_alloc_ram_info info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + hax_info("IOCTL_ALLOC_RAM: vm_id=%d, va=0x%llx, size=0x%x, pad=0x%x\n", + vm->id, info.va, info.size, info.pad); + ret = hax_vm_add_ramblock(cvm, info.va, info.size); + break; + } + case HAX_VM_IOCTL_ADD_RAMBLOCK: { + struct hax_ramblock_info info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + if (info.reserved) { + hax_error("IOCTL_ADD_RAMBLOCK: vm_id=%d, reserved=0x%llx\n", + vm->id, info.reserved); + ret = -EINVAL; + break; + } + hax_info("IOCTL_ADD_RAMBLOCK: vm_id=%d, start_va=0x%llx, size=0x%llx\n", + vm->id, info.start_va, info.size); + ret = hax_vm_add_ramblock(cvm, info.start_va, info.size); + break; + } + case HAX_VM_IOCTL_SET_RAM: { + struct hax_set_ram_info info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + ret = hax_vm_set_ram(cvm, &info); + break; + } +#ifdef CONFIG_HAX_EPT2 + case HAX_VM_IOCTL_SET_RAM2: { + struct hax_set_ram_info2 info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + if (info.reserved1 || info.reserved2) { + hax_error("IOCTL_SET_RAM2: vm_id=%d, reserved1=0x%x reserved2=0x%llx\n", + vm->id, info.reserved1, info.reserved2); + ret = -EINVAL; + break; + } + ret = hax_vm_set_ram2(cvm, &info); + break; + } + case HAX_VM_IOCTL_PROTECT_RAM: { + struct hax_protect_ram_info info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + if (info.reserved) { + hax_error("IOCTL_PROTECT_RAM: vm_id=%d, reserved=0x%x\n", + vm->id, info.reserved); + ret = -EINVAL; + break; + } + ret = hax_vm_protect_ram(cvm, &info); + break; + } +#endif + case 
HAX_VM_IOCTL_NOTIFY_QEMU_VERSION: { + struct hax_qemu_version info; + if (copy_from_user(&info, argp, sizeof(info))) { + ret = -EFAULT; + break; + } + // TODO: Print information about the process that sent the ioctl. + ret = hax_vm_set_qemuversion(cvm, &info); + break; + } + default: + // TODO: Print information about the process that sent the ioctl. + hax_error("Unknown VM IOCTL 0x%lx\n", cmd); + break; + } + hax_put_vm(cvm); + return ret; +} diff --git a/platforms/linux/hax_entry.c b/platforms/linux/hax_entry.c new file mode 100644 index 00000000..893e64f4 --- /dev/null +++ b/platforms/linux/hax_entry.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "../../include/hax.h" +#include "../../include/hax_interface.h" +#include "../../include/hax_release_ver.h" +#include "../../core/include/hax_core_interface.h" + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Kryptos Logic"); +MODULE_DESCRIPTION("Hypervisor that provides x86 virtualization on Intel VT-x compatible CPUs."); +MODULE_VERSION(HAXM_RELEASE_VERSION_STR); + +#define HAX_DEVICE_NAME "HAX" + +static long hax_dev_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +static struct file_operations hax_dev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = hax_dev_ioctl, + .compat_ioctl = hax_dev_ioctl, +}; + +static struct miscdevice hax_dev = { + MISC_DYNAMIC_MINOR, + HAX_DEVICE_NAME, + &hax_dev_fops, +}; + +static long hax_dev_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + void *argp = (void *)arg; + + switch (cmd) { + case HAX_IOCTL_VERSION: { + struct hax_module_version version = {}; + version.cur_version = HAX_CUR_VERSION; + version.compat_version = HAX_COMPAT_VERSION; + if (copy_to_user(argp, &version, sizeof(version))) + return -EFAULT; + break; + } + case HAX_IOCTL_CAPABILITY: { + struct hax_capabilityinfo capab = {}; + hax_get_capability(&capab, sizeof(capab), NULL); + if (copy_to_user(argp, &capab, sizeof(capab))) + return -EFAULT; + break; + } + case HAX_IOCTL_SET_MEMLIMIT: { + struct hax_set_memlimit memlimit = {}; + if (copy_from_user(&memlimit, argp, sizeof(memlimit))) + return -EFAULT; + ret = hax_set_memlimit(&memlimit, sizeof(memlimit), NULL); + break; + } + case HAX_IOCTL_CREATE_VM: { + int vm_id; + struct vm_t *cvm; + + cvm = hax_create_vm(&vm_id); + if (!cvm) { + hax_log_level(HAX_LOGE, "Failed to create the HAX VM\n"); + ret = -ENOMEM; + break; + } + + if (copy_to_user(argp, &vm_id, sizeof(vm_id))) + return -EFAULT; + break; + } + default: + break; + } + return ret; +} + +static int __init hax_driver_init(void) +{ + int i, err; + + // Initialization + max_cpus = num_present_cpus(); + cpu_online_map = 0; + for (i = 0; i < max_cpus; i++) { + if (cpu_online(i)) + cpu_online_map |= (1ULL << i); + } + + if (hax_module_init() < 0) { + hax_error("Failed to initialize HAXM module\n"); + return -EAGAIN; + } + + err = misc_register(&hax_dev); + if (err) { + hax_error("Failed to register HAXM device\n"); + hax_module_exit(); + return err; + } + + hax_info("Created HAXM device with minor=%d\n", hax_dev.minor); + return 0; +} + +static void __exit hax_driver_exit(void) +{ + if (hax_module_exit() < 0) { + hax_error("Failed to finalize HAXM module\n"); + } + + misc_deregister(&hax_dev); + hax_info("Removed HAXM device\n"); +} + +module_init(hax_driver_init); +module_exit(hax_driver_exit); diff --git a/platforms/linux/hax_event.c b/platforms/linux/hax_event.c new file mode 100644 index 00000000..d5a66eba --- /dev/null +++ b/platforms/linux/hax_event.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. 
Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "../../include/hax.h" + +int hax_notify_host_event(enum hax_notify_event event, uint32_t *param, + uint32_t size) +{ + return 0; +} diff --git a/platforms/linux/hax_host_mem.c b/platforms/linux/hax_host_mem.c new file mode 100644 index 00000000..71e689b8 --- /dev/null +++ b/platforms/linux/hax_host_mem.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2018 Kryptos Logic + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "../../include/hax_host_mem.h" +#include "../../core/include/paging.h" + +#include +#include +#include +#include +#include + +int hax_pin_user_pages(uint64_t start_uva, uint64_t size, hax_memdesc_user *memdesc) +{ + int nr_pages; + int nr_pages_pinned; + struct page **pages; + + if (start_uva & ~PAGE_MASK) + return -EINVAL; + if (size & ~PAGE_MASK) + return -EINVAL; + if (!size) + return -EINVAL; + + nr_pages = ((size - 1) / PAGE_SIZE) + 1; + pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL); + if (!pages) + return -ENOMEM; + + nr_pages_pinned = get_user_pages_fast(start_uva, nr_pages, 1, pages); + if (nr_pages_pinned < 0) { + kfree(pages); + return -EFAULT; + } + memdesc->nr_pages = nr_pages_pinned; + memdesc->pages = pages; + return 0; +} + +int hax_unpin_user_pages(hax_memdesc_user *memdesc) +{ + if (!memdesc) + return -EINVAL; + if (!memdesc->pages) + return -EINVAL; + +#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,15,0) + release_pages(memdesc->pages, memdesc->nr_pages, 1); +#else + release_pages(memdesc->pages, memdesc->nr_pages); +#endif + return 0; +} + +uint64_t hax_get_pfn_user(hax_memdesc_user *memdesc, uint64_t uva_offset) +{ + int page_idx; + + page_idx = uva_offset / PAGE_SIZE; + if (page_idx >= memdesc->nr_pages) + return -EINVAL; + + return page_to_pfn(memdesc->pages[page_idx]); +} + +void * hax_map_user_pages(hax_memdesc_user *memdesc, uint64_t uva_offset, + uint64_t size, hax_kmap_user *kmap) +{ + void *kva; + int page_idx_start; + int page_idx_stop; + int subrange_pages_nr; + struct page **subrange_pages; + + if (!memdesc || !kmap || size == 0) + return NULL; + + page_idx_start = uva_offset / PAGE_SIZE; + page_idx_stop = (uva_offset + size - 1) / PAGE_SIZE; + if ((page_idx_start >= memdesc->nr_pages) || + (page_idx_stop >= memdesc->nr_pages)) + return NULL; + + subrange_pages_nr = page_idx_stop - page_idx_start + 1; + subrange_pages = &memdesc->pages[page_idx_start]; + kva = vmap(subrange_pages, subrange_pages_nr, VM_MAP, PAGE_KERNEL); + kmap->kva = kva; + return kva; +} + +int hax_unmap_user_pages(hax_kmap_user *kmap) +{ + if (!kmap) + return -EINVAL; + + vunmap(kmap->kva); + return 0; +} + +int hax_alloc_page_frame(uint8_t flags, hax_memdesc_phys *memdesc) +{ + gfp_t gfp_flags; + + if (!memdesc) + return -EINVAL; + + gfp_flags = GFP_KERNEL; + if (flags & HAX_PAGE_ALLOC_ZEROED) + gfp_flags |= __GFP_ZERO; + + // TODO: Support HAX_PAGE_ALLOC_BELOW_4G + if (flags & HAX_PAGE_ALLOC_BELOW_4G) { + hax_warning("%s: HAX_PAGE_ALLOC_BELOW_4G is ignored\n", __func__); + } + + memdesc->ppage = alloc_page(gfp_flags); + if (!memdesc->ppage) + return -ENOMEM; + return 0; +} + +int hax_free_page_frame(hax_memdesc_phys *memdesc) +{ + if (!memdesc || !memdesc->ppage) + return -EINVAL; + + free_page((unsigned long)page_address(memdesc->ppage)); + return 0; +} + +uint64_t hax_get_pfn_phys(hax_memdesc_phys *memdesc) +{ + if (!memdesc || !memdesc->ppage) + return INVALID_PFN; + + return page_to_pfn(memdesc->ppage); +} + +void * hax_get_kva_phys(hax_memdesc_phys *memdesc) +{ + if (!memdesc || !memdesc->ppage) + return NULL; + + return page_address(memdesc->ppage); +} + +void * hax_map_page_frame(uint64_t pfn, hax_kmap_phys *kmap) +{ + void *kva; + struct page *ppage; + + if (!kmap) + return NULL; + + ppage = pfn_to_page(pfn); + kva = vmap(&ppage, 1, VM_MAP, PAGE_KERNEL); + kmap->kva = kva; + return kva; +} + +int hax_unmap_page_frame(hax_kmap_phys *kmap) +{ + if (!kmap) + return -EINVAL; + + vfree(kmap->kva); + return 0; +} diff --git a/platforms/linux/hax_mem_alloc.c 
new file mode 100644
index 00000000..144fa9da
--- /dev/null
+++ b/platforms/linux/hax_mem_alloc.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2018 Kryptos Logic
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../include/hax.h"
+
+#include <linux/io.h>
+#include <linux/slab.h>
+
+void * hax_vmalloc(uint32_t size, uint32_t flags)
+{
+    void *ptr;
+    (void)flags;
+
+    if (size == 0)
+        return NULL;
+
+    // NOTE: Flags ignored. Linux allows only non-pageable memory in kernel.
+    ptr = kzalloc(size, GFP_KERNEL);
+    return ptr;
+}
+
+void hax_vfree_flags(void *va, uint32_t size, uint32_t flags)
+{
+    (void)size;
+    (void)flags;
+
+    // NOTE: Flags ignored. Linux allows only non-pageable memory in kernel.
+    kfree(va);
+}
+
+void hax_vfree(void *va, uint32_t size)
+{
+    hax_vfree_flags(va, size, 0);
+}
+
+void hax_vfree_aligned(void *va, uint32_t size, uint32_t alignment,
+                       uint32_t flags)
+{
+    hax_vfree_flags(va, size, flags);
+}
+
+void * hax_vmap(hax_pa_t pa, uint32_t size)
+{
+    return ioremap(pa, size);
+}
+
+void hax_vunmap(void *addr, uint32_t size)
+{
+    iounmap(addr);
+}
+
+hax_pa_t hax_pa(void *va)
+{
+    return virt_to_phys(va);
+}
+
+struct hax_page * hax_alloc_pages(int order, uint32_t flags, bool vmap)
+{
+    struct hax_page *ppage;
+    struct page *page;
+    gfp_t gfp_mask;
+
+    ppage = kmalloc(sizeof(struct hax_page), GFP_KERNEL);
+    if (!ppage)
+        return NULL;
+
+    gfp_mask = GFP_KERNEL;
+    // TODO: Support HAX_MEM_LOW_4G
+    if (flags & HAX_MEM_LOW_4G) {
+        hax_warning("%s: HAX_MEM_LOW_4G is ignored\n", __func__);
+    }
+
+    page = alloc_pages(gfp_mask, order);
+    if (!page) {
+        kfree(ppage);
+        return NULL;
+    }
+
+    ppage->page = page;
+    ppage->pa = page_to_phys(page);
+    ppage->kva = page_address(page);
+    ppage->flags = flags;
+    ppage->order = order;
+    return ppage;
+}
+
+void hax_free_pages(struct hax_page *pages)
+{
+    if (!pages)
+        return;
+
+    free_pages((unsigned long)pages->kva, pages->order);
+}
+
+void * hax_map_page(struct hax_page *page)
+{
+    if (!page)
+        return NULL;
+
+    return page->kva;
+}
+
+void hax_unmap_page(struct hax_page *page)
+{
+    return;
+}
+
+hax_pfn_t hax_page2pfn(struct hax_page *page)
+{
+    if (!page)
+        return 0;
+
+    return page->pa >> PAGE_SHIFT;
+}
+
+void hax_clear_page(struct hax_page *page)
+{
+    memset((void *)page->kva, 0, PAGE_SIZE);
+}
+
+void hax_set_page(struct hax_page *page)
+{
+    memset((void *)page->kva, 0xFF, PAGE_SIZE);
+}
+
+/* Initialize memory allocation related structures */
+int hax_malloc_init(void)
+{
+    return 0;
+}
+
+void hax_malloc_exit(void)
+{
+}
diff --git a/platforms/linux/hax_mm.c b/platforms/linux/hax_mm.c
new file mode 100644
index 00000000..5af70280
--- /dev/null
+++ b/platforms/linux/hax_mm.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2018 Kryptos Logic
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../include/hax.h"
+
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+typedef struct hax_vcpu_mem_hinfo_t {
+    int flags;
+    int nr_pages;
+    struct page **pages;
+} hax_vcpu_mem_hinfo_t;
+
+int hax_clear_vcpumem(struct hax_vcpu_mem *mem)
+{
+    struct hax_vcpu_mem_hinfo_t *hinfo;
+
+    if (!mem)
+        return -EINVAL;
+
+    hinfo = mem->hinfo;
+    vunmap(mem->kva);
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(4,15,0)
+    release_pages(hinfo->pages, hinfo->nr_pages, 1);
+#else
+    release_pages(hinfo->pages, hinfo->nr_pages);
+#endif
+    if (!(hinfo->flags & HAX_VCPUMEM_VALIDVA)) {
+        // TODO: This caused a kernel panic, now it just leaks memory.
+        //vm_munmap(mem->uva, mem->size);
+    }
+    kfree(hinfo->pages);
+    kfree(hinfo);
+    return 0;
+}
+
+int hax_setup_vcpumem(struct hax_vcpu_mem *mem, uint64_t uva, uint32_t size,
+                      int flags)
+{
+    int err = 0;
+    int nr_pages;
+    int nr_pages_map;
+    struct page **pages = NULL;
+    struct hax_vcpu_mem_hinfo_t *hinfo = NULL;
+    void *kva;
+
+    if (!mem || !size)
+        return -EINVAL;
+
+    hinfo = kmalloc(sizeof(struct hax_vcpu_mem_hinfo_t), GFP_KERNEL);
+    if (!hinfo) {
+        err = -ENOMEM;
+        goto fail;
+    }
+
+    nr_pages = ((size - 1) / PAGE_SIZE) + 1;
+    pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_KERNEL);
+    if (!pages) {
+        err = -ENOMEM;
+        goto fail;
+    }
+
+    if (!(flags & HAX_VCPUMEM_VALIDVA)) {
+        uva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
+                      MAP_ANONYMOUS | MAP_PRIVATE, 0);
+        if (!uva) {
+            err = -ENOMEM;
+            goto fail;
+        }
+    }
+    nr_pages_map = get_user_pages_fast(uva, nr_pages, 1, pages);
+    if (nr_pages_map < 0) {
+        err = -EFAULT;
+        goto fail;
+    }
+    kva = vmap(pages, nr_pages_map, VM_MAP, PAGE_KERNEL);
+
+    hinfo->flags = flags;
+    hinfo->pages = pages;
+    hinfo->nr_pages = nr_pages_map;
+
+    mem->uva = uva;
+    mem->kva = kva;
+    mem->hinfo = hinfo;
+    mem->size = size;
+    return 0;
+
+fail:
+    kfree(pages);
+    kfree(hinfo);
+    return err;
+}
+
+uint64_t hax_get_memory_threshold(void)
+{
+#ifdef CONFIG_HAX_EPT2
+    // Since there is no memory cap, just return a sufficiently large value
+    return 1ULL << 48; // PHYSADDR_MAX + 1
+#else // !CONFIG_HAX_EPT2
+    return 0;
+#endif // CONFIG_HAX_EPT2
+}
diff --git a/platforms/linux/hax_wrapper.c b/platforms/linux/hax_wrapper.c
new file mode 100644
index 00000000..3d60883b
--- /dev/null
+++ b/platforms/linux/hax_wrapper.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2018 Kryptos Logic
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "../../include/hax.h"
+#include "../../core/include/hax_core_interface.h"
+#include "../../core/include/ia32.h"
+
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+
+#include <asm/cmpxchg.h>
+
+int default_hax_log_level = 3;
+int max_cpus;
+hax_cpumap_t cpu_online_map;
+
+int hax_log_level(int level, const char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    if (level < default_hax_log_level)
+        return 0;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm: %pV", KERN_ERR, &vaf);
+    va_end(args);
+    return 0;
+}
+
+uint32_t hax_cpuid(void)
+{
+    return smp_processor_id();
+}
+
+typedef struct smp_call_parameter {
+    void (*func)(void *);
+    void *param;
+    hax_cpumap_t *cpus;
+} smp_call_parameter;
+
+static void smp_cfunction(void *p)
+{
+    struct smp_call_parameter *info = p;
+    hax_cpumap_t *cpus;
+    uint32_t cpuid;
+
+    cpus = info->cpus;
+    cpuid = hax_cpuid();
+    if (*cpus & ((hax_cpumap_t)1 << cpuid))
+        info->func(info->param);
+}
+
+int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *),
+                          void *param)
+{
+    smp_call_parameter info;
+
+    info.func = scfunc;
+    info.param = param;
+    info.cpus = cpus;
+    on_each_cpu(smp_cfunction, &info, 1);
+    return 0;
+}
+
+/* XXX */
+int proc_event_pending(struct vcpu_t *vcpu)
+{
+    return vcpu_event_pending(vcpu);
+}
+
+void hax_disable_preemption(preempt_flag *eflags)
+{
+    preempt_disable();
+}
+
+void hax_enable_preemption(preempt_flag *eflags)
+{
+    preempt_enable();
+}
+
+void hax_enable_irq(void)
+{
+    asm_enable_irq();
+}
+
+void hax_disable_irq(void)
+{
+    asm_disable_irq();
+}
+
+void hax_error(char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    if (HAX_LOGE < default_hax_log_level)
+        return;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm_error: %pV", KERN_ERR, &vaf);
+    va_end(args);
+}
+
+void hax_warning(char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    if (HAX_LOGW < default_hax_log_level)
+        return;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm_warning: %pV", KERN_WARNING, &vaf);
+    va_end(args);
+}
+
+void hax_info(char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    if (HAX_LOGI < default_hax_log_level)
+        return;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm_info: %pV", KERN_INFO, &vaf);
+    va_end(args);
+}
+
+void hax_debug(char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    if (HAX_LOGD < default_hax_log_level)
+        return;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm_debug: %pV", KERN_DEBUG, &vaf);
+    va_end(args);
+}
+
+void hax_panic_vcpu(struct vcpu_t *v, char *fmt, ...)
+{
+    struct va_format vaf;
+    va_list args;
+
+    vaf.fmt = fmt;
+    vaf.va = &args;
+    va_start(args, fmt);
+    printk("%shaxm_panic: %pV", KERN_ERR, &vaf);
+    va_end(args);
+    vcpu_set_panic(v);
+}
+
+void assert(bool condition)
+{
+    if (!condition)
+        BUG();
+}
+
+/* Misc */
+void hax_smp_mb(void)
+{
+    smp_mb();
+}
+
+/* Compare-Exchange */
+bool hax_cmpxchg32(uint32_t old_val, uint32_t new_val, volatile uint32_t *addr)
+{
+    uint64_t ret;
+
+    ret = cmpxchg(addr, old_val, new_val);
+    if (ret == old_val)
+        return true;
+    else
+        return false;
+}
+
+bool hax_cmpxchg64(uint64_t old_val, uint64_t new_val, volatile uint64_t *addr)
+{
+    uint64_t ret;
+
+    ret = cmpxchg64(addr, old_val, new_val);
+    if (ret == old_val)
+        return true;
+    else
+        return false;
+}
+
+/* Atomics */
+hax_atomic_t hax_atomic_add(volatile hax_atomic_t *atom, uint32_t value)
+{
+    return atomic_add_return(value, (atomic_t *)atom) - value;
+}
+
+hax_atomic_t hax_atomic_inc(volatile hax_atomic_t *atom)
+{
+    return atomic_inc_return((atomic_t *)atom) - 1;
+}
+
+hax_atomic_t hax_atomic_dec(volatile hax_atomic_t *atom)
+{
+    return atomic_dec_return((atomic_t *)atom) + 1;
+}
+
+int hax_test_and_set_bit(int bit, uint64_t *memory)
+{
+    unsigned long *addr;
+
+    addr = (unsigned long *)memory;
+    return test_and_set_bit(bit, addr);
+}
+
+int hax_test_and_clear_bit(int bit, uint64_t *memory)
+{
+    unsigned long *addr;
+
+    addr = (unsigned long *)memory;
+    return !test_and_clear_bit(bit, addr);
+}
+
+/* Spinlock */
+struct hax_spinlock {
+    spinlock_t lock;
+};
+
+hax_spinlock *hax_spinlock_alloc_init(void)
+{
+    struct hax_spinlock *lock;
+
+    lock = kmalloc(sizeof(struct hax_spinlock), GFP_KERNEL);
+    if (!lock) {
+        hax_error("Could not allocate spinlock\n");
+        return NULL;
+    }
+    spin_lock_init(&lock->lock);
+    return lock;
+}
+
+void hax_spinlock_free(hax_spinlock *lock)
+{
+    if (!lock)
+        return;
+
+    kfree(lock);
+}
+
+void hax_spin_lock(hax_spinlock *lock)
+{
+    spin_lock(&lock->lock);
+}
+
+void hax_spin_unlock(hax_spinlock *lock)
+{
+    spin_unlock(&lock->lock);
+}
+
+/* Mutex */
+hax_mutex hax_mutex_alloc_init(void)
+{
+    struct mutex *lock;
+
+    lock = kmalloc(sizeof(struct mutex), GFP_KERNEL);
+    if (!lock) {
+        hax_error("Could not allocate mutex\n");
+        return NULL;
+    }
+    mutex_init(lock);
+    return lock;
+}
+
+void hax_mutex_lock(hax_mutex lock)
+{
+    if (!lock)
+        return;
+
+    mutex_lock((struct mutex *)lock);
+}
+
+void hax_mutex_unlock(hax_mutex lock)
+{
+    if (!lock)
+        return;
+
+    mutex_unlock((struct mutex *)lock);
+}
+
+void hax_mutex_free(hax_mutex lock)
+{
+    if (!lock)
+        return;
+
+    mutex_destroy((struct mutex *)lock);
+    kfree(lock);
+}
diff --git a/platforms/linux/haxm-install.sh b/platforms/linux/haxm-install.sh
new file mode 100755
index 00000000..9b6cf1a0
--- /dev/null
+++ b/platforms/linux/haxm-install.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+DEVNAME=HAX
+GRPNAME=haxm
+MODNAME=haxm
+
+# Create group, if necessary
+groupadd -f $GRPNAME
+
+# Create udev rule
+echo "KERNEL==\"${DEVNAME}\", GROUP=\"${GRPNAME}\", MODE=\"0660\"" \
+     > /lib/udev/rules.d/99-haxm.rules
+
+# Load kernel module
+depmod -a
+modprobe $MODNAME
+
+# Add to boot-time kernel module list, only once
+sed -i "/^${MODNAME}$/d" /etc/modules
+echo $MODNAME >> /etc/modules
+
+echo 'HAXM successfully
installed' diff --git a/platforms/linux/haxm-uninstall.sh b/platforms/linux/haxm-uninstall.sh new file mode 100755 index 00000000..d5858a8f --- /dev/null +++ b/platforms/linux/haxm-uninstall.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +DEVNAME=HAX +GRPNAME=haxm +MODNAME=haxm + +# Remove udev rule +rm -f /lib/udev/rules.d/99-haxm.rules + +# Remove group, if necessary +if [ $(getent group $GRPNAME) ]; then + groupdel $GRPNAME +fi + +# Remove from boot-time kernel module list +sed -i "/^${MODNAME}$/d" /etc/modules + +# Unload kernel module +modprobe -r $MODNAME + +echo 'HAXM successfully uninstalled' diff --git a/platforms/windows/hax_entry.h b/platforms/windows/hax_entry.h index d7d0a1de..52613f7d 100644 --- a/platforms/windows/hax_entry.h +++ b/platforms/windows/hax_entry.h @@ -89,7 +89,7 @@ static inline smpc_dpc_exit(void) { return 1; } #endif /* According to DDK, the IoAllocateMdl can support at most - * 64M - page_size * (sizeof(MDL))/sizeof(ULONG_PTR), so take 32M here + * 64M - PAGE_SIZE * (sizeof(MDL))/sizeof(ULONG_PTR), so take 32M here */ #if (NTDDI_VERSION <= NTDDI_WS03) #define HAX_RAM_ENTRY_SIZE 0x2000000 diff --git a/platforms/windows/hax_wrapper.c b/platforms/windows/hax_wrapper.c index c62ffd93..ced9996f 100644 --- a/platforms/windows/hax_wrapper.c +++ b/platforms/windows/hax_wrapper.c @@ -33,7 +33,7 @@ int default_hax_log_level = 3; int max_cpus; -cpumap_t cpu_online_map; +hax_cpumap_t cpu_online_map; int hax_log_level(int level, const char *fmt, ...) { @@ -55,7 +55,7 @@ struct smp_call_parameter void (*func)(void *); void *param; /* Not used in DPC model*/ - cpumap_t *cpus; + hax_cpumap_t *cpus; }; #ifdef SMPC_DPCS @@ -70,12 +70,12 @@ void smp_cfunction_dpc( __in_opt PVOID SystemArgument1, __in_opt PVOID SystemArgument2) { - cpumap_t *done; + hax_cpumap_t *done; void (*action)(void *parap); struct smp_call_parameter *p; p = (struct smp_call_parameter *)SystemArgument2; - done = (cpumap_t*)SystemArgument1; + done = (hax_cpumap_t*)SystemArgument1; action = p->func; action(p->param); hax_test_and_set_bit(hax_cpuid(), (uint64_t*)done); @@ -84,17 +84,17 @@ void smp_cfunction_dpc( /* IPI function is not exported to in XP, we use DPC to trigger the smp * call function. However, as the DPC is not happen immediately, not * sure how to handle such situation. Currently simply delay - * The smp_call_function has to be synced, since we use global dpc, however, + * The hax_smp_call_function has to be synced, since we use global dpc, however, * we can't use spinlock here since spinlock will increase IRQL to DISPATCH * and cause potential deadloop. Another choice is to allocate the DPC in the - * smp_call_function instead of globla dpc. + * hax_smp_call_function instead of globla dpc. 
*/ -int smp_call_function(cpumap_t *cpus, void (*scfunc)(void *), void * param) +int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *), void * param) { int i, self; BOOLEAN result; struct _KDPC *cur_dpc; - cpumap_t done; + hax_cpumap_t done; struct smp_call_parameter *sp; KIRQL old_irql; LARGE_INTEGER delay; @@ -181,7 +181,7 @@ static ULONG_PTR smp_cfunction(ULONG_PTR param) { int cpu_id; void (*action)(void *parap) ; - cpumap_t *hax_cpus; + hax_cpumap_t *hax_cpus; struct smp_call_parameter *p; p = (struct smp_call_parameter *)param; @@ -192,7 +192,7 @@ static ULONG_PTR smp_cfunction(ULONG_PTR param) action(p->param); return (ULONG_PTR)NULL; } -int smp_call_function(cpumap_t *cpus, void (*scfunc)(void *), void * param) +int hax_smp_call_function(hax_cpumap_t *cpus, void (*scfunc)(void *), void * param) { struct smp_call_parameter sp; sp.func = scfunc;