diff --git a/src/injector_core/internal.rs b/src/injector_core/internal.rs
index 27748b1..360586c 100644
--- a/src/injector_core/internal.rs
+++ b/src/injector_core/internal.rs
@@ -1,8 +1,14 @@
 use crate::injector_core::common::*;
-#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
 use super::patch_trait::PatchTrait;
 
+#[cfg(target_arch = "x86_64")]
+use super::patch_amd64::PatchAmd64;
+#[cfg(target_arch = "aarch64")]
+use super::patch_arm64::PatchArm64;
+#[cfg(target_arch = "arm")]
+use super::patch_arm::PatchArm;
+
 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
 use super::thread_local_registry;
 
@@ -19,10 +25,15 @@ impl WhenCalled {
         Self { func_ptr: func }
     }
 
-    /// Patches the target function so that it branches to a JIT block that uses an absolute jump
-    /// to call the target function.
-    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+    /// Patches the target function with a direct JMP to the replacement (0.4.0-style global patching).
+    /// All threads see the fake because the function's code bytes are overwritten.
+    /// Used by `when_called_globally()`.
     pub(crate) fn will_execute_guard(self, target: FuncPtrInternal) -> PatchGuard {
+        #[cfg(target_arch = "x86_64")]
+        {
+            PatchAmd64::replace_function_with_other_function(self.func_ptr, target)
+        }
+
         #[cfg(target_arch = "aarch64")]
         {
             PatchArm64::replace_function_with_other_function(self.func_ptr, target)
@@ -101,9 +112,15 @@ impl WhenCalled {
         )
     }
 
-    /// Patches the target function so that it branches to a JIT block that returns the specified boolean.
-    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+    /// Patches the target function to return a fixed boolean via direct JMP (0.4.0-style).
+    /// All threads see the fake. Used by `when_called_globally().will_return_boolean()`.
     pub(crate) fn will_return_boolean_guard(self, value: bool) -> PatchGuard {
+        #[cfg(target_arch = "x86_64")]
+        {
+            PatchAmd64::replace_function_return_boolean(self.func_ptr, value)
+        }
+
         #[cfg(target_arch = "aarch64")]
         {
             PatchArm64::replace_function_return_boolean(self.func_ptr, value)
diff --git a/src/injector_core/patch_amd64.rs b/src/injector_core/patch_amd64.rs
index d8a8d7a..c4355ca 100644
--- a/src/injector_core/patch_amd64.rs
+++ b/src/injector_core/patch_amd64.rs
@@ -76,23 +76,41 @@ fn generate_branch_to_target_function(ori_func: usize, target_func: usize) -> Ve
 }
 
 fn patch_and_guard(src: FuncPtrInternal, jit_memory: *mut u8, jit_size: usize) -> PatchGuard {
-    let func_addr = src.as_ptr() as usize;
+    // Resolve IAT thunks (jmp [rip+disp]) to the actual function address.
+    // This matches what thread_local_registry does, ensuring we patch the real
+    // function address — not the thunk — so the patch is visible to all call paths.
+    let func_addr = unsafe { resolve_to_real_function(src.as_ptr() as *mut u8) } as usize;
     let jit_addr = jit_memory as usize;
 
     let branch_code = generate_branch_to_target_function(func_addr, jit_addr);
     let patch_size = branch_code.len();
-    let original_bytes = unsafe { read_bytes(src.as_ptr() as *mut u8, patch_size) };
+    let original_bytes = unsafe { read_bytes(func_addr as *mut u8, patch_size) };
 
     unsafe {
-        patch_function(src.as_ptr() as *mut u8, &branch_code);
+        patch_function(func_addr as *mut u8, &branch_code);
     }
 
     PatchGuard::new(
-        src.as_ptr() as *mut u8,
+        func_addr as *mut u8,
         original_bytes,
         patch_size,
         jit_memory,
         jit_size,
     )
 }
+
+/// Resolve import thunks to the actual function address on Windows x86_64.
+/// Extern functions go through an IAT thunk: `jmp [rip+disp32]` (FF 25 xx xx xx xx).
+/// This follows the indirection to return the real function address.
+unsafe fn resolve_to_real_function(func_addr: *mut u8) -> *mut u8 {
+    let code = std::slice::from_raw_parts(func_addr, 6);
+    if code[0] == 0xFF && code[1] == 0x25 {
+        let disp = i32::from_le_bytes([code[2], code[3], code[4], code[5]]);
+        let rip_after_insn = func_addr.add(6);
+        let iat_entry = rip_after_insn.offset(disp as isize) as *const *mut u8;
+        let real_addr = std::ptr::read(iat_entry);
+        return resolve_to_real_function(real_addr);
+    }
+    func_addr
+}
diff --git a/src/injector_core/thread_local_registry.rs b/src/injector_core/thread_local_registry.rs
index 9c7b814..f244f48 100644
--- a/src/injector_core/thread_local_registry.rs
+++ b/src/injector_core/thread_local_registry.rs
@@ -141,14 +141,19 @@ impl Drop for ThreadRegistration {
 
 /// Called by the JIT dispatcher to get the target function pointer for the current thread.
 ///
-/// If the current thread has a registered replacement for `method_key`, returns that.
-/// Otherwise, returns `default_target` (the trampoline to the original function).
+/// Returns the thread-local replacement if registered, otherwise falls back to
+/// the default target (the trampoline to the original function).
 ///
 /// # Safety
 /// This function is called from JIT-generated code. It must not panic across the FFI boundary.
 pub(crate) extern "C" fn get_thread_target(method_key: usize, default_target: usize) -> usize {
     match std::panic::catch_unwind(AssertUnwindSafe(|| {
-        tls_get(&method_key, default_target)
+        let tls_result = tls_get(&method_key, 0);
+        if tls_result != 0 {
+            return tls_result;
+        }
+
+        default_target
     })) {
         Ok(target) => target,
         Err(_) => default_target,
diff --git a/src/interface/injector.rs b/src/interface/injector.rs
index a1b656d..e578701 100644
--- a/src/interface/injector.rs
+++ b/src/interface/injector.rs
@@ -13,10 +13,11 @@ use std::pin::Pin;
 use std::task::Context;
 use std::task::Poll;
 
-#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
 use std::sync::Mutex;
-#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
 use std::sync::MutexGuard;
+use std::sync::RwLock;
+use std::sync::RwLockReadGuard;
+use std::sync::RwLockWriteGuard;
 
 #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
 use crate::injector_core::thread_local_registry::ThreadRegistration;
@@ -29,14 +30,12 @@ fn normalize_signature(sig: &str) -> String {
 }
 
 /// A `Mutex` that never stays poisoned: on panic it just recovers the guard.
-///
-/// Only used on non-x86_64 architectures where the global mutex approach is still used.
-#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+#[allow(dead_code)]
 struct NoPoisonMutex {
     inner: Mutex,
 }
 
-#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+#[allow(dead_code)]
 impl NoPoisonMutex {
     const fn new(value: T) -> Self {
         Self {
@@ -52,39 +51,65 @@ impl NoPoisonMutex {
     }
 }
 
+/// Global mutex used on non-TLS architectures to serialize all patching.
 #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
 static LOCK_FUNCTION: NoPoisonMutex<()> = NoPoisonMutex::new(());
 
+/// RwLock for coordinating thread-local vs global fakes.
+/// - `when_called()` (thread-local) acquires a **read** lock — multiple thread-local
+/// tests can run in parallel.
+/// - `when_called_globally()` acquires a **write** lock — blocks until all thread-local
+/// tests finish, and prevents new tests from starting. This is exactly 0.4.0 behavior
+/// for the duration of the global fake.
+static GLOBAL_FAKE_LOCK: RwLock<()> = RwLock::new(());
+
 /// A high-level type that holds patch guards so that when it goes out of scope,
 /// the original function code is automatically restored.
 ///
 /// # Thread Safety
 ///
-/// On x86_64 and aarch64, InjectorPP uses thread-local dispatch: each thread can
-/// independently fake the same function to different values without interference.
+/// On x86_64 and aarch64, InjectorPP uses thread-local dispatch by default: each thread
+/// can independently fake the same function to different values without interference.
 /// Tests using InjectorPP can run in parallel.
 ///
-/// On other architectures, InjectorPP ensures thread safety by holding a global mutex
-/// for the entire lifetime of the patch.
+/// Use `InjectorPP::new_global()` for 0.4.0-style global patching where fakes are visible
+/// to all threads (e.g., when faked functions are called from background timer threads).
+/// Global mode acquires an exclusive lock — other tests wait until the global injector drops.
+///
+/// On other architectures, InjectorPP always uses global patching with a global mutex.
 pub struct InjectorPP {
     #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
     registrations: Vec,
-    #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
     guards: Vec,
     verifiers: Vec,
+    /// Read guard: held by thread-local fakes. Allows parallel TLS tests.
+    /// Write guard: held by global fakes. Blocks all other tests.
+    _rw_guard: RwGuard,
+    /// When true, `when_called()` uses direct code patching (0.4.0-style global).
+    /// When false (default), uses thread-local dispatch.
+    use_global: bool,
     #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
     _not_send: PhantomData<*const ()>,
     #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
     _lock: MutexGuard<'static, ()>,
 }
 
+/// Holds either a read or write guard on GLOBAL_FAKE_LOCK, or none (transient during upgrade).
+/// The guard values are never read directly — they exist solely to keep the lock held.
+#[allow(dead_code)]
+enum RwGuard {
+    None,
+    Read(RwLockReadGuard<'static, ()>),
+    Write(RwLockWriteGuard<'static, ()>),
+}
+
 impl InjectorPP {
-    /// Creates a new `InjectorPP` instance.
-    ///
-    /// `InjectorPP` allows faking Rust functions at runtime without modifying the original code.
+    /// Creates a new `InjectorPP` instance with **thread-local** dispatch (default).
     ///
     /// On x86_64 and aarch64, each instance registers thread-local replacements, enabling parallel test execution.
-    /// On other architectures, it holds a global mutex for the entire lifetime of the patch.
+    /// Fakes are only visible on the thread that created the injector.
+    ///
+    /// Use `new_global()` instead if your faked functions will be called from background threads.
     ///
     /// # Example
     ///
     /// ```
@@ -96,9 +121,17 @@ impl InjectorPP {
     pub fn new() -> Self {
         #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
         {
+            // Acquire a read lock — allows parallel TLS tests, blocks while a global fake is active.
+            let rw_guard = match GLOBAL_FAKE_LOCK.read() {
+                Ok(g) => g,
+                Err(e) => e.into_inner(),
+            };
             Self {
                 registrations: Vec::new(),
+                guards: Vec::new(),
                 verifiers: Vec::new(),
+                _rw_guard: RwGuard::Read(rw_guard),
+                use_global: false,
                 _not_send: PhantomData,
             }
         }
 
@@ -106,9 +139,66 @@ impl InjectorPP {
         #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
         {
             let lock = LOCK_FUNCTION.lock();
+            let rw_guard = match GLOBAL_FAKE_LOCK.read() {
+                Ok(g) => g,
+                Err(e) => e.into_inner(),
+            };
             Self {
                 guards: Vec::new(),
                 verifiers: Vec::new(),
+                _rw_guard: RwGuard::Read(rw_guard),
+                use_global: false,
                 _lock: lock,
             }
         }
     }
+
+    /// Creates a new `InjectorPP` instance with **global** (0.4.0-style) patching.
+    ///
+    /// All `when_called()` fakes will use direct code patching, visible to **all threads**.
+    /// This acquires an exclusive write lock — other tests (both thread-local and global)
+    /// will wait until this instance is dropped.
+    ///
+    /// Use this when the faked functions will be called from background threads,
+    /// timers, or thread pools.
+    ///
+    /// # Example
+    ///
+    /// ```rust
+    /// use injectorpp::interface::injector::InjectorPP;
+    ///
+    /// // All fakes created with this injector are visible to all threads
+    /// let injector = InjectorPP::new_global();
+    /// ```
+    pub fn new_global() -> Self {
+        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+        {
+            let rw_guard = match GLOBAL_FAKE_LOCK.write() {
+                Ok(g) => g,
+                Err(e) => e.into_inner(),
+            };
+            Self {
+                registrations: Vec::new(),
+                guards: Vec::new(),
+                verifiers: Vec::new(),
+                _rw_guard: RwGuard::Write(rw_guard),
+                use_global: true,
+                _not_send: PhantomData,
+            }
+        }
+
+        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+        {
+            let lock = LOCK_FUNCTION.lock();
+            let rw_guard = match GLOBAL_FAKE_LOCK.write() {
+                Ok(g) => g,
+                Err(e) => e.into_inner(),
+            };
+            Self {
+                guards: Vec::new(),
+                verifiers: Vec::new(),
+                _rw_guard: RwGuard::Write(rw_guard),
+                use_global: true,
+                _lock: lock,
+            }
+        }
+    }
@@ -429,16 +519,21 @@ impl WhenCalledBuilder<'_> {
             _ => {}
         }
 
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
-        {
-            let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
-            self.lib.registrations.push(reg);
-        }
-
-        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
-        {
+        if self.lib.use_global {
             let guard = self.when.will_execute_guard(target.func_ptr_internal);
             self.lib.guards.push(guard);
+        } else {
+            #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+            {
+                let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
+                self.lib.registrations.push(reg);
+            }
+
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+            {
+                let guard = self.when.will_execute_guard(target.func_ptr_internal);
+                self.lib.guards.push(guard);
+            }
         }
     }
 
@@ -496,16 +591,21 @@ impl WhenCalledBuilder<'_> {
     /// assert!(Path::new("/nonexistent").exists());
     /// ```
     pub unsafe fn will_execute_raw_unchecked(self, target: FuncPtr) {
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
-        {
-            let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
-            self.lib.registrations.push(reg);
-        }
-
-        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
-        {
+        if self.lib.use_global {
             let guard = self.when.will_execute_guard(target.func_ptr_internal);
             self.lib.guards.push(guard);
+        } else {
+            #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+            {
+                let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
+                self.lib.registrations.push(reg);
+            }
+
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+            {
+                let guard = self.when.will_execute_guard(target.func_ptr_internal);
+                self.lib.guards.push(guard);
+            }
         }
     }
 
@@ -578,16 +678,21 @@ impl WhenCalledBuilder<'_> {
             );
         }
 
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
-        {
-            let reg = self.when.will_return_boolean_thread_local(value);
-            self.lib.registrations.push(reg);
-        }
-
-        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
-        {
+        if self.lib.use_global {
             let guard = self.when.will_return_boolean_guard(value);
             self.lib.guards.push(guard);
+        } else {
+            #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+            {
+                let reg = self.when.will_return_boolean_thread_local(value);
+                self.lib.registrations.push(reg);
+            }
+
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+            {
+                let guard = self.when.will_return_boolean_guard(value);
+                self.lib.guards.push(guard);
+            }
         }
     }
 }
@@ -645,16 +750,21 @@ impl WhenCalledBuilderAsync<'_> {
             _ => {}
         }
 
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
-        {
-            let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
-            self.lib.registrations.push(reg);
-        }
-
-        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
-        {
+        if self.lib.use_global {
             let guard = self.when.will_execute_guard(target.func_ptr_internal);
             self.lib.guards.push(guard);
+        } else {
+            #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+            {
+                let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
+                self.lib.registrations.push(reg);
+            }
+
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+            {
+                let guard = self.when.will_execute_guard(target.func_ptr_internal);
+                self.lib.guards.push(guard);
+            }
         }
     }
 
@@ -690,16 +800,22 @@ impl WhenCalledBuilderAsync<'_> {
     /// }
     /// ```
     pub unsafe fn will_return_async_unchecked(self, target: FuncPtr) {
-        #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
-        {
-            let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
-            self.lib.registrations.push(reg);
-        }
-
-        #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
-        {
+        if self.lib.use_global {
             let guard = self.when.will_execute_guard(target.func_ptr_internal);
             self.lib.guards.push(guard);
+        } else {
+            #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+            {
+                let reg = self.when.will_execute_thread_local(target.func_ptr_internal);
+                self.lib.registrations.push(reg);
+            }
+
+            #[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm")))]
+            {
+                let guard = self.when.will_execute_guard(target.func_ptr_internal);
+                self.lib.guards.push(guard);
+            }
         }
     }
 }
+
diff --git a/tests/global.rs b/tests/global.rs
new file mode 100644
index 0000000..0d1adba
--- /dev/null
+++ b/tests/global.rs
@@ -0,0 +1,234 @@
+use injectorpp::interface::injector::*;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::sync::Arc;
+use std::thread;
+
+// ---- Helper functions for testing ----
+// These use `core::hint::black_box` to ensure each function's compiled code is
+// at least 16 bytes. On ARM, the PatchGuard uses 12-byte patches; tiny functions
+// placed adjacently by the linker would overlap when patched simultaneously.
+
+#[inline(never)]
+fn global_test_func() -> i32 {
+    core::hint::black_box(core::hint::black_box(21) + core::hint::black_box(21))
+}
+
+#[inline(never)]
+fn global_test_func_bool() -> bool {
+    core::hint::black_box(!core::hint::black_box(true))
+}
+
+#[inline(never)]
+fn global_add(a: i32, b: i32) -> i32 {
+    core::hint::black_box(core::hint::black_box(a) + core::hint::black_box(b))
+}
+
+#[inline(never)]
+fn global_multiply(a: i32, b: i32) -> i32 {
+    core::hint::black_box(core::hint::black_box(a) * core::hint::black_box(b))
+}
+
+// ---- Tests ----
+
+/// Verifies that a global fake using `will_execute` (fake! macro) is visible from a spawned thread.
+#[test]
+fn test_global_fake_visible_from_spawned_thread() {
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func)() -> i32))
+        .will_execute(injectorpp::fake!(
+            func_type: fn() -> i32,
+            returns: 99,
+            times: 2
+        ));
+
+    assert_eq!(global_test_func(), 99);
+
+    let handle = thread::spawn(global_test_func);
+    assert_eq!(handle.join().unwrap(), 99);
+}
+
+/// Verifies that `will_return_boolean` in global mode is visible from a spawned thread.
+#[test]
+fn test_global_fake_boolean_visible_from_spawned_thread() {
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func_bool)() -> bool))
+        .will_return_boolean(true);
+
+    assert!(global_test_func_bool());
+
+    let handle = thread::spawn(global_test_func_bool);
+    assert!(handle.join().unwrap());
+}
+
+/// Verifies that `will_execute_raw` (named function) in global mode works across threads.
+#[test]
+fn test_global_fake_will_execute_raw_cross_thread() {
+    fn fake_add(_a: i32, _b: i32) -> i32 {
+        1000
+    }
+
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_add)(i32, i32) -> i32))
+        .will_execute_raw(injectorpp::func!(fn (fake_add)(i32, i32) -> i32));
+
+    assert_eq!(global_add(1, 2), 1000);
+
+    let handle = thread::spawn(|| global_add(10, 20));
+    assert_eq!(handle.join().unwrap(), 1000);
+}
+
+/// Verifies that a closure-based fake in global mode works across threads.
+#[test]
+fn test_global_fake_closure_cross_thread() {
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_multiply)(i32, i32) -> i32))
+        .will_execute_raw(injectorpp::closure!(|_a: i32, _b: i32| -> i32 { 777 }, fn(i32, i32) -> i32));
+
+    assert_eq!(global_multiply(3, 4), 777);
+
+    let handle = thread::spawn(|| global_multiply(5, 6));
+    assert_eq!(handle.join().unwrap(), 777);
+}
+
+/// Verifies that multiple functions can be faked in the same global injector.
+#[test]
+fn test_global_multiple_fakes_in_same_injector() {
+    let mut injector = InjectorPP::new_global();
+
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func)() -> i32))
+        .will_execute(injectorpp::fake!(
+            func_type: fn() -> i32,
+            returns: 111,
+            times: 1
+        ));
+
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func_bool)() -> bool))
+        .will_return_boolean(true);
+
+    assert_eq!(global_test_func(), 111);
+    assert!(global_test_func_bool());
+}
+
+/// Verifies that after a global injector is dropped, the original function is restored.
+#[test]
+fn test_global_fake_restores_original_after_drop() {
+    {
+        let mut injector = InjectorPP::new_global();
+        injector
+            .when_called(injectorpp::func!(fn (global_test_func)() -> i32))
+            .will_execute(injectorpp::fake!(
+                func_type: fn() -> i32,
+                returns: 555,
+                times: 1
+            ));
+
+        assert_eq!(global_test_func(), 555);
+        // injector drops here
+    }
+
+    // Original function should be restored
+    assert_eq!(global_test_func(), 42);
+}
+
+/// Verifies that a global fake is visible from multiple concurrently spawned threads.
+#[test]
+fn test_global_fake_visible_from_many_threads() {
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func)() -> i32))
+        .will_execute_raw(injectorpp::closure!(|| -> i32 { 42_000 }, fn() -> i32));
+
+    let counter = Arc::new(AtomicU32::new(0));
+    let mut handles = Vec::new();
+
+    for _ in 0..8 {
+        let counter = Arc::clone(&counter);
+        handles.push(thread::spawn(move || {
+            let result = global_test_func();
+            if result == 42_000 {
+                counter.fetch_add(1, Ordering::SeqCst);
+            }
+        }));
+    }
+
+    for handle in handles {
+        handle.join().unwrap();
+    }
+
+    // All 8 threads should have seen the global fake
+    assert_eq!(counter.load(Ordering::SeqCst), 8);
+}
+
+/// Verifies that `will_execute_raw_unchecked` in global mode works across threads.
+#[test]
+fn test_global_fake_unchecked_cross_thread() {
+    fn fake_func() -> i32 {
+        9999
+    }
+
+    let mut injector = InjectorPP::new_global();
+    unsafe {
+        injector
+            .when_called(injectorpp::func_unchecked!(global_test_func))
+            .will_execute_raw_unchecked(injectorpp::func_unchecked!(fake_func));
+    }
+
+    assert_eq!(global_test_func(), 9999);
+
+    let handle = thread::spawn(global_test_func);
+    assert_eq!(handle.join().unwrap(), 9999);
+}
+
+/// Verifies that `new()` (thread-local mode) still works correctly — fakes are NOT visible
+/// from spawned threads (default 0.5.0 behavior).
+#[test]
+#[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm"))]
+fn test_thread_local_mode_not_visible_from_spawned_thread() {
+    let mut injector = InjectorPP::new();
+    injector
+        .when_called(injectorpp::func!(fn (global_add)(i32, i32) -> i32))
+        .will_execute_raw(injectorpp::closure!(|_a: i32, _b: i32| -> i32 { 9999 }, fn(i32, i32) -> i32));
+
+    // Test thread sees the fake
+    assert_eq!(global_add(1, 2), 9999);
+
+    // Spawned thread should NOT see the fake (thread-local mode)
+    let handle = thread::spawn(|| global_add(1, 2));
+    assert_eq!(handle.join().unwrap(), 3);
+}
+
+/// Verifies that a global fake with call-count verification works correctly
+/// when calls come from multiple threads.
+#[test]
+fn test_global_fake_call_count_across_threads() {
+    let mut injector = InjectorPP::new_global();
+    injector
+        .when_called(injectorpp::func!(fn (global_test_func)() -> i32))
+        .will_execute(injectorpp::fake!(
+            func_type: fn() -> i32,
+            returns: 50,
+            times: 4
+        ));
+
+    // 1 call from test thread
+    assert_eq!(global_test_func(), 50);
+
+    // 3 calls from spawned threads
+    let mut handles = Vec::new();
+    for _ in 0..3 {
+        handles.push(thread::spawn(|| {
+            assert_eq!(global_test_func(), 50);
+        }));
+    }
+    for h in handles {
+        h.join().unwrap();
+    }
+
+    // Verifier checks times:4 on drop — this test passes only if exactly 4 calls were made.
+}