std/sys/pal/unix/stack_overflow.rs

#![cfg_attr(test, allow(dead_code))]

pub use self::imp::{cleanup, init};
use self::imp::{drop_handler, make_handler};

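/// RAII handle for a thread's alternate signal stack, used by the
/// stack-overflow signal handler. `data` points at the installed alternate
/// stack, or is null when no stack was set up; dropping the handler tears the
/// stack down again via `drop_handler`.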
pub struct Handler {
    data: *mut libc::c_void,
}

impl Handler {
    pub unsafe fn new() -> Handler {
        make_handler(false)
    }

    fn null() -> Handler {
        Handler { data: crate::ptr::null_mut() }
    }
}

impl Drop for Handler {
    fn drop(&mut self) {
        unsafe {
            drop_handler(self.data);
        }
    }
}

#[cfg(any(
    target_os = "linux",
    target_os = "freebsd",
    target_os = "hurd",
    target_os = "macos",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "solaris",
    target_os = "illumos",
))]
mod imp {
    use libc::{
        MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
        SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
    };
    #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
    use libc::{mmap as mmap64, mprotect, munmap};
    #[cfg(all(target_os = "linux", target_env = "gnu"))]
    use libc::{mmap64, mprotect, munmap};

    use super::Handler;
    use crate::cell::Cell;
    use crate::ops::Range;
    use crate::sync::OnceLock;
    use crate::sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering};
    use crate::sys::pal::unix::os;
    use crate::{io, mem, ptr, thread};

    // We use a TLS variable to store the address of the guard page. While TLS
    // variables are not guaranteed to be signal-safe, this works out in practice
    // since we make sure to write to the variable before the signal stack is
    // installed, thereby ensuring that the variable is always allocated when
    // the signal handler is called.
    thread_local! {
        // FIXME: use `Range` once that implements `Copy`.
        static GUARD: Cell<(usize, usize)> = const { Cell::new((0, 0)) };
    }

    // Signal handler for SIGSEGV and SIGBUS. We've got guard pages (unmapped
    // pages) at the end of every thread's stack, so if a thread ends up
    // running into the guard page it'll trigger this handler. We want to
    // detect these cases and print out a helpful error saying that the stack
    // has overflowed. All other signals, however, should go back to what they
    // were originally supposed to do.
    //
    // This handler currently exists purely to print an informative message
    // whenever a thread overflows its stack. We then abort to exit and
    // indicate a crash, while avoiding a misleading SIGSEGV that might lead
    // users to believe that unsafe code has accessed an invalid pointer; the
    // SIGSEGV encountered when overflowing the stack is expected and
    // well-defined.
    //
    // If this is not a stack overflow, the handler un-registers itself and
    // then returns (to allow the original signal to be delivered again).
    // Returning from this kind of signal handler is technically not defined
    // to work when reading the POSIX spec strictly, but in practice many
    // large systems and all implementations allow it. For a more detailed
    // explanation see the comments on #26458.
    /// SIGSEGV/SIGBUS entry point
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        let (start, end) = GUARD.get();
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let addr = unsafe { (*info).si_addr().addr() };

        // If the faulting address is within the guard page, then we print a
        // message saying so and abort.
        if start <= addr && addr < end {
            thread::with_current_name(|name| {
                let name = name.unwrap_or("<unknown>");
                rtprintpanic!("\nthread '{name}' has overflowed its stack\n");
            });

            rtabort!("stack overflow");
        } else {
            // Unregister ourselves by reverting back to the default behavior.
            // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
            let mut action: sigaction = unsafe { mem::zeroed() };
            action.sa_sigaction = SIG_DFL;
            // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
            unsafe { sigaction(signum, &action, ptr::null_mut()) };

            // See comment above for why this function returns.
        }
    }

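    // State shared by `init`, `make_handler` and `cleanup`: the page size is
    // cached once at startup, the main thread's alternate stack is remembered
    // so `cleanup` can release it, and NEED_ALTSTACK records whether our
    // handler was installed (and hence whether new threads need an alternate
    // stack of their own).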
    static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
    static MAIN_ALTSTACK: AtomicPtr<libc::c_void> = AtomicPtr::new(ptr::null_mut());
    static NEED_ALTSTACK: AtomicBool = AtomicBool::new(false);

    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        // Always write to GUARD to ensure the TLS variable is allocated.
        let guard = unsafe { install_main_guard().unwrap_or(0..0) };
        GUARD.set((guard.start, guard.end));

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    let handler = unsafe { make_handler(true) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    mem::forget(handler);
                }
                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }

    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn cleanup() {
        // FIXME: I probably cause more bugs than I'm worth!
        // see https://github.com/rust-lang/rust/issues/111272
        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
    }

    unsafe fn get_stack() -> libc::stack_t {
        // OpenBSD requires MAP_STACK for mappings that are used as a stack;
        // otherwise using the mapping as a stack fails. On most other systems
        // the flag is a no-op, and on FreeBSD it has a different meaning,
        // which is why FreeBSD is not in the list below.
        #[cfg(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        ))]
        let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
        #[cfg(not(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        )))]
        let flags = MAP_PRIVATE | MAP_ANON;

        let sigstack_size = sigstack_size();
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

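        // Map the requested signal stack plus one extra page; the first page
        // of the mapping is turned into an inaccessible guard page below.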
        let stackp = mmap64(
            ptr::null_mut(),
            sigstack_size + page_size,
            PROT_READ | PROT_WRITE,
            flags,
            -1,
            0,
        );
        if stackp == MAP_FAILED {
            panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
        }
        let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
        if guard_result != 0 {
            panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
        }
        let stackp = stackp.add(page_size);

        libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
    }

    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool) -> Handler {
        if !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

        if !main_thread {
            // Always write to GUARD to ensure the TLS variable is allocated.
            let guard = unsafe { current_guard() }.unwrap_or(0..0);
            GUARD.set((guard.start, guard.end));
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            Handler::null()
        }
    }

    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stack` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }
    }

    /// Modern kernels on modern hardware can have dynamic signal stack sizes.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn sigstack_size() -> usize {
        let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
        // If getauxval couldn't find the entry, it returns 0,
        // so take the higher of the "constant" and auxval.
        // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
        libc::SIGSTKSZ.max(dynamic_sigstksz as _)
    }

    /// Not all OSes support hardware where this is needed.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        libc::SIGSTKSZ
    }

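    // Solaris and illumos report the calling thread's stack bounds directly
    // via stack_getbounds(); the returned ss_sp is used as the start (lowest
    // address) of the stack.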
    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }

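    // On macOS, pthread_get_stackaddr_np() returns the highest address of the
    // thread's stack, so walk down by the stack size to get the start (lowest
    // address).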
    #[cfg(target_os = "macos")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let th = libc::pthread_self();
        let stackptr = libc::pthread_get_stackaddr_np(th);
        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
    }

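    // OpenBSD reports the stack segment via pthread_stackseg_np(), where ss_sp
    // is the top of the stack, so walk down by the stack size to get the
    // start; the main thread additionally skips one extra page at the bottom.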
    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }

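    // On the remaining platforms, read the stack base out of the thread's
    // pthread attributes. FreeBSD uses pthread_attr_get_np() and requires the
    // attribute object to be initialized (and always destroyed) explicitly;
    // the other platforms use pthread_getattr_np() instead.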
    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackaddr, &mut stacksize), 0);
            ret = Some(stackaddr);
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
        }
        ret
    }

    fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
        let stackptr = unsafe { get_stack_start()? };
        let stackaddr = stackptr.addr();

        // Ensure stackaddr is page aligned! A parent process might
        // have reset RLIMIT_STACK to be non-page aligned. The
        // pthread_attr_getstack() reports the usable stack area
        // stackaddr < stackaddr + stacksize, so if stackaddr is not
        // page-aligned, calculate the fix such that stackaddr <
        // new_page_aligned_stackaddr < stackaddr + stacksize
        let remainder = stackaddr % page_size;
        Some(if remainder == 0 {
            stackptr
        } else {
            stackptr.with_addr(stackaddr + page_size - remainder)
        })
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
        // Linux doesn't allocate the whole stack right away, and
        // the kernel has its own stack-guard mechanism to fault
        // when growing too close to an existing mapping. If we map
        // our own guard, then the kernel starts enforcing a rather
        // large gap above that, rendering much of the possible
        // stack space useless. See #43052.
        //
        // Instead, we'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - page_size..stackaddr)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
        // For the main thread, musl's pthread_attr_getstack returns the
        // current stack size rather than the maximum size the stack can
        // eventually grow to, so it cannot be used to determine the position
        // of the kernel's stack guard.
        None
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable, controlled by the
        // security.bsd.stack_guard_page sysctl. By default it is 1; since it
        // is a boot-time config value, checking it once is enough.
        static PAGES: OnceLock<usize> = OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            use crate::sys::weak::dlsym;
            dlsym!(fn sysctlbyname(*const libc::c_char, *mut libc::c_void, *mut libc::size_t, *const libc::c_void, libc::size_t) -> libc::c_int);
            let mut guard: usize = 0;
            let mut size = mem::size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";
            match sysctlbyname.get() {
                Some(fcn) if unsafe {
                    fcn(oid.as_ptr(),
                        (&raw mut guard).cast(),
                        &raw mut size,
                        ptr::null_mut(),
                        0) == 0
                } => guard,
                _ => 1,
            }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
        // The OpenBSD stack already includes a guard page, and the stack is
        // immutable.
        // The NetBSD stack includes the guard page as well.
        //
        // We'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - page_size..stackaddr)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack. This ensures SIGBUS will be
        // raised on stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow mprotect()
        // to set less restrictive permissions than the initial mmap() used,
        // so we mmap() here with read/write permissions and only then
        // mprotect() it down to no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        let result = unsafe {
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }

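    // On these platforms the guard region is taken to be the single page
    // directly below the stack start reported by `get_stack_start`.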
    #[cfg(any(
        target_os = "macos",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
    }

    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let mut ret = None;
        let mut attr: libc::pthread_attr_t = crate::mem::zeroed();
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(&mut attr), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), &mut attr);
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), &mut attr);
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(&attr, &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported the guard
                    // size obtained from pthread_getattr_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(&attr, &mut stackptr, &mut size), 0);

            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at the
                // end (below) the stack. There's no easy way for us to know which we have at
                // runtime, so we'll just match any fault in the range right above or below the
                // stack base to call that fault a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(&mut attr), 0);
        }
        ret
    }
}

// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
// several symbols that might lead to rejections from the App Store, namely
// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
//
// This might be overly cautious, though it is also what Swift does (and they
// usually have fewer qualms about forwards compatibility, since the runtime
// is shipped with the OS):
// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
#[cfg(not(any(
    target_os = "linux",
    target_os = "freebsd",
    target_os = "hurd",
    target_os = "macos",
    target_os = "netbsd",
    target_os = "openbsd",
    target_os = "solaris",
    target_os = "illumos",
)))]
mod imp {
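    // Stack-overflow detection is not implemented for these targets; the
    // functions below are no-op stand-ins so callers can use them
    // unconditionally, and `make_handler` just hands back an empty `Handler`.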
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}