std/sys/alloc/wasm.rs

//! This is an implementation of a global allocator on wasm targets when
//! emscripten or wasi is not in use. In that situation there's no actual
//! runtime for us to lean on for allocation, so instead we provide our own!
//!
//! The wasm instruction set has two instructions for getting the current
//! amount of memory and growing the amount of memory. These instructions are
//! the foundation on which we're able to build an allocator, so we do so! Note
//! that the instructions are also pretty "global" and this is the "global"
//! allocator after all!
//!
//! The current allocator here is the `dlmalloc` crate which we've got included
//! in the rust-lang/rust repository as a submodule. The crate is a port of
//! dlmalloc.c from C to Rust and is basically just so we can have "pure Rust"
//! for now which is currently technically required (can't link with C yet).
//!
//! The crate itself provides a global allocator which on wasm has no
//! synchronization as there are no threads!
18
19use core::cell::SyncUnsafeCell;
20
21use crate::alloc::{GlobalAlloc, Layout, System};
22
23struct SyncDlmalloc(dlmalloc::Dlmalloc);
24unsafe impl Sync for SyncDlmalloc {}
25
26static DLMALLOC: SyncUnsafeCell<SyncDlmalloc> =
27    SyncUnsafeCell::new(SyncDlmalloc(dlmalloc::Dlmalloc::new()));
28
29#[stable(feature = "alloc_system_type", since = "1.28.0")]
30unsafe impl GlobalAlloc for System {
31    #[inline]
32    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
33        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
34        // Calling malloc() is safe because preconditions on this function match the trait method preconditions.
35        let _lock = lock::lock();
36        unsafe { (*DLMALLOC.get()).0.malloc(layout.size(), layout.align()) }
37    }
38
39    #[inline]
40    unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
41        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
42        // Calling calloc() is safe because preconditions on this function match the trait method preconditions.
43        let _lock = lock::lock();
44        unsafe { (*DLMALLOC.get()).0.calloc(layout.size(), layout.align()) }
45    }
46
47    #[inline]
48    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
49        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
50        // Calling free() is safe because preconditions on this function match the trait method preconditions.
51        let _lock = lock::lock();
52        unsafe { (*DLMALLOC.get()).0.free(ptr, layout.size(), layout.align()) }
53    }
54
55    #[inline]
56    unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
57        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
58        // Calling realloc() is safe because preconditions on this function match the trait method preconditions.
59        let _lock = lock::lock();
60        unsafe { (*DLMALLOC.get()).0.realloc(ptr, layout.size(), layout.align(), new_size) }
61    }
62}
63
#[cfg(target_feature = "atomics")]
mod lock {
    use crate::sync::atomic::Ordering::{Acquire, Release};
    use crate::sync::atomic::{Atomic, AtomicI32};

    // 0 = unlocked, 1 = held. Guards all access to the global dlmalloc
    // instance above.
    static LOCKED: Atomic<i32> = AtomicI32::new(0);

    /// Guard returned by `lock()`; releases the lock when dropped.
    pub struct DropLock;

    /// Acquires the global allocator lock, spinning while another thread
    /// holds it (see the long comment below for why we spin).
    pub fn lock() -> DropLock {
        loop {
            // Swapping in 1 and observing 0 means we took the lock; `Acquire`
            // pairs with the `Release` swap in `Drop` below.
            if LOCKED.swap(1, Acquire) == 0 {
                return DropLock;
            }
            // Ok so here's where things get a little depressing. At this point
            // in time we need to synchronously acquire a lock, but we're
            // contending with some other thread. Typically we'd execute some
            // form of `i32.atomic.wait` like so:
            //
            //     unsafe {
            //         let r = core::arch::wasm32::i32_atomic_wait(
            //             LOCKED.as_mut_ptr(),
            //             1,  // expected value
            //             -1, // timeout
            //         );
            //         debug_assert!(r == 0 || r == 1);
            //     }
            //
            // Unfortunately though in doing so we would cause issues for the
            // main thread. The main thread in a web browser *cannot ever
            // block*, no exceptions. This means that the main thread can't
            // actually execute the `i32.atomic.wait` instruction.
            //
            // As a result if we want to work within the context of browsers we
            // need to figure out some sort of allocation scheme for the main
            // thread where when there's contention on the global malloc lock we
            // do... something.
            //
            // Possible ideas include:
            //
            // 1. Attempt to acquire the global lock. If it fails, fall back to
            //    memory allocation via `memory.grow`. Later just ... somehow
            //    ... inject this raw page back into the main allocator as it
            //    gets sliced up over time. This strategy has the downside of
            //    forcing allocation of a page to happen whenever the main
            //    thread contends with other threads, which is unfortunate.
            //
            // 2. Maintain a form of "two level" allocator scheme where the main
            //    thread has its own allocator. Somehow this allocator would
            //    also be balanced with a global allocator, not only to have
            //    allocations cross between threads but also to ensure that the
            //    two allocators stay "balanced" in terms of free'd memory and
            //    such. This, however, seems significantly complicated.
            //
            // Out of a lack of other ideas, the current strategy implemented
            // here is to simply spin. Typical spin loop algorithms have some
            // form of "hint" here to the CPU that it's what we're doing to
            // ensure that the CPU doesn't get too hot, but wasm doesn't have
            // such an instruction.
            //
            // To be clear, spinning here is not a great solution.
            // Another thread with the lock may take quite a long time to wake
            // up. For example it could be in `memory.grow` or it could be
            // evicted from the CPU for a timeslice like 10ms. For these periods
            // of time our thread will "helpfully" sit here and eat CPU time
            // until it itself is evicted or the lock holder finishes. This
            // means we're just burning and wasting CPU time to no one's
            // benefit.
            //
            // Spinning does have the nice properties, though, of being
            // semantically correct, being fair to all threads for memory
            // allocation, and being simple enough to implement.
            //
            // This will surely (hopefully) be replaced in the future with a
            // real memory allocator that can handle the restriction of the main
            // thread.
            //
            // FIXME: We can also possibly add an optimization here to detect
            // when a thread is the main thread or not and block on all
            // non-main-thread threads. Currently, however, we have no way
            // of knowing which wasm thread is on the browser main thread, but
            // if we could figure that out we could at least somewhat mitigate
            // the cost of this spinning.
        }
    }

    impl Drop for DropLock {
        fn drop(&mut self) {
            // `Release` publishes our allocator mutations to the next
            // acquirer's `Acquire` swap in `lock()` above.
            let r = LOCKED.swap(0, Release);
            // We held the lock, so its previous value must have been 1.
            debug_assert_eq!(r, 1);

            // Note that due to the above logic we don't actually need to wake
            // anyone up, but if we did it'd likely look something like this:
            //
            //     unsafe {
            //         core::arch::wasm32::atomic_notify(
            //             LOCKED.as_mut_ptr(),
            //             1, // only one thread
            //         );
            //     }
        }
    }
}
168
#[cfg(not(target_feature = "atomics"))]
mod lock {
    //! Single-threaded fallback: without the `atomics` target feature this
    //! wasm module cannot share memory with other threads, so the allocator
    //! needs no locking at all.

    /// No-op "lock" — there is nothing to synchronize against.
    #[inline]
    pub fn lock() {}
}
173}