//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::{mem, ops};

use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};

use shim::config;

#[cfg(feature = "tls")]
use tls;

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;

/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
#[cfg(feature = "tls")]
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator = MoveCell::new(Some(LazyInit::new(LocalAllocator::init)));
}

/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this take care of the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the local allocator has been deinitialized, fall back to the global allocator.
/// 3. Unlock/move temporarily out of reference.
///
/// This is a macro due to the lack of generic closures, which makes it impossible to have one
/// closure for both cases (global and local).
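///
/// # Example
///
/// A sketch of how this macro is used by the public functions below (illustrative only; the
/// macro expands to the TLS path or the global path depending on the `tls` feature):
///
/// ```ignore
/// // `alloc` is bound to the local allocator (or, as a fallback, the global one) for the
/// // duration of the block.
/// get_allocator!(|alloc| alloc.alloc(size, align))
/// ```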
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
    (|$v:ident| $b:expr) => {{
        // Get the thread allocator, if TLS is enabled.
        #[cfg(feature = "tls")]
        {
            THREAD_ALLOCATOR.with(|thread_alloc| {
                if let Some(mut thread_alloc_original) = thread_alloc.replace(None) {
                    let res = {
                        // Call the closure involved.
                        let $v = thread_alloc_original.get();
                        $b
                    };

                    // Put back the original allocator.
                    thread_alloc.replace(Some(thread_alloc_original));

                    res
                } else {
                    // The local allocator seems to have been deinitialized, so we fall back to
                    // the global allocator.
                    log!(WARNING, "Accessing the allocator after deinitialization of the local allocator.");

                    // Lock the global allocator.
                    let mut guard = GLOBAL_ALLOCATOR.lock();

                    // Call the block in question.
                    let $v = guard.get();
                    $b
                }
            })
        }

        // TLS is disabled, so just use the global allocator.
        #[cfg(not(feature = "tls"))]
        {
            // Lock the global allocator.
            let mut guard = GLOBAL_ALLOCATOR.lock();

            // Call the block in question.
            let $v = guard.get();
            $b
        }
    }}
}

/// Derives `Deref` and `DerefMut` to the `inner` field.
///
/// This requires importing `core::ops`.
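///
/// # Example
///
/// A rough sketch of the expansion, for the invocation used below in this file:
///
/// ```ignore
/// derive_deref!(GlobalAllocator, Bookkeeper);
/// // ...expands to:
/// impl ops::Deref for GlobalAllocator {
///     type Target = Bookkeeper;
///
///     fn deref(&self) -> &Bookkeeper {
///         &self.inner
///     }
/// }
/// // ...plus the matching `ops::DerefMut` impl.
/// ```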
macro_rules! derive_deref {
    ($imp:ty, $target:ty) => {
        impl ops::Deref for $imp {
            type Target = $target;

            fn deref(&self) -> &$target {
                &self.inner
            }
        }

        impl ops::DerefMut for $imp {
            fn deref_mut(&mut self) -> &mut $target {
                &mut self.inner
            }
        }
    };
}

/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl GlobalAllocator {
    /// Initialize the global allocator.
    fn init() -> GlobalAllocator {
        // Logging.
        log!(NOTE, "Initializing the global allocator.");

        // The initial acquired segment, along with the alignment padding block below it and the
        // excessive block above it.
        let (aligner, initial_segment, excessive) =
            brk::lock().canonical_brk(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        // Initialize the new allocator.
        let mut res = GlobalAllocator {
            inner: Bookkeeper::new(unsafe {
                // LAST AUDIT: 2016-08-21 (Ticki).

                Vec::from_raw_parts(initial_segment, 0)
            }),
        };

        // Free the secondary space.
        res.push(aligner);
        res.push(excessive);

        res
    }
}

derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what you need.
        let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than all
        // the previous blocks (BRK extends the data segment). It is worth noting, though, that
        // the stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }

    fn on_new_memory(&mut self) {
        if self.total_bytes() > config::OS_MEMTRIM_LIMIT {
            // Memtrim the fack outta 'em.

            // Pop the last block.
            let block = self.pop().expect("The byte count on the global allocator is invalid.");

            // Check if the memtrim is worth it.
            if block.size() >= config::OS_MEMTRIM_WORTHY {
                // Logging.
                log!(NOTE, "Memtrimming the global allocator.");

                // Release the block to the OS.
                if let Err(block) = brk::lock().release(block) {
                    // It failed; put the block back.
                    // TODO: This can be done faster.
                    self.push(block);
                }

                // Note that this block is the only block next to the program break, due to the
                // segments being as long as possible. For that reason, repeatedly popping and
                // releasing would fail.
            } else {
                // Logging.
                log!(WARNING, "Memtrimming for the global allocator failed.");

                // Push the block back.
                // TODO: This can be done faster.
                self.push(block);
            }
        }
    }
}

/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

#[cfg(feature = "tls")]
impl LocalAllocator {
    /// Initialize the local allocator.
    fn init() -> LocalAllocator {
        /// The destructor of the local allocator.
        ///
        /// This will simply free everything to the global allocator.
        extern fn dtor(alloc: &ThreadLocalAllocator) {
            // Logging.
            log!(NOTE, "Deinitializing and freeing the local allocator.");

            // This is important! The thread destructors do not guarantee any ordering, so the
            // allocator could be used _after_ this destructor has finished. In fact, this is a
            // real problem, and it happens when using `Arc` and terminating the main thread. For
            // this reason we place `None` as a permanent marker indicating that the allocator is
            // deinitialized. After such a state is in place, all allocation calls will be
            // redirected to the global allocator, which is of course still usable at this moment.
            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            // TODO: We know this is sorted, so we could exploit that fact for faster insertion
            // into the global allocator.

            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
        }

        // Logging.
        log!(NOTE, "Initializing the local allocator.");

        // The initial acquired segment.
        let initial_segment = GLOBAL_ALLOCATOR
            .lock()
            .get()
            .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

        unsafe {
            // LAST AUDIT: 2016-08-21 (Ticki).

            // Register the thread destructor on the current thread.
            THREAD_ALLOCATOR.register_thread_destructor(dtor);

            LocalAllocator {
                inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
            }
        }
    }
}

#[cfg(feature = "tls")]
derive_deref!(LocalAllocator, Bookkeeper);

#[cfg(feature = "tls")]
impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Please note that we cannot canonicalize
        // `size`, since freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }

    #[inline]
    fn on_new_memory(&mut self) {
        // The idea is to free memory to the global allocator to unify small stubs and avoid
        // fragmentation and per-thread accumulation.
        if self.total_bytes() < config::FRAGMENTATION_SCALE * self.len()
           || self.total_bytes() > config::LOCAL_MEMTRIM_LIMIT {
            // Logging.
            log!(NOTE, "Memtrimming the local allocator.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            while let Some(block) = self.pop() {
                // Pop'n'free.
                global_alloc.free(block);

                // Memtrim 'till we won't memtrim anymore.
                if self.total_bytes() < config::LOCAL_MEMTRIM_STOP { break; }
            }
        }
    }
}

/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
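///
/// # Examples
///
/// A minimal usage sketch (marked `ignore`, since running it requires ralloc to be linked in and
/// set up as the allocator):
///
/// ```ignore
/// // Allocate 64 bytes with an alignment of 8.
/// let ptr = alloc(64, 8);
///
/// unsafe {
///     // The buffer is writable...
///     *ptr = 42;
///
///     // ...and must eventually be freed with the same size.
///     free(ptr, 64);
/// }
/// ```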
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Allocating buffer of size {} (align {}).", size, align);

    get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
}

/// Free a buffer.
///
/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
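///
/// # Examples
///
/// A minimal sketch of a correct free (`ignore`d for the same reason as the `alloc` example):
///
/// ```ignore
/// let ptr = alloc(32, 1);
///
/// unsafe {
///     // Hand the buffer back; `ptr` must not be dereferenced afterwards.
///     free(ptr, 32);
/// }
/// ```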
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    log!(CALL, "Freeing buffer of size {}.", size);

    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
}

/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Because this can potentially memcpy an arbitrary buffer, as well as shrink a buffer, it is
/// marked unsafe.
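///
/// # Examples
///
/// A minimal sketch (`ignore`d; see the `alloc` example):
///
/// ```ignore
/// let ptr = alloc(16, 1);
///
/// unsafe {
///     // Grow the buffer from 16 to 128 bytes. The contents are preserved, but the buffer may
///     // move, so only the returned pointer may be used afterwards.
///     let ptr = realloc(ptr, 16, 128, 1);
///
///     free(ptr, 128);
/// }
/// ```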
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    log!(CALL, "Reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}

/// Try to reallocate the buffer _inplace_.
///
/// In case of success, return `Ok(())`; the buffer then has the new size. On failure, return
/// `Err(())` and leave the buffer with its old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
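///
/// # Examples
///
/// A minimal sketch (`ignore`d; see the `alloc` example). The buffer's size after the call
/// depends on whether the inplace reallocation succeeded:
///
/// ```ignore
/// let ptr = alloc(128, 1);
///
/// unsafe {
///     // Try to shrink the buffer in place from 128 to 64 bytes.
///     match realloc_inplace(ptr, 128, 64) {
///         // Success: the buffer is now 64 bytes long.
///         Ok(()) => free(ptr, 64),
///         // Failure: the buffer keeps its old size.
///         Err(()) => free(ptr, 128),
///     }
/// }
/// ```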
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    log!(CALL, "Inplace reallocating buffer of size {} to new size {}.", old_size, size);

    get_allocator!(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}