//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::{mem, ops};

use {brk, sync};
use bookkeeper::{self, Bookkeeper, Allocator};

#[cfg(feature = "tls")]
use tls;

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
#[cfg(feature = "tls")]
type ThreadLocalAllocator = MoveCell<Option<LazyInit<fn() -> LocalAllocator, LocalAllocator>>>;

/// The global default allocator.
// TODO: Remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(GlobalAllocator::init));
#[cfg(feature = "tls")]
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator = MoveCell::new(Some(LazyInit::new(LocalAllocator::init)));
}

/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this take care of the hairy stuff:
///
/// 1. Initialize the allocator if needed.
/// 2. If the local allocator is unavailable (e.g. it has been deinitialized), fall back to the
///    global allocator.
/// 3. Unlock/move temporarily out of reference.
///
/// This is a macro due to the lack of generic closures, which makes it impossible to have one
/// closure for both cases (global and local).
// TODO: Instead of falling back to the global allocator, the thread dtor should be set such that
// it runs after the TLS keys that might be declared.
macro_rules! get_allocator {
    (|$v:ident| $b:expr) => {{
        // Get the thread allocator, if TLS is enabled.
        #[cfg(feature = "tls")]
        {
            THREAD_ALLOCATOR.with(|thread_alloc| {
                if let Some(mut thread_alloc_original) = thread_alloc.replace(None) {
                    let res = {
                        // Call the closure involved.
                        let $v = thread_alloc_original.get();
                        $b
                    };

                    // Put back the original allocator.
                    thread_alloc.replace(Some(thread_alloc_original));

                    res
                } else {
                    // The local allocator seems to have been deinitialized; for this reason we
                    // fall back to the global allocator.

                    // Lock the global allocator.
                    let mut guard = GLOBAL_ALLOCATOR.lock();

                    // Call the block in question.
                    let $v = guard.get();
                    $b
                }
            })
        }

        // TLS is disabled, so just use the global allocator.
        #[cfg(not(feature = "tls"))]
        {
            // Lock the global allocator.
            let mut guard = GLOBAL_ALLOCATOR.lock();

            // Call the block in question.
            let $v = guard.get();
            $b
        }
    }}
}
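
// As a usage sketch, a call such as
//
//     get_allocator!(|alloc| alloc.alloc(size, align))
//
// expands (with TLS enabled) to: temporarily `replace` the thread-local slot with `None`,
// bind the initialized allocator to `alloc`, evaluate the body, and restore the slot
// afterwards. Here `size` and `align` are hypothetical locals of the caller; the
// `replace(None)` dance is what keeps the slot consistent if the body itself touches the
// thread-local state.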

/// Derives `Deref` and `DerefMut` to the `inner` field.
///
/// This requires importing `core::ops`.
macro_rules! derive_deref {
    ($imp:ty, $target:ty) => {
        impl ops::Deref for $imp {
            type Target = $target;

            fn deref(&self) -> &$target {
                &self.inner
            }
        }

        impl ops::DerefMut for $imp {
            fn deref_mut(&mut self) -> &mut $target {
                &mut self.inner
            }
        }
    };
}
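
// For example, `derive_deref!(GlobalAllocator, Bookkeeper)` below expands to an
// `ops::Deref` impl with `Target = Bookkeeper` plus the matching `ops::DerefMut`, both
// forwarding to the `inner` field. This is what lets the allocators call `Bookkeeper`
// methods (such as `push`) directly on `self`.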

/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl GlobalAllocator {
    /// Initialize the global allocator.
    fn init() -> GlobalAllocator {
        // The initial acquired segment. `canonical_brk` returns the alignment padding block,
        // the usable segment itself, and the excessive space beyond it.
        let (aligner, initial_segment, excessive) = brk::lock().canonical_brk(
            4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
            mem::align_of::<Block>(),
        );

        // Initialize the new allocator.
        let mut res = GlobalAllocator {
            inner: Bookkeeper::new(unsafe {
                Vec::from_raw_parts(initial_segment, 0)
            }),
        };

        // Free the secondary space.
        res.push(aligner);
        res.push(excessive);

        res
    }
}

derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what you need.
        let (alignment_block, res, excessive) = brk::lock().canonical_brk(size, align);

        // Add it to the list. This will not change the order, since the pointer is higher than all
        // the previous blocks (BRK extends the data segment). It is worth noting, though, that the
        // stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }
}

/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
#[cfg(feature = "tls")]
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

impl LocalAllocator {
    /// Initialize the local allocator.
    #[cfg(feature = "tls")]
    fn init() -> LocalAllocator {
        /// The destructor of the local allocator.
        ///
        /// This will simply free everything to the global allocator.
        extern fn dtor(alloc: &ThreadLocalAllocator) {
            // This is important! The thread destructors provide no ordering guarantees, so the
            // allocator could be used _after_ this destructor has finished. In fact, this is a
            // real problem which shows up when using `Arc` and terminating the main thread. For
            // this reason we place `None` as a permanent marker indicating that the allocator is
            // deinitialized. Once that state is in place, all allocation calls are redirected to
            // the global allocator, which is of course still usable at this point.
            let alloc = alloc.replace(None).expect("Thread-local allocator is already freed.");

            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            // TODO: We know this is sorted, so we could exploit that fact for faster insertion
            // into the global allocator.

            alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
        }

        // The initial acquired segment.
        let initial_segment = GLOBAL_ALLOCATOR
            .lock()
            .get()
            .alloc(
                4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(),
                mem::align_of::<Block>(),
            );

        unsafe {
            // Register the thread destructor on the current thread.
            THREAD_ALLOCATOR.register_thread_destructor(dtor)
                .expect("Unable to register a thread destructor.");

            LocalAllocator {
                inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
            }
        }
    }

    /// Should we memtrim this allocator?
    ///
    /// The idea is to free memory to the global allocator to unify small stubs and avoid
    /// fragmentation and thread accumulation.
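    ///
    /// As a worked example (using the constants defined in the body below, and assuming that
    /// `len()` counts the blocks in the local pool): a pool of 100 blocks totalling 800 free
    /// bytes falls below the fragmentation threshold (800 < 10 * 100), so it is memtrimmed;
    /// a pool of 2 blocks totalling 20000 free bytes exceeds the local memtrim limit
    /// (20000 > 16384), so it is memtrimmed as well.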
    fn should_memtrim(&self) -> bool {
        // TODO: Tweak this.

        /// The fragmentation scale constant.
        ///
        /// This is used for determining the minimum average block size before memtrimming.
        const FRAGMENTATION_SCALE: usize = 10;
        /// The local memtrim limit.
        ///
        /// Whenever an allocator has more free bytes than this value, it will be memtrimmed.
        const LOCAL_MEMTRIM_LIMIT: usize = 16384;

        self.total_bytes() < FRAGMENTATION_SCALE * self.len() || self.total_bytes() > LOCAL_MEMTRIM_LIMIT
    }
}

#[cfg(feature = "tls")]
derive_deref!(LocalAllocator, Bookkeeper);

#[cfg(feature = "tls")]
impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Please note that we cannot canonicalize
        // `size`, since freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }

    #[inline]
    fn on_new_memory(&mut self) {
        if self.should_memtrim() {
            // Lock the global allocator.
            let mut global_alloc = GLOBAL_ALLOCATOR.lock();
            let global_alloc = global_alloc.get();

            while let Some(block) = self.pop() {
                // Pop'n'free.
                global_alloc.free(block);

                // Memtrim until we can't memtrim anymore.
                if !self.should_memtrim() { break; }
            }
        }
    }
}

/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
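///
/// # Examples
///
/// A minimal sketch of pairing `alloc` with `free` (this assumes the two functions are
/// re-exported at the crate root; marked `ignore` since it requires ralloc as the active
/// allocator):
///
/// ```ignore
/// // Allocate four bytes with no particular alignment requirement.
/// let ptr = ralloc::alloc(4, 1);
///
/// unsafe {
///     // Use the buffer...
///     *ptr = 42;
///
///     // ...and hand it back, passing the same size as was allocated.
///     ralloc::free(ptr, 4);
/// }
/// ```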
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    get_allocator!(|alloc| *Pointer::from(alloc.alloc(size, align)))
}

/// Free a buffer.
///
/// Note that this does not strictly have to be a buffer allocated through ralloc. The only hard
/// requirement is that it is not used after the free. See, however, the note below.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    get_allocator!(|alloc| alloc.free(Block::from_raw_parts(Pointer::new(ptr), size)))
}

/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// This is marked unsafe because it can potentially memcpy from an arbitrary buffer, and
/// because it can shrink a buffer (thereby freeing part of it).
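///
/// # Examples
///
/// A minimal sketch of growing a buffer (again assuming the crate-root re-exports; marked
/// `ignore` since it requires ralloc as the active allocator):
///
/// ```ignore
/// let ptr = ralloc::alloc(2, 1);
///
/// unsafe {
///     // Grow the two-byte buffer to eight bytes; the old pointer must not be used
///     // afterwards, since the contents may have been moved.
///     let new_ptr = ralloc::realloc(ptr, 2, 8, 1);
///
///     ralloc::free(new_ptr, 8);
/// }
/// ```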
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    get_allocator!(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}

/// Try to reallocate the buffer _inplace_.
///
/// In case of success, return `Ok(())`; the buffer now has the requested size. On failure,
/// return `Err(())`, and the buffer keeps its old size.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
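///
/// # Examples
///
/// A minimal sketch of shrinking a buffer in place (assuming the crate-root re-exports; marked
/// `ignore` since it requires ralloc as the active allocator):
///
/// ```ignore
/// let ptr = ralloc::alloc(8, 1);
///
/// unsafe {
///     // Shrinking to a smaller size should succeed in place.
///     assert!(ralloc::realloc_inplace(ptr, 8, 4).is_ok());
///
///     // Free with the size the buffer has *now*.
///     ralloc::free(ptr, 4);
/// }
/// ```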
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    get_allocator!(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}
344