//! The global allocator.
//!
//! This contains primitives for the cross-thread allocator.

use prelude::*;

use core::{mem, ops};

use {brk, tls, sync};
use bookkeeper::{self, Bookkeeper, Allocator};

/// Alias for the wrapper type of the thread-local variable holding the local allocator.
type ThreadLocalAllocator = MoveCell<LazyInit<fn() -> LocalAllocator, LocalAllocator>>;

/// The global default allocator.
// TODO remove these filthy function pointers.
static GLOBAL_ALLOCATOR: sync::Mutex<LazyInit<fn() -> GlobalAllocator, GlobalAllocator>> =
    sync::Mutex::new(LazyInit::new(global_init));
tls! {
    /// The thread-local allocator.
    static THREAD_ALLOCATOR: ThreadLocalAllocator = MoveCell::new(LazyInit::new(local_init));
}

/// Initialize the global allocator.
fn global_init() -> GlobalAllocator {
    // The initial acquired segment.
    let (aligner, initial_segment, excessive) =
        brk::get(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

    // Initialize the new allocator.
    let mut res = GlobalAllocator {
        inner: Bookkeeper::new(unsafe {
            Vec::from_raw_parts(initial_segment, 0)
        }),
    };

    // Free the secondary space.
    res.push(aligner);
    res.push(excessive);

    res
}

/// Initialize the local allocator.
fn local_init() -> LocalAllocator {
    /// The destructor of the local allocator.
    ///
    /// This will simply free everything to the global allocator.
    extern fn dtor(alloc: &ThreadLocalAllocator) {
        // This is important! If we simply moved out of the reference, another dtor could use the
        // value after it was freed. Thus we replace it by the `Unreachable` state, meaning that
        // any request will result in a panic.
        let alloc = alloc.replace(LazyInit::unreachable());

        // Lock the global allocator.
        // TODO dumb borrowck
        let mut global_alloc = GLOBAL_ALLOCATOR.lock();
        let global_alloc = global_alloc.get();

        // TODO: we know this is sorted, so we could exploit that fact for faster insertion into
        // the global allocator.

        alloc.into_inner().inner.for_each(move |block| global_alloc.free(block));
    }

    // The initial acquired segment.
    let initial_segment = GLOBAL_ALLOCATOR
        .lock()
        .get()
        .alloc(4 * bookkeeper::EXTRA_ELEMENTS * mem::size_of::<Block>(), mem::align_of::<Block>());

    unsafe {
        THREAD_ALLOCATOR.register_thread_destructor(dtor).unwrap();

        LocalAllocator {
            inner: Bookkeeper::new(Vec::from_raw_parts(initial_segment, 0)),
        }
    }
}

/// Temporarily get the allocator.
///
/// This is simply to avoid repeating ourselves, so we let this function take care of the hairy
/// stuff.
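///
/// # Examples
///
/// A sketch of how the public entry points below use this helper (this mirrors the body of
/// `alloc`; not a compiled doctest, since this function is private):
///
/// ```ignore
/// let ptr = get_allocator(|alloc| *Pointer::from(alloc.alloc(64, 8)));
/// ```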
fn get_allocator<T, F: FnOnce(&mut LocalAllocator) -> T>(f: F) -> T {
    // Get the thread allocator.
    THREAD_ALLOCATOR.with(|thread_alloc| {
        // Dump a placeholder initializer in the place of the thread-local allocator (TLA).
        let mut thread_alloc_original = thread_alloc.replace(LazyInit::unreachable());

        // Call the closure involved.
        let res = f(thread_alloc_original.get());

        // Put back the original allocator.
        thread_alloc.replace(thread_alloc_original);

        res
    })
}

/// Derives `Deref` and `DerefMut` to the `inner` field.
///
/// This requires importing `core::ops`.
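///
/// # Examples
///
/// As invoked below for the two allocator types (not a compiled doctest; the macro is private
/// to this module):
///
/// ```ignore
/// derive_deref!(GlobalAllocator, Bookkeeper);
/// ```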
macro_rules! derive_deref {
    ($imp:ty, $target:ty) => {
        impl ops::Deref for $imp {
            type Target = $target;

            fn deref(&self) -> &$target {
                &self.inner
            }
        }

        impl ops::DerefMut for $imp {
            fn deref_mut(&mut self) -> &mut $target {
                &mut self.inner
            }
        }
    };
}

/// Global SBRK-based allocator.
///
/// This will extend the data segment whenever new memory is needed. Since this includes leaving
/// userspace, this shouldn't be used when other allocators are available (i.e. the bookkeeper is
/// local).
struct GlobalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

derive_deref!(GlobalAllocator, Bookkeeper);

impl Allocator for GlobalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Obtain what you need.
        let (alignment_block, res, excessive) = brk::get(size, align);
        // Add it to the list. This will not change the order, since the pointer is higher than all
        // the previous blocks (BRK extends the data segment). It is worth noting, though, that the
        // stack is higher than the program break.
        self.push(alignment_block);
        self.push(excessive);

        res
    }
}

/// A local allocator.
///
/// This acquires memory from the upstream (global) allocator, which is protected by a `Mutex`.
pub struct LocalAllocator {
    // The inner bookkeeper.
    inner: Bookkeeper,
}

derive_deref!(LocalAllocator, Bookkeeper);

impl Allocator for LocalAllocator {
    #[inline]
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
        // Get the block from the global allocator. Please note that we cannot canonicalize `size`,
        // because freeing the excessive blocks would change the order.
        GLOBAL_ALLOCATOR.lock().get().alloc(size, align)
    }
}

/// Allocate a block of memory.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
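///
/// # Examples
///
/// A minimal usage sketch. This is illustrative only and assumes the function is re-exported at
/// the crate root as `ralloc::alloc`:
///
/// ```no_run
/// // Allocate 4096 bytes aligned to 16 bytes, then hand the buffer back.
/// let ptr = ralloc::alloc(4096, 16);
/// unsafe { ralloc::free(ptr, 4096); }
/// ```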
#[inline]
pub fn alloc(size: usize, align: usize) -> *mut u8 {
    get_allocator(|alloc| {
        *Pointer::from(alloc.alloc(size, align))
    })
}

/// Free a buffer.
///
/// Note that this does not have to be a buffer allocated through ralloc. The only requirement is
/// that it is not used after the free.
///
/// # Important!
///
/// You should only free buffers allocated through `ralloc`. Anything else is considered invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Rust assumes that the allocation symbols return correct values. For this reason, freeing
/// invalid pointers might introduce memory unsafety.
///
/// Secondly, freeing a buffer that is still in use can introduce use-after-free.
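///
/// # Examples
///
/// A minimal sketch (illustrative only; assumes the crate-root re-export `ralloc::free`):
///
/// ```no_run
/// // Allocate a buffer through `ralloc` and free it again with the same size.
/// let ptr = ralloc::alloc(256, 8);
/// unsafe {
///     ralloc::free(ptr, 256);
/// }
/// ```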
#[inline]
pub unsafe fn free(ptr: *mut u8, size: usize) {
    get_allocator(|alloc| {
        alloc.free(Block::from_raw_parts(Pointer::new(ptr), size))
    });
}

/// Reallocate memory.
///
/// Reallocate the buffer starting at `ptr` with size `old_size`, to a buffer starting at the
/// returned pointer with size `size`.
///
/// # Important!
///
/// You should only reallocate buffers allocated through `ralloc`. Anything else is considered
/// invalid.
///
/// # Errors
///
/// The OOM handler handles out-of-memory conditions.
///
/// # Safety
///
/// Because this can potentially `memcpy` an arbitrary buffer, as well as shrink a buffer, it is
/// marked unsafe.
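///
/// # Examples
///
/// A minimal sketch of growing a buffer (illustrative only; assumes the crate-root re-export
/// `ralloc::realloc`):
///
/// ```no_run
/// let ptr = ralloc::alloc(64, 8);
/// // Grow the 64-byte buffer to 128 bytes; the old pointer must not be used afterwards.
/// let new_ptr = unsafe { ralloc::realloc(ptr, 64, 128, 8) };
/// unsafe { ralloc::free(new_ptr, 128); }
/// ```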
#[inline]
pub unsafe fn realloc(ptr: *mut u8, old_size: usize, size: usize, align: usize) -> *mut u8 {
    get_allocator(|alloc| {
        *Pointer::from(alloc.realloc(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size,
            align
        ))
    })
}

/// Try to reallocate the buffer _inplace_.
///
/// On success, return `Ok(())`; on failure, return `Err(())` and leave the buffer unchanged.
///
/// This can be used to shrink (truncate) a buffer as well.
///
/// # Safety
///
/// Due to being able to shrink (and thus free) the buffer, this is marked unsafe.
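///
/// # Examples
///
/// A minimal sketch of truncating a buffer in place (illustrative only; assumes the crate-root
/// re-export `ralloc::realloc_inplace`):
///
/// ```no_run
/// let ptr = ralloc::alloc(128, 8);
/// unsafe {
///     // Shrinking is expected to succeed inplace; on `Err(())` the buffer is unchanged.
///     if ralloc::realloc_inplace(ptr, 128, 64).is_ok() {
///         ralloc::free(ptr, 64);
///     } else {
///         ralloc::free(ptr, 128);
///     }
/// }
/// ```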
#[inline]
pub unsafe fn realloc_inplace(ptr: *mut u8, old_size: usize, size: usize) -> Result<(), ()> {
    get_allocator(|alloc| {
        if alloc.realloc_inplace(
            Block::from_raw_parts(Pointer::new(ptr), old_size),
            size
        ).is_ok() {
            Ok(())
        } else {
            Err(())
        }
    })
}
258