xref: /relibc/ralloc/src/brk.rs (revision 5a42b783d46b59c625e781942fbd5bf048a4ac89)
1 //! BRK abstractions.
2 //!
3 //! This module provides safe abstractions over BRK.
4 
5 use prelude::*;
6 
7 use core::ptr;
8 use core::convert::TryInto;
9 
10 use shim::{syscalls, config};
11 
12 use {sync, fail};
13 
/// The BRK mutex.
///
/// This is used for avoiding data races between multiple allocators: every
/// manipulation of the program break goes through this lock, which also
/// guards the cached break state below.
static BRK_MUTEX: Mutex<BrkState> = Mutex::new(BrkState {
    current_brk: None,
});
20 
/// A cache of the BRK state.
///
/// To avoid asking the OS for information whenever needed, we cache it.
struct BrkState {
    /// The program break's end, i.e. the address returned by the last
    /// successful `brk` call. `None` until the first query/change, after
    /// which it mirrors the kernel's view of the break.
    current_brk: Option<Pointer<u8>>,
}
28 
/// A BRK lock.
///
/// Holding this guard grants exclusive access to the program break (and its
/// cached state) until the guard is dropped.
pub struct BrkLock {
    /// The inner lock guard over the cached BRK state.
    state: sync::MutexGuard<'static, BrkState>,
}
34 
35 impl BrkLock {
36     /// Extend the program break.
37     ///
38     /// # Safety
39     ///
40     /// Due to being able shrink the program break, this method is unsafe.
41     unsafe fn sbrk(&mut self, size: isize) -> Result<Pointer<u8>, ()> {
42         log!(NOTE, "Incrementing the program break by {} bytes.", size);
43 
44         // Calculate the new program break. To avoid making multiple syscalls, we make use of the
45         // state cache.
46         let expected_brk = self.current_brk().offset(size);
47 
48         // Break it to me, babe!
49         let old_brk = Pointer::new(syscalls::brk(*expected_brk as *const u8) as *mut u8);
50 
51         /// AAAARGH WAY TOO MUCH LOGGING
52         ///
53         /// No, sweetie. Never too much logging.
54         ///
55         /// REEEEEEEEEEEEEEEEEEEEEE
56         log!(INTERNAL, "Program break set.");
57 
58         if expected_brk == old_brk {
59             // Update the program break cache.
60             self.state.current_brk = Some(expected_brk.clone());
61 
62             // Return the old break.
63             Ok(old_brk)
64         } else {
65             // BRK failed. This syscall is rather weird, but whenever it fails (e.g. OOM) it
66             // returns the old (unchanged) break.
67             Err(())
68         }
69     }
70 
71     /// Safely release memory to the OS.
72     ///
73     /// If failed, we return the memory.
74     #[allow(cast_possible_wrap)]
75     pub fn release(&mut self, block: Block) -> Result<(), Block> {
76         // Check if we are actually next to the program break.
77         if self.current_brk() == Pointer::from(block.empty_right()) {
78             // Logging...
79             log!(DEBUG, "Releasing {:?} to the OS.", block);
80 
81             // We are. Now, sbrk the memory back. Do to the condition above, this is safe.
82             let res = unsafe {
83                 // LAST AUDIT: 2016-08-21 (Ticki).
84 
85                 // Note that the end of the block is addressable, making the size as well. For this
86                 // reason the first bit is unset and the cast will never wrap.
87                 self.sbrk(-(block.size() as isize))
88             };
89 
90             // In debug mode, we want to check for WTF-worthy scenarios.
91             debug_assert!(res.is_ok(), "Failed to set the program break back.");
92 
93             Ok(())
94         } else {
95             // Logging...
96             log!(DEBUG, "Unable to release {:?} to the OS.", block);
97 
98             // Return the block back.
99             Err(block)
100         }
101     }
102 
103     /// Get the current program break.
104     ///
105     /// If not available in the cache, requested it from the OS.
106     fn current_brk(&mut self) -> Pointer<u8> {
107         if let Some(ref cur) = self.state.current_brk {
108             let res = cur.clone();
109             // Make sure that the break is set properly (i.e. there is no libc interference).
110             debug_assert!(res == current_brk(), "The cached program break is out of sync with the \
111                           actual program break. Are you interfering with BRK? If so, prefer the \
112                           provided 'sbrk' instead, then.");
113 
114             return res;
115         }
116 
117         // TODO: Damn it, borrowck.
118         // Get the current break.
119         let cur = current_brk();
120         self.state.current_brk = Some(cur.clone());
121 
122         cur
123     }
124 
125     /// BRK new space.
126     ///
127     /// The first block represents the aligner segment (that is the precursor aligning the middle
128     /// block to `align`), the second one is the result and is of exactly size `size`. The last
129     /// block is the excessive space.
130     ///
131     /// # Failure
132     ///
133     /// This method calls the OOM handler if it is unable to acquire the needed space.
134     // TODO: This method is possibly unsafe.
135     pub fn canonical_brk(&mut self, size: usize, align: usize) -> (Block, Block, Block) {
136         // Calculate the canonical size (extra space is allocated to limit the number of system calls).
137         let brk_size = size + config::extra_brk(size) + align;
138 
139         // Use SBRK to allocate extra data segment. The alignment is used as precursor for our
140         // allocated block. This ensures that it is properly memory aligned to the requested value.
141         // TODO: Audit the casts.
142         let (alignment_block, rest) = unsafe {
143             // LAST AUDIT: 2016-08-21 (Ticki).
144 
145             Block::from_raw_parts(
146                 // Important! The conversion is failable to avoid arithmetic overflow-based
147                 // attacks.
148                 self.sbrk(brk_size.try_into().unwrap()).unwrap_or_else(|()| fail::oom()),
149                 brk_size,
150             )
151         }.align(align).unwrap();
152 
153         // Split the block to leave the excessive space.
154         let (res, excessive) = rest.split(size);
155 
156         // Make some assertions.
157         debug_assert!(res.aligned_to(align), "Alignment failed.");
158         debug_assert!(res.size() + alignment_block.size() + excessive.size() == brk_size, "BRK memory leak.");
159 
160         (alignment_block, res, excessive)
161     }
162 }
163 
164 /// Lock the BRK lock to allow manipulating the program break.
165 pub fn lock() -> BrkLock {
166     BrkLock {
167         state: BRK_MUTEX.lock(),
168     }
169 }
170 
171 /// `SBRK` symbol which can coexist with the allocator.
172 ///
173 /// `SBRK`-ing directly (from the `BRK` syscall or libc) might make the state inconsistent. This
174 /// function makes sure that's not happening.
175 ///
176 /// With the exception of being able to coexist, it follows the same rules. Refer to the relevant
177 /// documentation.
178 ///
179 /// # Failure
180 ///
181 /// On failure the maximum pointer (`!0 as *mut u8`) is returned.
182 pub unsafe extern fn sbrk(size: isize) -> *mut u8 {
183     *lock().sbrk(size).unwrap_or_else(|()| Pointer::new(!0 as *mut u8))
184 }
185 
186 /// Get the current program break.
187 fn current_brk() -> Pointer<u8> {
188     unsafe {
189         // LAST AUDIT: 2016-08-21 (Ticki).
190 
191         Pointer::new(syscalls::brk(ptr::null()) as *mut u8)
192     }
193 }
194 
#[cfg(test)]
mod test {
    use super::*;

    /// The three blocks returned by `canonical_brk` must be ordered:
    /// aligner, then result, then excessive space.
    #[test]
    fn test_ordered() {
        let (aligner, result, excessive) = lock().canonical_brk(20, 1);

        assert!(aligner <= result);
        assert!(result <= excessive);
    }

    /// Consecutive break extensions must yield strictly increasing pointers.
    #[test]
    fn test_brk_grow_up() {
        unsafe {
            let first = lock().sbrk(5).unwrap();
            let second = lock().sbrk(100).unwrap();

            assert!(*first < *second);
        }
    }
}
217