xref: /relibc/ralloc/src/bookkeeper.rs (revision 8b438f18237bdcff84eab983cc190767187c1b9a)
//! Memory bookkeeping.

use prelude::*;

use core::ops::Range;
use core::{ptr, mem, ops};

/// The number of elements required in capacity _beyond_ the length.
///
/// This represents how many elements are needed to conduct a `reserve` without the
/// stack overflowing, plus one (representing the new element):
///
/// 1. Aligner.
/// 2. Excessive space.
/// 3. The old buffer.
/// 4. The pushed or inserted block.
///
/// See assumption 4.
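///
/// As an illustration (a sketch with made-up numbers) of the capacity rule this constant
/// enforces:
///
/// ```rust
/// const EXTRA_ELEMENTS: usize = 4;
/// // Mirroring the consistency check: a pool of length 10 needs capacity for at
/// // least 14 entries, leaving room for the four extra elements listed above.
/// let (len, capacity) = (10, 16);
/// assert!(len + EXTRA_ELEMENTS <= capacity);
/// ```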
pub const EXTRA_ELEMENTS: usize = 4;

#[cfg(feature = "alloc_id")]
use core::sync::atomic::{self, AtomicUsize};
/// The bookkeeper ID count.
///
/// This is atomically incremented whenever a new `Bookkeeper` is created.
#[cfg(feature = "alloc_id")]
static BOOKKEEPER_ID_COUNTER: AtomicUsize = AtomicUsize::new(0);

/// The memory bookkeeper.
///
/// This stores data about the state of the allocator, and in particular, the free memory.
///
/// The actual functionality is provided by [`Allocator`](./trait.Allocator.html).
pub struct Bookkeeper {
    /// The internal block pool.
    ///
    /// Entries in the block pool can be "empty", meaning that you can overwrite the entry without
    /// breaking consistency.
    ///
    /// # Assumptions
    ///
    /// Certain assumptions are made:
    ///
    /// 1. The list is always sorted with respect to the blocks' pointers.
    /// 2. No two blocks that are consecutive, or delimited only by empty blocks, are adjacent in
    ///    memory, except if the right block is empty.
    /// 3. There are no trailing empty blocks.
    /// 4. The capacity is always `EXTRA_ELEMENTS` blocks more than the length (this is due to
    ///    reallocation pushing at most two elements, so we reserve two or more extra to allow
    ///    pushing one additional element without unbounded recursion).
    ///
    /// These are **not** invariants: if these assumptions are not held, the allocator will simply
    /// act strangely (e.g. logic bugs), but it will not cause memory unsafety.
    pool: Vec<Block>,
    /// Is this bookkeeper currently reserving?
    ///
    /// This is used to avoid unbounded metacircular reallocation (reservation).
    ///
    // TODO find a replacement for this "hack".
    reserving: bool,
    /// The allocator ID.
    ///
    /// This is simply to be able to distinguish allocators in the logs.
    #[cfg(feature = "alloc_id")]
    id: usize,
}

impl Bookkeeper {
    /// Create a new bookkeeper with some initial vector.
    pub fn new(vec: Vec<Block>) -> Bookkeeper {
        // Make sure the assumptions are satisfied.
        debug_assert!(vec.capacity() >= EXTRA_ELEMENTS, "Not enough initial capacity of the vector.");
        debug_assert!(vec.is_empty(), "Initial vector isn't empty.");

        // TODO: Use expression field attributes, when they are added.
        #[cfg(feature = "alloc_id")]
        let res = Bookkeeper {
            pool: vec,
            reserving: false,
            // Increment the ID counter to get a brand new ID.
            id: BOOKKEEPER_ID_COUNTER.fetch_add(1, atomic::Ordering::SeqCst),
        };
        #[cfg(not(feature = "alloc_id"))]
        let res = Bookkeeper {
            pool: vec,
            reserving: false,
        };

        log!(res, "Bookkeeper created.");
        res.check();

        res
    }

    /// Perform a binary search to find the appropriate place where the block can be inserted or
    /// is located.
    ///
    /// It is guaranteed that no block left of the returned index satisfies the above condition.
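    ///
    /// As an illustration of the "move left" step over empty blocks (a sketch using plain
    /// integers, where `0` stands for an empty block):
    ///
    /// ```rust
    /// // Hypothetical pool of block sizes; zeros mark empty entries.
    /// let pool = [10, 0, 0, 25, 40];
    /// // Suppose the binary search returned index 3.
    /// let ind = 3;
    /// // Walk left over the run of empty entries preceding the index.
    /// let ind = ind - pool[..ind].iter().rev().take_while(|&&x| x == 0).count();
    /// assert_eq!(ind, 1);
    /// ```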
    #[inline]
    fn find(&mut self, block: &Block) -> usize {
        // TODO optimize this function.
        // Logging.
        log!(self, "Searching (exact) for {:?}.", block);

        let ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };
        let len = self.pool.len();

        // Move left.
        ind - self.pool.iter_mut()
            .rev()
            .skip(len - ind)
            .take_while(|x| x.is_empty())
            .count()
    }

    /// Perform a binary search to find the appropriate bound where the block can be inserted or
    /// is located.
    ///
    /// It is guaranteed that no block left of the returned bound satisfies the above condition.
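    ///
    /// As a sketch of how the bound arises (plain integers, `0` standing for an empty block):
    /// the lower bound walks left over empty entries, and the upper bound walks right over them.
    ///
    /// ```rust
    /// // Hypothetical pool of block sizes; zeros mark empty entries.
    /// let pool = [10, 0, 0, 25, 0, 40];
    /// // Suppose both binary searches returned index 3.
    /// let (mut left, mut right) = (3, 3);
    /// left -= pool[..left].iter().rev().take_while(|&&x| x == 0).count();
    /// right += pool[right..].iter().take_while(|&&x| x == 0).count();
    /// assert_eq!(left..right, 1..3);
    /// ```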
    #[inline]
    fn find_bound(&mut self, block: &Block) -> Range<usize> {
        // TODO optimize this function.
        // Logging.
        log!(self, "Searching (bounds) for {:?}.", block);

        let mut left_ind = match self.pool.binary_search(block) {
            Ok(x) | Err(x) => x,
        };

        let len = self.pool.len();

        // Move left.
        left_ind -= self.pool.iter_mut()
            .rev()
            .skip(len - left_ind)
            .take_while(|x| x.is_empty())
            .count();

        let mut right_ind = match self.pool.binary_search(&block.empty_right()) {
            Ok(x) | Err(x) => x,
        };

        // Move right.
        right_ind += self.pool.iter()
            .skip(right_ind)
            .take_while(|x| x.is_empty())
            .count();

        left_ind..right_ind
    }

    /// Go over every block in the allocator and call some function.
    ///
    /// Technically, this could be done through an iterator, but this less idiomatic way is
    /// slightly faster in some cases.
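    ///
    /// A hypothetical usage sketch (ignored here, since obtaining a `Bookkeeper` requires a
    /// working allocator; `release_to_breaker` is a made-up helper):
    ///
    /// ```ignore
    /// // Hand every remaining block, including the pool's own buffer, back to some breaker.
    /// bookkeeper.for_each(|block| release_to_breaker(block));
    /// ```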
    pub fn for_each<F: FnMut(Block)>(mut self, mut f: F) {
        // Logging.
        log!(self, "Iterating over the blocks of the bookkeeper...");

        // Run over all the blocks in the pool.
        while let Some(i) = self.pool.pop() {
            f(i);
        }

        // Take the block holding the pool.
        f(Block::from(self.pool));
    }

    /// Perform consistency checks.
    ///
    /// This will check for the following conditions:
    ///
    /// 1. The list is sorted.
    /// 2. No blocks are adjacent.
    ///
    /// This is a no-op in release mode.
    fn check(&self) {
        if cfg!(debug_assertions) {
            // Logging.
            log!(self, "Checking...");

            // Reverse iterator over the blocks.
            let mut it = self.pool.iter().enumerate().rev();

            // Check that the capacity is large enough.
            assert!(self.reserving || self.pool.len() + EXTRA_ELEMENTS <= self.pool.capacity(),
                    "The capacity should be at least {} more than the length of the pool.",
                    EXTRA_ELEMENTS);

            if let Some((_, x)) = it.next() {
                // The iterator is reversed, so this is the last block: make sure it isn't empty.
                assert!(!x.is_empty(), "The trailing block is empty.");

                let mut next = x;
                for (n, i) in it {
                    // Check if sorted.
                    assert!(next >= i, "The block pool is not sorted at index {} ({:?} < {:?}).",
                            n, next, i);
                    // Make sure no blocks are adjacent.
                    assert!(!i.left_to(next) || i.is_empty(), "Adjacent blocks at index {} ({:?} and \
                            {:?})", n, i, next);
                    // Make sure an empty block has the same address as its right neighbor.
                    assert!(!i.is_empty() || i == next, "Empty block not adjacent to right neighbor \
                            at index {} ({:?} and {:?})", n, i, next);

                    // Set the variable tracking the previous block.
                    next = i;
                }

                // Check for trailing empty blocks.
                assert!(!self.pool.last().unwrap().is_empty(), "Trailing empty blocks.");
            }
        }
    }
}

/// An allocator.
///
/// This provides the functionality of the memory bookkeeper, requiring only the provision of one
/// method, defining the "breaker" (the source of fresh memory). The core functionality is
/// provided by default methods, which generally aren't made to be overwritten.
///
/// The reason why these methods aren't implemented directly on the bookkeeper is the distinction
/// between different forms of allocators (global, local, and so on). Any newtype of
/// [`Bookkeeper`](./struct.Bookkeeper.html) can implement this trait and gain the core
/// functionality.
///
/// # Guarantees vs. assumptions
///
/// Please note that whenever a guarantee is mentioned, it relies on all of the overwritten
/// methods upholding the guarantees specified in the documentation.
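///
/// As a sketch (hypothetical, not part of this file) of what such a newtype looks like:
///
/// ```ignore
/// struct MyAllocator(Bookkeeper);
///
/// impl ops::Deref for MyAllocator {
///     type Target = Bookkeeper;
///     fn deref(&self) -> &Bookkeeper { &self.0 }
/// }
///
/// impl ops::DerefMut for MyAllocator {
///     fn deref_mut(&mut self) -> &mut Bookkeeper { &mut self.0 }
/// }
///
/// impl Allocator for MyAllocator {
///     fn alloc_fresh(&mut self, size: usize, align: usize) -> Block {
///         // Get fresh, `align`-aligned memory of at least `size` bytes from
///         // the breaker (e.g. SBRK or a global allocator).
///         unimplemented!()
///     }
/// }
/// ```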
pub trait Allocator: ops::DerefMut<Target = Bookkeeper> {
    /// Allocate _fresh_ space.
    ///
    /// "Fresh" means that the space is allocated through some breaker (be it SBRK or the global
    /// allocator).
    ///
    /// The returned pointer is assumed to be aligned to `align`. If this does not hold, all
    /// future guarantees are invalid.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to the call of this function, it should be afterwards too.
    fn alloc_fresh(&mut self, size: usize, align: usize) -> Block;

    /// Allocate a chunk of memory.
    ///
    /// This function takes a size and an alignment. From these a fitting block is found, to which
    /// a pointer is returned. The block returned is guaranteed to be aligned to `align`.
    ///
    /// # Example
    ///
    /// We start with our initial segment.
    ///
    /// ```notrust
    ///    Address space
    ///   I---------------------------------I
    /// B
    /// l
    /// k
    /// s
    /// ```
    ///
    /// We then split it at the aligner, which is used for making sure that
    /// the pointer is aligned properly.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B   ^    I--------------------------I
    /// l  al
    /// k
    /// s
    /// ```
    ///
    /// We then use the remaining block, but leave the excessive space.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                           I--------I
    /// l        \_________________/
    /// k        our allocated block.
    /// s
    /// ```
    ///
    /// A block representing the marked area is then returned.
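    ///
    /// A hypothetical usage sketch:
    ///
    /// ```ignore
    /// // Request 64 bytes aligned to a 16-byte boundary.
    /// let block = allocator.alloc(64, 16);
    /// debug_assert!(block.aligned_to(16));
    /// debug_assert!(block.size() == 64);
    /// ```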
    fn alloc(&mut self, size: usize, align: usize) -> Block {
        // TODO: scan more intelligently.

        // Logging.
        log!(self, "Allocating {} bytes with alignment {}.", size, align);

        if let Some((n, b)) = self.pool.iter_mut().enumerate().filter_map(|(n, i)| {
            if i.size() >= size {
                // Try to split at the aligner.
                i.align(align).and_then(|(mut a, mut b)| {
                    if b.size() >= size {
                        // Override the old block.
                        *i = a;
                        Some((n, b))
                    } else {
                        // Put the split block back together and place it back in its spot.
                        a.merge_right(&mut b).expect("Unable to merge block right.");
                        *i = a;
                        None
                    }
                })
            } else {
                None
            }
        }).next() {
            if self.pool[n].is_empty() {
                // For the empty alignment invariant.
                let _ = self.remove_at(n);
            }

            let (res, excessive) = b.split(size);

            // Mark the excessive space as free.
            // There are many corner cases that make knowing where to insert it difficult,
            // so we search instead.
            let bound = self.find_bound(&excessive);
            self.free_bound(bound, excessive);

            // Check consistency.
            self.check();
            debug_assert!(res.aligned_to(align), "Alignment failed.");
            debug_assert!(res.size() == size, "Requested space does not match with the returned \
                          block.");

            res
        } else {
            // No fitting block found. Allocate a new block.
            self.alloc_external(size, align)
        }
    }

    /// Free a memory block.
    ///
    /// After this has been called, no guarantees are made about the passed pointer. If it wants
    /// to, it could begin shooting laser beams.
    ///
    /// Freeing an invalid block will drop all future guarantees about this bookkeeper.
    ///
    /// # Example
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B                                  I--------I
    /// l        \_________________/
    /// k     the used block we want to deallocate.
    /// s
    /// ```
    ///
    /// If the blocks are adjacent, we merge them:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B        I-----------------I
    /// l                                  I--------I
    /// k
    /// s
    /// ```
    ///
    /// This gives us:
    ///
    /// ```notrust
    ///    Address space
    ///   I------------------------I
    /// B                                  I--------I
    /// l
    /// k
    /// s
    /// ```
    ///
    /// And we're done. If it cannot be done, we insert the block, while keeping the list sorted.
    /// See [`insert`](#method.insert) for details.
    #[inline]
    fn free(&mut self, block: Block) {
        // Just logging for the unlucky people debugging this shit. No problem.
        log!(self, "Freeing {:?}...", block);

        // Binary search for the block.
        let bound = self.find_bound(&block);

        // Free the given block.
        self.free_bound(bound, block);
    }

    /// Reallocate memory.
    ///
    /// If necessary (inplace reallocation is not possible or feasible) it will allocate a new
    /// buffer, fill it with the contents of the old buffer, and deallocate the replaced buffer.
    ///
    /// The following guarantees are made:
    ///
    /// 1. The returned block is valid and aligned to `align`.
    /// 2. The returned block contains the same data byte-for-byte as the original buffer.
    ///
    /// The data will be truncated if `new_size` is smaller than `block`'s size.
    ///
    /// # Example
    ///
    /// We will first try to perform an in-place reallocation, and if that fails, we will use
    /// memmove.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B \~~~~~~~~~~~~~~~~~~~~~/
    /// l     needed
    /// k
    /// s
    /// ```
    ///
    /// We simply find the block next to our initial block. If this block is free and has
    /// sufficient size, we will simply merge it into our initial block, and leave the excessive
    /// space as free. If these conditions are not met, we have to allocate a new buffer, use
    /// memmove to copy the data over to it, and then deallocate the old one.
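    ///
    /// A hypothetical usage sketch:
    ///
    /// ```ignore
    /// // Grow a block to 256 bytes, 8-byte aligned; the contents are preserved byte-for-byte.
    /// let block = allocator.realloc(block, 256, 8);
    /// debug_assert!(block.size() >= 256);
    /// ```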
    fn realloc(&mut self, block: Block, new_size: usize, align: usize) -> Block {
        // Find the index bound.
        let ind = self.find_bound(&block);

        // Logging.
        log!(self;ind, "Reallocating {:?} to size {} with align {}...", block, new_size, align);

        // Try to do an inplace reallocation.
        match self.realloc_inplace_bound(ind, block, new_size) {
            Ok(block) => block,
            Err(block) => {
                // Reallocation cannot be done inplace.

                // Allocate a new block with the requested size.
                let mut res = self.alloc(new_size, align);

                // Copy the old data to the new location.
                block.copy_to(&mut res);

                // Free the old block.
                // Allocation may have moved the insertion point, so we search again.
                let bound = self.find_bound(&block);
                self.free_bound(bound, block);

                // Check consistency.
                self.check();
                debug_assert!(res.aligned_to(align), "Alignment failed.");
                debug_assert!(res.size() >= new_size, "Requested space does not match with the \
                              returned block.");

                res
            },
        }
    }

    /// Extend/shrink the buffer inplace.
    ///
    /// This will try to extend the buffer without copying, if the new size is larger than the
    /// old one. If not, the block is truncated and the excess placed back in the pool.
    ///
    /// On failure, return `Err(Block)` with the old _intact_ block. Shrinking cannot fail.
    ///
    /// This shouldn't be used when the index bound is known, since this performs a binary search
    /// to find the block's index. When you know the bound, use
    /// [`realloc_inplace_bound`](#method.realloc_inplace_bound).
    #[inline]
    fn realloc_inplace(&mut self, block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        log!(self, "Reallocating {:?} inplace to {}...", block, new_size);

        // Find the bounds of the given block.
        let bound = self.find_bound(&block);

        // Go for it!
        let res = self.realloc_inplace_bound(bound, block, new_size);

        // Check consistency.
        debug_assert!(res.as_ref().ok().map_or(true, |x| x.size() == new_size), "Requested space \
                      does not match with the returned block.");

        res
    }

    /// Reallocate a block inplace on a known index bound.
    ///
    /// See [`realloc_inplace`](#method.realloc_inplace) for more information.
    fn realloc_inplace_bound(&mut self, ind: Range<usize>, mut block: Block, new_size: usize) -> Result<Block, Block> {
        // Logging.
        log!(self;ind, "Try inplace reallocating {:?} to size {}.", block, new_size);

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        if new_size <= block.size() {
            // Shrink the block.
            log!(self;ind, "Shrinking {:?}.", block);

            // Split the block in two segments, the main segment and the excessive segment.
            let (block, excessive) = block.split(new_size);
            // Free the excessive segment.
            self.free_bound(ind, excessive);

            // Make some assertions to avoid dumb bugs.
            debug_assert!(block.size() == new_size, "Block wasn't shrunk properly.");

            // Run a consistency check.
            self.check();

            return Ok(block);
        } else {
            // Check the entry to the right of the bound, if any (`get_mut` returns `None` when
            // `ind.end` is the end of the array).
            let mut mergable = false;
            if let Some(entry) = self.pool.get_mut(ind.end) {
                mergable = entry.size() + block.size() >= new_size && block.left_to(entry);
            }
            // Note that we are sure that no segments in the array are adjacent (unless they have
            // size 0). This way we know that we will need at most one block for extending the
            // current block.
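            // For example (a sketch with made-up numbers): if `block` is 16 bytes and the free
            // entry at `ind.end` is 48 bytes starting exactly where `block` ends, any
            // `new_size <= 16 + 48 = 64` can be served by merging the two and splitting off the
            // remaining `64 - new_size` bytes as excessive space.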
            if mergable {
                // Logging...
                log!(self;ind, "Merging {:?} to the right.", block);

                // We'll merge it with the block at the end of the range.
                block.merge_right(&mut self.remove_at(ind.end))
                    .expect("Unable to merge block right, to the end of the range.");
                // Merge succeeded.

                // Place the excessive block back.
                let (res, excessive) = block.split(new_size);
                // `remove_at` may have shortened the vector.
                if ind.start == self.pool.len() {
                    self.push(excessive);
                } else if !excessive.is_empty() {
                    self.pool[ind.start] = excessive;
                }
                // The block will still not be adjacent, due to `excessive` being guaranteed to
                // not be adjacent to the next block.

                // Run a consistency check.
                self.check();

                // TODO, drop excessive space
                return Ok(res);
            }
        }

        Err(block)
    }

    /// Free a block placed in some index bound.
    ///
    /// This will insert at most one element.
    ///
    /// See [`free`](#method.free) for more information.
    #[inline]
    fn free_bound(&mut self, ind: Range<usize>, mut block: Block) {
        // Logging.
        log!(self;ind, "Freeing {:?}.", block);

        // Short circuit in case of an empty block.
        if block.is_empty() { return; }

        // When compiled with `security`, we zero this block.
        block.sec_zero();

        if ind.start == self.pool.len() {
            self.push(block);
            return;
        }

        // Assertions...
        debug_assert!(self.find(&block) == ind.start, "Block is not inserted at the appropriate \
                      index.");

        // Try to merge it with the block to the right.
        if ind.end < self.pool.len() && block.left_to(&self.pool[ind.end]) {
            // Merge the block with the rightmost block in the range.
            block.merge_right(&mut self.remove_at(ind.end))
                .expect("Unable to merge block right to the block at the end of the range");

            // The merging succeeded. We proceed to try to close in the possible gap.
            if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
                self.check();
                return;
            }
        // Dammit, let's try to merge left.
        } else if ind.start != 0 && self.pool[ind.start - 1].merge_right(&mut block).is_ok() {
            // Check consistency.
            self.check();

            return;
        }

        // Well, it failed, so we insert it the old-fashioned way.
        self.insert(ind.start, block);

        // Check consistency.
        self.check();
    }

    /// Allocate external ("fresh") space.
    ///
    /// "Fresh" means that the space is allocated through the breaker.
    ///
    /// The returned pointer is guaranteed to be aligned to `align`.
    fn alloc_external(&mut self, size: usize, align: usize) -> Block {
        // Logging.
        log!(self, "Fresh allocation of size {} with alignment {}.", size, align);

        // Break it to me!
        let res = self.alloc_fresh(size, align);

        // Check consistency.
        self.check();

        res
    }

    /// Push an element without reserving.
    fn push(&mut self, mut block: Block) {
        // Logging.
        log!(self;self.pool.len(), "Pushing {:?}.", block);

        // Short-circuit in case of an empty block.
        if !block.is_empty() {
            // Some assertions...
            debug_assert!(self.pool.is_empty() || &block > self.pool.last().unwrap(), "Pushing will \
                          make the list unsorted.");

            // We will try to simply merge it with the last block. Note that trailing empty
            // blocks are not allowed, hence the last block is the only non-empty candidate
            // which may be adjacent to `block`.
            if let Some(x) = self.pool.last_mut() {
                if x.merge_right(&mut block).is_ok() {
                    return;
                }
            }

            // Merging failed. Reserve space and free the old buffer.
            if let Some(x) = unborrow!(self.reserve(self.pool.len() + 1)) {
                self.free(x);
            }

            // We push.
            let res = self.pool.push(block);

            // Make some assertions.
            debug_assert!(res.is_ok(), "Push failed (buffer full).");
        }

        // Check consistency.
        self.check();
    }

    /// Reserve some number of elements, and return the old buffer's block.
    ///
    /// # Assumptions
    ///
    /// This is assumed to not modify the order. If some block `b` is associated with index `i`
    /// prior to the call of this function, it should be afterwards too.
    fn reserve(&mut self, min_cap: usize) -> Option<Block> {
        // Logging.
        log!(self;min_cap, "Reserving {}.", min_cap);

        if !self.reserving && (self.pool.capacity() < self.pool.len() + EXTRA_ELEMENTS || self.pool.capacity() < min_cap + EXTRA_ELEMENTS) {
            // Reserve a little extra for performance reasons.
            let new_cap = (min_cap + EXTRA_ELEMENTS) * 2 + 16;
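            // E.g. (made-up numbers): `min_cap = 10` gives `new_cap = (10 + 4) * 2 + 16 = 44`.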

            // Catch 'em all.
            debug_assert!(new_cap > self.pool.capacity(), "Reserve shrinks?!");

            // Make sure no unbounded reallocation happens.
            self.reserving = true;

            // Break it to me!
            let new_buf = self.alloc_external(new_cap * mem::size_of::<Block>(), mem::align_of::<Block>());

            // Go back to the original state.
            self.reserving = false;

            Some(self.pool.refill(new_buf))
        } else {
            None
        }
    }

    /// Insert a block entry at some index.
    ///
    /// If the space is non-empty, the elements will be pushed to the right, filling out the
    /// first empty gap.
    ///
    /// # Warning
    ///
    /// This might in fact break the order.
    ///
    /// # Panics
    ///
    /// Panics when `ind` is greater than the block pool's length.
    ///
    /// # Example
    ///
    /// We want to insert the block denoted by the tildes into our list. Perform a binary search to
    /// find where insertion is appropriate.
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here                      I--------I
    /// l                                              I------------I
    /// k
    /// s                                                             I---I
    ///                  I~~~~~~~~~~I
    /// ```
    ///
    /// We keep pushing the blocks to the right, to the next entry, until an empty entry is
    /// reached:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here                      I--------I <~ this one cannot move down, due to being blocked.
    /// l
    /// k                                              I------------I <~ thus we have moved this one down.
    /// s                                                             I---I
    ///              I~~~~~~~~~~I
    /// ```
    ///
    /// Repeating yields:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B < here
    /// l                             I--------I <~ this one cannot move down, due to being blocked.
    /// k                                              I------------I <~ thus we have moved this one down.
    /// s                                                             I---I
    ///              I~~~~~~~~~~I
    /// ```
    ///
    /// Now an empty space is left out, meaning that we can insert the block:
    ///
    /// ```notrust
    ///    Address space
    ///   I------I
    /// B            I----------I
    /// l                             I--------I
    /// k                                              I------------I
    /// s                                                             I---I
    /// ```
    ///
    /// The insertion is now completed.
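    ///
    /// As a sketch of the shifting idea on plain integers (a hypothetical pool where `0` stands
    /// for an empty entry):
    ///
    /// ```rust
    /// let mut pool = vec![10, 20, 25, 0, 40];
    /// let (ind, block) = (1, 15);
    /// // Find the first empty entry (the gap) at or after the insertion index.
    /// let gap = pool.iter().skip(ind).position(|&x| x == 0).map(|n| n + ind).unwrap();
    /// // Shift `ind..gap` one step to the right, consuming the gap entry.
    /// for i in (ind..gap).rev() {
    ///     pool[i + 1] = pool[i];
    /// }
    /// // Write the new block into the freed slot.
    /// pool[ind] = block;
    /// assert_eq!(pool, [10, 15, 20, 25, 40]);
    /// ```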
    #[inline]
    fn insert(&mut self, ind: usize, block: Block) {
        // Logging.
        log!(self;ind, "Inserting block {:?}...", block);

        // Bound check.
        assert!(self.pool.len() >= ind, "Insertion out of bounds.");

        // Some assertions...
        debug_assert!(self.pool.len() <= ind || block <= self.pool[ind], "Inserting at {} will make \
                      the list unsorted.", ind);
        debug_assert!(self.find(&block) == ind, "Block is not inserted at the appropriate index.");
        debug_assert!(!block.is_empty(), "Inserting an empty block.");

        // Find the next gap, i.e. the next empty entry, after the index.
        let gap = self.pool
            .iter()
            .enumerate()
            // We only check _after_ the index.
            .skip(ind)
            // Find the first empty block.
            .find(|&(_, x)| x.is_empty())
            .map(|(n, _)| n);

        // Log the operation.
        log!(self;ind, "Shifting all blocks up to index {} one position to the right.",
             gap.unwrap_or_else(|| self.pool.len()));

        // The old vector's buffer.
        let mut old_buf = None;

        // We will only extend the length if we were unable to fit it into the current length.
        if gap.is_none() {
            // Loooooooging...
            log!(self;ind, "Block pool not long enough for shift. Extending.");

            // Reserve space. This does not break order, due to the assumption that
            // `reserve` never breaks order.
            old_buf = unborrow!(self.reserve(self.pool.len() + 1));

            // We will move a block into reserved memory but outside of the vec's bounds. For
            // that reason, we push an uninitialized element to extend the length, which will
            // be assigned in the memcpy.
            let res = self.pool.push(unsafe { mem::uninitialized() });

            // Just some assertions...
            debug_assert!(res.is_ok(), "Push failed (buffer full).");
        }

        unsafe {
            // Memmove the elements to make a gap to the new block.
            ptr::copy(self.pool.get_unchecked(ind) as *const Block,
                      self.pool.get_unchecked_mut(ind + 1) as *mut Block,
                      // The gap defaults to the end of the pool.
                      gap.unwrap_or_else(|| self.pool.len() - 1) - ind);
                      //                                    ^^^
                      // We decrement to account for the push. Please note how we only push in the
                      // `if` block above, with the condition that `gap` is `None`, which is the
                      // only case in which this closure is evaluated.

            // Set the element.
            ptr::write(self.pool.get_unchecked_mut(ind), block);
        }

        // Free the old buffer, if it exists.
        if let Some(block) = old_buf {
            self.free(block);
        }

        // Check consistency.
        self.check();
    }

    /// Remove the block at some index, returning it.
    fn remove_at(&mut self, ind: usize) -> Block {
        // Logging.
        log!(self;ind, "Removing block.");

        if ind + 1 == self.pool.len() {
            let res = self.pool[ind].pop();
            // Make sure there are no trailing empty blocks.
            let new_len = self.pool.len() - self.pool.iter().rev().take_while(|x| x.is_empty()).count();

            // Truncate the vector.
            self.pool.truncate(new_len);
            res
        } else {
            // Calculate empty blocks pinned to the left edge of the block to the right.
            let empty = self.pool[ind + 1].empty_left();
            let empty2 = empty.empty_left();

            // Replace the block at `ind` with the left empty block from `ind + 1`.
            let res = mem::replace(&mut self.pool[ind], empty);

            // Iterate over the pool from `ind` and down, while the entries are empty.
            let skip = self.pool.len() - ind;
            for place in self.pool.iter_mut().rev().skip(skip).take_while(|x| x.is_empty()) {
                // Empty the blocks, anchoring them to their new right neighbor's address.
                *place = empty2.empty_left();
            }

            res
        }
    }
}