
Commit f228ae1

Committed Mar 9, 2020
Use AllocRef for ZSTs in RawVec
1 parent f943349 commit f228ae1

2 files changed: +257 −335 lines changed
 

src/liballoc/raw_vec.rs

Lines changed: 256 additions & 335 deletions
@@ -4,7 +4,7 @@
 use core::cmp;
 use core::mem;
 use core::ops::Drop;
-use core::ptr::{self, NonNull, Unique};
+use core::ptr::Unique;
 use core::slice;
 
 use crate::alloc::{handle_alloc_error, AllocErr, AllocRef, Global, Layout};
@@ -19,28 +19,18 @@ mod tests;
 /// involved. This type is excellent for building your own data structures like Vec and VecDeque.
 /// In particular:
 ///
-/// * Produces `Unique::empty()` on zero-sized types.
-/// * Produces `Unique::empty()` on zero-length allocations.
 /// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
 /// * Guards against 32-bit systems allocating more than isize::MAX bytes.
 /// * Guards against overflowing your length.
-/// * Aborts on OOM or calls `handle_alloc_error` as applicable.
-/// * Avoids freeing `Unique::empty()`.
+/// * Calls `handle_alloc_error` for fallible allocations.
 /// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
 ///
 /// This type does not in anyway inspect the memory that it manages. When dropped it *will*
 /// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
 /// to handle the actual things *stored* inside of a `RawVec`.
 ///
-/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types.
-/// This enables you to use capacity-growing logic catch the overflows in your length
-/// that might occur with zero-sized types.
-///
-/// The above means that you need to be careful when round-tripping this type with a
-/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`,
-/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity
-/// field. This allows zero-sized types to not be special-cased by consumers of
-/// this type.
+/// Note that a `RawVec` always returns a capacity of `usize::MAX` for zero-sized types. Beside
+/// this, zero-sized types are handled like non-zero-sized types.
 #[allow(missing_debug_implementations)]
 pub struct RawVec<T, A: AllocRef = Global> {
     ptr: Unique<T>,
@@ -52,50 +42,42 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// Like `new`, but parameterized over the choice of allocator for
     /// the returned `RawVec`.
     pub const fn new_in(a: A) -> Self {
-        let cap = if mem::size_of::<T>() == 0 { core::usize::MAX } else { 0 };
-
-        // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation".
-        RawVec { ptr: Unique::empty(), cap, a }
+        // `cap: 0` means "unallocated". zero-sized allocations are handled by `AllocRef`
+        Self { ptr: Unique::empty(), cap: 0, a }
     }
 
     /// Like `with_capacity`, but parameterized over the choice of
     /// allocator for the returned `RawVec`.
     #[inline]
     pub fn with_capacity_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, false, a)
+        Self::allocate_in(capacity, AllocInit::Unspecified, a)
     }
 
     /// Like `with_capacity_zeroed`, but parameterized over the choice
     /// of allocator for the returned `RawVec`.
     #[inline]
     pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self {
-        RawVec::allocate_in(capacity, true, a)
+        Self::allocate_in(capacity, AllocInit::Zero, a)
     }
 
-    fn allocate_in(mut capacity: usize, zeroed: bool, mut a: A) -> Self {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
+    fn allocate_in(capacity: usize, init: AllocInit, mut a: A) -> Self {
+        let layout = Layout::array::<T>(capacity).unwrap_or_else(|_| capacity_overflow());
+        alloc_guard(layout.size()).unwrap_or_else(|_| capacity_overflow());
 
-            let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow());
-            alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow());
+        let allocation = match init {
+            AllocInit::Unspecified => a.alloc(layout),
+            AllocInit::Zero => a.alloc_zeroed(layout),
+        };
+        let (ptr, alloc_size) = allocation.unwrap_or_else(|_| handle_alloc_error(layout));
 
-            // Handles ZSTs and `capacity == 0` alike.
-            let ptr = if alloc_size == 0 {
-                NonNull::<T>::dangling()
+        let ptr = ptr.cast().as_ptr();
+        let elem_size = mem::size_of::<T>();
+        unsafe {
+            if elem_size == 0 {
+                Self::from_raw_parts_in(ptr, capacity, a)
             } else {
-                let align = mem::align_of::<T>();
-                let layout = Layout::from_size_align(alloc_size, align).unwrap();
-                let result = if zeroed { a.alloc_zeroed(layout) } else { a.alloc(layout) };
-                match result {
-                    Ok((ptr, size)) => {
-                        capacity = size / elem_size;
-                        ptr.cast()
-                    }
-                    Err(_) => handle_alloc_error(layout),
-                }
-            };
-
-            RawVec { ptr: ptr.into(), cap: capacity, a }
+                Self::from_raw_parts_in(ptr, alloc_size / elem_size, a)
+            }
         }
     }
 }
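Note on the new `allocate_in`: the `AllocInit` enum replaces the old `zeroed: bool` flag, and `Layout::array` now does the size and overflow math that was previously written by hand. A minimal sketch of the same dispatch pattern on stable Rust, using the `std::alloc` free functions instead of the unstable `AllocRef` trait this commit targets (the names here are illustrative, not from the commit):

```rust
use std::alloc::{alloc, alloc_zeroed, dealloc, handle_alloc_error, Layout};

enum AllocInit {
    Unspecified, // contents are left uninitialized
    Zero,        // contents are guaranteed to be zeroed
}

// Mirrors `allocate_in`: compute the layout (catching capacity overflow),
// then pick the allocation entry point based on the requested init.
fn allocate_u64s(capacity: usize, init: AllocInit) -> (*mut u8, Layout) {
    let layout = Layout::array::<u64>(capacity).expect("capacity overflow");
    let ptr = unsafe {
        match init {
            AllocInit::Unspecified => alloc(layout),
            AllocInit::Zero => alloc_zeroed(layout),
        }
    };
    if ptr.is_null() {
        handle_alloc_error(layout);
    }
    (ptr, layout)
}

fn main() {
    let (ptr, layout) = allocate_u64s(8, AllocInit::Zero);
    assert_eq!(unsafe { *ptr }, 0); // the zeroed variant reads back as zero
    unsafe { dealloc(ptr, layout) };
}
```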
@@ -140,13 +122,13 @@ impl<T> RawVec<T, Global> {
     /// Aborts on OOM.
     #[inline]
     pub fn with_capacity(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, false, Global)
+        Self::with_capacity_in(capacity, Global)
     }
 
     /// Like `with_capacity`, but guarantees the buffer is zeroed.
     #[inline]
     pub fn with_capacity_zeroed(capacity: usize) -> Self {
-        RawVec::allocate_in(capacity, true, Global)
+        Self::with_capacity_zeroed_in(capacity, Global)
     }
 }
 
@@ -158,8 +140,9 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`.
     /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
     /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed.
+    #[inline]
     pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a }
+        Self { ptr: Unique::new_unchecked(ptr), cap: capacity, a }
     }
 }
 
@@ -171,8 +154,9 @@ impl<T> RawVec<T, Global> {
     /// The `ptr` must be allocated (on the system heap), and with the given `capacity`.
     /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems).
     /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed.
+    #[inline]
     pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self {
-        RawVec { ptr: Unique::new_unchecked(ptr), cap: capacity, a: Global }
+        Self::from_raw_parts_in(ptr, capacity, Global)
     }
 
     /// Converts a `Box<[T]>` into a `RawVec<T>`.
@@ -198,7 +182,7 @@
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { !0 } else { self.cap }
+        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
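The `usize::MAX` capacity for zero-sized types is observable through the public `Vec` API, which is built on top of `RawVec`:

```rust
// `RawVec` reports `usize::MAX` capacity for zero-sized types, and `Vec`
// exposes the same behavior: pushing a ZST never allocates, so the
// capacity is effectively unlimited.
fn main() {
    let mut v: Vec<()> = Vec::new();
    assert_eq!(v.capacity(), usize::MAX);
    v.push(());
    assert_eq!(v.capacity(), usize::MAX);
    assert_eq!(v.len(), 1);
}
```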
@@ -236,8 +220,6 @@
     ///
     /// # Panics
     ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
     ///
@@ -276,50 +258,10 @@
     #[inline(never)]
     #[cold]
     pub fn double(&mut self) {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            let (ptr, new_cap) = match self.current_layout() {
-                Some(cur) => {
-                    // Since we guarantee that we never allocate more than
-                    // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as
-                    // a precondition, so this can't overflow. Additionally the
-                    // alignment will never be too large as to "not be
-                    // satisfiable", so `Layout::from_size_align` will always
-                    // return `Some`.
-                    //
-                    // TL;DR, we bypass runtime checks due to dynamic assertions
-                    // in this module, allowing us to use
-                    // `from_size_align_unchecked`.
-                    let new_cap = 2 * self.cap;
-                    let new_size = new_cap * elem_size;
-                    alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-                    let ptr_res = self.a.realloc(NonNull::from(self.ptr).cast(), cur, new_size);
-                    match ptr_res {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(Layout::from_size_align_unchecked(
-                            new_size,
-                            cur.align(),
-                        )),
-                    }
-                }
-                None => {
-                    // Skip to 4 because tiny `Vec`'s are dumb; but not if that
-                    // would cause overflow.
-                    let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 };
-                    let layout = Layout::array::<T>(new_cap).unwrap();
-                    match self.a.alloc(layout) {
-                        Ok((ptr, new_size)) => (ptr, new_size / elem_size),
-                        Err(_) => handle_alloc_error(layout),
-                    }
-                }
-            };
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
+        match self.grow(Double, AllocPlacement::Unspecified, AllocInit::Unspecified) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
 
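`double` now funnels all the work through `grow` and only translates errors at the boundary: capacity-math failures panic, allocator failures go to `handle_alloc_error`. The same match reappears in `reserve`, `reserve_exact`, and `shrink_to_fit` below. A self-contained sketch of that split, with a stand-in error type rather than liballoc's real `TryReserveError` (which also carries a `non_exhaustive` field):

```rust
use std::alloc::{handle_alloc_error, Layout};

#[allow(dead_code)]
enum TryReserveError {
    CapacityOverflow,
    AllocError { layout: Layout },
}

// Shape of the infallible wrappers in this commit: capacity overflow
// becomes a panic, an allocator failure aborts via `handle_alloc_error`.
fn infallible(result: Result<(), TryReserveError>) {
    match result {
        Err(TryReserveError::CapacityOverflow) => panic!("capacity overflow"),
        Err(TryReserveError::AllocError { layout }) => handle_alloc_error(layout),
        Ok(()) => { /* yay */ }
    }
}

fn main() {
    infallible(Ok(())); // the happy path just returns
}
```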
@@ -331,106 +273,12 @@
     ///
     /// # Panics
     ///
-    /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust
-    ///   all `usize::MAX` slots in your imaginary buffer.
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
     #[inline(never)]
     #[cold]
     pub fn double_in_place(&mut self) -> bool {
-        unsafe {
-            let elem_size = mem::size_of::<T>();
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false, // nothing to double
-            };
-
-            // Since we set the capacity to `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            assert!(elem_size != 0, "capacity overflow");
-
-            // Since we guarantee that we never allocate more than `isize::MAX`
-            // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so
-            // this can't overflow.
-            //
-            // Similarly to with `double` above, we can go straight to
-            // `Layout::from_size_align_unchecked` as we know this won't
-            // overflow and the alignment is sufficiently small.
-            let new_cap = 2 * self.cap;
-            let new_size = new_cap * elem_size;
-            alloc_guard(new_size).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                Ok(_) => {
-                    // We can't directly divide `size`.
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
-        }
-    }
-
-    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve_exact(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Exact)
-    }
-
-    /// Ensures that the buffer contains at least enough space to hold
-    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
-    /// will reallocate the minimum possible amount of memory necessary.
-    /// Generally this will be exactly the amount of memory necessary,
-    /// but in principle the allocator is free to give back more than
-    /// we asked for.
-    ///
-    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
-    /// the requested space. This is not really unsafe, but the unsafe
-    /// code *you* write that relies on the behavior of this function may break.
-    ///
-    /// # Panics
-    ///
-    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
-    /// * Panics on 32-bit platforms if the requested capacity exceeds
-    ///   `isize::MAX` bytes.
-    ///
-    /// # Aborts
-    ///
-    /// Aborts on OOM.
-    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) {
-            Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
-            Ok(()) => { /* yay */ }
-        }
-    }
-
-    /// Calculates the buffer's new size given that it'll hold `used_capacity +
-    /// needed_extra_capacity` elements. This logic is used in amortized reserve methods.
-    /// Returns `(new_capacity, new_alloc_size)`.
-    fn amortized_new_size(
-        &self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<usize, TryReserveError> {
-        // Nothing we can really do about these checks, sadly.
-        let required_cap =
-            used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
-        // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
-        let double_cap = self.cap * 2;
-        // `double_cap` guarantees exponential growth.
-        Ok(cmp::max(double_cap, required_cap))
-    }
-
-    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
-    pub fn try_reserve(
-        &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-    ) -> Result<(), TryReserveError> {
-        self.reserve_internal(used_capacity, needed_extra_capacity, Fallible, Amortized)
+        self.grow(Double, AllocPlacement::InPlace, AllocInit::Unspecified).is_ok()
     }
 
     /// Ensures that the buffer contains at least enough space to hold
@@ -486,12 +334,30 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// # }
     /// ```
     pub fn reserve(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
-        match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Amortized) {
+        match self.try_reserve(used_capacity, needed_extra_capacity) {
             Err(CapacityOverflow) => capacity_overflow(),
-            Err(AllocError { .. }) => unreachable!(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
             Ok(()) => { /* yay */ }
         }
     }
+
+    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(
+                Amortized { used_capacity, needed_extra_capacity },
+                AllocPlacement::Unspecified,
+                AllocInit::Unspecified,
+            )
+        } else {
+            Ok(())
+        }
+    }
 
     /// Attempts to ensure that the buffer contains at least enough space to hold
     /// `used_capacity + needed_extra_capacity` elements. If it doesn't already have
     /// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -510,45 +376,62 @@ impl<T, A: AllocRef> RawVec<T, A> {
     /// * Panics on 32-bit platforms if the requested capacity exceeds
     ///   `isize::MAX` bytes.
     pub fn reserve_in_place(&mut self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity. If the current `cap` is 0, we can't
-            // reallocate in place.
-            // Wrapping in case they give a bad `used_capacity`
-            let old_layout = match self.current_layout() {
-                Some(layout) => layout,
-                None => return false,
-            };
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return false;
-            }
+        // This is more readable as putting this in one line:
+        // `!self.needs_to_grow(...) || self.grow(...).is_ok()`
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(
+                Amortized { used_capacity, needed_extra_capacity },
+                AllocPlacement::InPlace,
+                AllocInit::Unspecified,
+            )
+            .is_ok()
+        } else {
+            true
+        }
+    }
 
-            let new_cap = self
-                .amortized_new_size(used_capacity, needed_extra_capacity)
-                .unwrap_or_else(|_| capacity_overflow());
-
-            // Here, `cap < used_capacity + needed_extra_capacity <= new_cap`
-            // (regardless of whether `self.cap - used_capacity` wrapped).
-            // Therefore, we can safely call `grow_in_place`.
-
-            let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0;
-            // FIXME: may crash and burn on over-reserve
-            alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow());
-            match self.a.grow_in_place(
-                NonNull::from(self.ptr).cast(),
-                old_layout,
-                new_layout.size(),
-            ) {
-                Ok(_) => {
-                    self.cap = new_cap;
-                    true
-                }
-                Err(_) => false,
-            }
+    /// Ensures that the buffer contains at least enough space to hold
+    /// `used_capacity + needed_extra_capacity` elements. If it doesn't already,
+    /// will reallocate the minimum possible amount of memory necessary.
+    /// Generally this will be exactly the amount of memory necessary,
+    /// but in principle the allocator is free to give back more than
+    /// we asked for.
+    ///
+    /// If `used_capacity` exceeds `self.capacity()`, this may fail to actually allocate
+    /// the requested space. This is not really unsafe, but the unsafe
+    /// code *you* write that relies on the behavior of this function may break.
+    ///
+    /// # Panics
+    ///
+    /// * Panics if the requested capacity exceeds `usize::MAX` bytes.
+    /// * Panics on 32-bit platforms if the requested capacity exceeds
+    ///   `isize::MAX` bytes.
+    ///
+    /// # Aborts
+    ///
+    /// Aborts on OOM.
+    pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) {
+        match self.try_reserve_exact(used_capacity, needed_extra_capacity) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
+        }
+    }
+
+    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+    pub fn try_reserve_exact(
+        &mut self,
+        used_capacity: usize,
+        needed_extra_capacity: usize,
+    ) -> Result<(), TryReserveError> {
+        if self.needs_to_grow(used_capacity, needed_extra_capacity) {
+            self.grow(
+                Exact { used_capacity, needed_extra_capacity },
+                AllocPlacement::Unspecified,
+                AllocInit::Unspecified,
+            )
+        } else {
+            Ok(())
         }
     }
 
@@ -563,126 +446,169 @@ impl<T, A: AllocRef> RawVec<T, A> {
     ///
     /// Aborts on OOM.
     pub fn shrink_to_fit(&mut self, amount: usize) {
-        let elem_size = mem::size_of::<T>();
-
-        // Set the `cap` because they might be about to promote to a `Box<[T]>`
-        if elem_size == 0 {
-            self.cap = amount;
-            return;
-        }
-
-        // This check is my waterloo; it's the only thing `Vec` wouldn't have to do.
-        assert!(self.cap >= amount, "Tried to shrink to a larger capacity");
-
-        if amount == 0 {
-            // We want to create a new zero-length vector within the
-            // same allocator. We use `ptr::write` to avoid an
-            // erroneous attempt to drop the contents, and we use
-            // `ptr::read` to sidestep condition against destructuring
-            // types that implement Drop.
-
-            unsafe {
-                let a = ptr::read(&self.a as *const A);
-                self.dealloc_buffer();
-                ptr::write(self, RawVec::new_in(a));
-            }
-        } else if self.cap != amount {
-            unsafe {
-                // We know here that our `amount` is greater than zero. This
-                // implies, via the assert above, that capacity is also greater
-                // than zero, which means that we've got a current layout that
-                // "fits"
-                //
-                // We also know that `self.cap` is greater than `amount`, and
-                // consequently we don't need runtime checks for creating either
-                // layout.
-                let old_size = elem_size * self.cap;
-                let new_size = elem_size * amount;
-                let align = mem::align_of::<T>();
-                let old_layout = Layout::from_size_align_unchecked(old_size, align);
-                match self.a.realloc(NonNull::from(self.ptr).cast(), old_layout, new_size) {
-                    Ok((ptr, _)) => self.ptr = ptr.cast().into(),
-                    Err(_) => {
-                        handle_alloc_error(Layout::from_size_align_unchecked(new_size, align))
-                    }
-                }
-            }
-            self.cap = amount;
+        match self.shrink(amount, AllocPlacement::Unspecified) {
+            Err(CapacityOverflow) => capacity_overflow(),
+            Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+            Ok(()) => { /* yay */ }
         }
     }
 }
 
-enum Fallibility {
-    Fallible,
-    Infallible,
+#[derive(Copy, Clone)]
+enum Strategy {
+    Double,
+    Amortized { used_capacity: usize, needed_extra_capacity: usize },
+    Exact { used_capacity: usize, needed_extra_capacity: usize },
 }
+use Strategy::*;
 
-use Fallibility::*;
-
-enum ReserveStrategy {
-    Exact,
-    Amortized,
+enum AllocInit {
+    Unspecified,
+    Zero,
 }
 
-use ReserveStrategy::*;
+enum AllocPlacement {
+    Unspecified,
+    InPlace,
+}
 
 impl<T, A: AllocRef> RawVec<T, A> {
-    fn reserve_internal(
+    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
+    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+    fn needs_to_grow(&self, used_capacity: usize, needed_extra_capacity: usize) -> bool {
+        needed_extra_capacity > self.capacity().wrapping_sub(used_capacity)
+    }
+
+    /// Single method to handle all possibilities of growing the buffer.
+    fn grow(
         &mut self,
-        used_capacity: usize,
-        needed_extra_capacity: usize,
-        fallibility: Fallibility,
-        strategy: ReserveStrategy,
+        strategy: Strategy,
+        placement: AllocPlacement,
+        init: AllocInit,
     ) -> Result<(), TryReserveError> {
         let elem_size = mem::size_of::<T>();
-
-        unsafe {
-            // NOTE: we don't early branch on ZSTs here because we want this
-            // to actually catch "asking for more than usize::MAX" in that case.
-            // If we make it past the first branch then we are guaranteed to
-            // panic.
-
-            // Don't actually need any more capacity.
-            // Wrapping in case they gave a bad `used_capacity`.
-            if self.capacity().wrapping_sub(used_capacity) >= needed_extra_capacity {
-                return Ok(());
+        let (new_layout, new_cap) = match strategy {
+            Double => unsafe {
+                if elem_size == 0 {
+                    // Since we return a capacity of `usize::MAX` when `elem_size` is
+                    // 0, getting to here necessarily means the `RawVec` is overfull.
+                    return Err(CapacityOverflow);
+                }
+                // Since we guarantee that we never allocate more than `isize::MAX` bytes,
+                // `elem_size * self.cap <= isize::MAX` as a precondition, so this can't overflow.
+                // Additionally the alignment will never be too large as to "not be satisfiable",
+                // so `Layout::from_size_align` will always return `Some`.
+                //
+                // TL;DR, we bypass runtime checks due to dynamic assertions in this module,
+                // allowing us to use `from_size_align_unchecked`.
+                let cap = if self.cap == 0 {
+                    if elem_size > usize::MAX / 8 { 1 } else { 4 }
+                } else {
+                    self.cap * 2
+                };
+                let layout =
+                    Layout::from_size_align_unchecked(cap * elem_size, mem::align_of::<T>());
+                (layout, cap)
+            },
+            Amortized { used_capacity, needed_extra_capacity } => {
+                // Nothing we can really do about these checks, sadly.
+                let required_cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`.
+                let double_cap = self.cap * 2;
+                // `double_cap` guarantees exponential growth.
+                let cap = cmp::max(double_cap, required_cap);
+                let layout = Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?;
+                (layout, cap)
+            }
+            Exact { used_capacity, needed_extra_capacity } => {
+                let cap =
+                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?;
+                let layout = Layout::array::<T>(cap).map_err(|_| CapacityOverflow)?;
+                (layout, cap)
             }
+        };
 
-            // Nothing we can really do about these checks, sadly.
-            let new_cap = match strategy {
-                Exact => {
-                    used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?
+        let allocation = if let Some(old_layout) = self.current_layout() {
+            debug_assert!(old_layout.align() == new_layout.align());
+            debug_assert!(old_layout.size() <= new_layout.size());
+            let ptr = self.ptr.cast().into();
+            unsafe {
+                match (placement, init) {
+                    (AllocPlacement::Unspecified, AllocInit::Unspecified) => {
+                        self.a.realloc(ptr, old_layout, new_layout.size())
+                    }
+                    (AllocPlacement::Unspecified, AllocInit::Zero) => {
+                        self.a.realloc_zeroed(ptr, old_layout, new_layout.size())
+                    }
+                    (AllocPlacement::InPlace, AllocInit::Unspecified) => self
+                        .a
+                        .grow_in_place(ptr, old_layout, new_layout.size())
+                        .map(|size| (ptr, size))
+                        .map_err(|_| AllocErr),
+                    (AllocPlacement::InPlace, AllocInit::Zero) => self
+                        .a
+                        .grow_in_place_zeroed(ptr, old_layout, new_layout.size())
+                        .map(|size| (ptr, size))
+                        .map_err(|_| AllocErr),
                 }
-                Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?,
-            };
-            let new_layout = Layout::array::<T>(new_cap).map_err(|_| CapacityOverflow)?;
+            }
+        } else {
+            match (placement, init) {
+                (AllocPlacement::Unspecified, AllocInit::Unspecified) => self.a.alloc(new_layout),
+                (AllocPlacement::Unspecified, AllocInit::Zero) => self.a.alloc_zeroed(new_layout),
+                (AllocPlacement::InPlace, _) => Err(AllocErr),
+            }
+        };
+        allocation
+            .map(|(ptr, alloc_size)| {
+                self.ptr = ptr.cast().into();
+                if elem_size == 0 {
+                    self.cap = new_cap;
+                } else {
+                    self.cap = alloc_size / elem_size;
+                }
+            })
+            .map_err(|_| TryReserveError::AllocError { layout: new_layout, non_exhaustive: () })
+    }
 
-            alloc_guard(new_layout.size())?;
+    fn shrink(&mut self, amount: usize, placement: AllocPlacement) -> Result<(), TryReserveError> {
+        assert!(amount <= self.cap, "Tried to shrink to a larger capacity");
 
-            let res = match self.current_layout() {
-                Some(layout) => {
-                    debug_assert!(new_layout.align() == layout.align());
-                    self.a.realloc(NonNull::from(self.ptr).cast(), layout, new_layout.size())
+        let elem_size = mem::size_of::<T>();
+        let old_layout =
+            if let Some(layout) = self.current_layout() { layout } else { return Ok(()) };
+        let old_ptr = self.ptr.cast().into();
+        let new_size = amount * elem_size;
+
+        let allocation = unsafe {
+            match (amount, placement) {
+                (0, AllocPlacement::Unspecified) => {
+                    self.dealloc_buffer();
+                    Ok((old_layout.dangling(), 0))
                 }
-                None => self.a.alloc(new_layout),
-            };
-
-            let (ptr, new_cap) = match (res, fallibility) {
-                (Err(AllocErr), Infallible) => handle_alloc_error(new_layout),
-                (Err(AllocErr), Fallible) => {
-                    return Err(TryReserveError::AllocError {
-                        layout: new_layout,
-                        non_exhaustive: (),
-                    });
+                (_, AllocPlacement::Unspecified) => self.a.realloc(old_ptr, old_layout, new_size),
+                (_, AllocPlacement::InPlace) => self
+                    .a
+                    .shrink_in_place(old_ptr, old_layout, new_size)
+                    .map(|size| (old_ptr, size))
+                    .map_err(|_| AllocErr),
+            }
+        };
+
+        allocation
+            .map(|(ptr, alloc_size)| {
+                self.ptr = ptr.cast().into();
+                if elem_size == 0 {
+                    self.cap = amount;
+                } else {
+                    self.cap = alloc_size / elem_size;
                 }
-                (Ok((ptr, new_size)), _) => (ptr, new_size / elem_size),
-            };
-
-            self.ptr = ptr.cast().into();
-            self.cap = new_cap;
-
-            Ok(())
-        }
+            })
+            .map_err(|_| TryReserveError::AllocError {
+                layout: unsafe { Layout::from_size_align_unchecked(new_size, old_layout.align()) },
+                non_exhaustive: (),
+            })
     }
 }
 
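The `Amortized` arm of `grow` keeps the old `amortized_new_size` policy: take the larger of double the current capacity and the exact requirement. Pulled out into a free function (hypothetical, for illustration), the policy and its overflow handling look like this:

```rust
// The amortized growth computation from `grow`, isolated. Doubling guarantees
// exponential growth; `required` keeps correctness when a single reserve asks
// for more than double. `None` stands in for `CapacityOverflow`.
fn amortized_new_cap(cap: usize, used: usize, extra: usize) -> Option<usize> {
    let required = used.checked_add(extra)?;
    Some(core::cmp::max(cap * 2, required))
}

fn main() {
    assert_eq!(amortized_new_cap(4, 4, 1), Some(8));     // doubling wins
    assert_eq!(amortized_new_cap(4, 4, 100), Some(104)); // explicit need wins
    assert_eq!(amortized_new_cap(0, usize::MAX, 1), None); // overflow caught
}
```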
@@ -709,21 +635,16 @@ impl<T> RawVec<T, Global> {
 impl<T, A: AllocRef> RawVec<T, A> {
     /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
     pub unsafe fn dealloc_buffer(&mut self) {
-        let elem_size = mem::size_of::<T>();
-        if elem_size != 0 {
-            if let Some(layout) = self.current_layout() {
-                self.a.dealloc(NonNull::from(self.ptr).cast(), layout);
-            }
+        if let Some(layout) = self.current_layout() {
+            self.a.dealloc(self.ptr.cast().into(), layout);
         }
     }
 }
 
 unsafe impl<#[may_dangle] T, A: AllocRef> Drop for RawVec<T, A> {
     /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
     fn drop(&mut self) {
-        unsafe {
-            self.dealloc_buffer();
-        }
+        unsafe { self.dealloc_buffer() }
     }
 }
 
src/liballoc/raw_vec/tests.rs

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 use super::*;
+use core::ptr::NonNull;
 
 #[test]
 fn allocator_param() {
