use std::{
    ptr::null_mut,
    slice::from_raw_parts_mut,
    sync::{
        atomic::{AtomicPtr, Ordering},
        Mutex,
    },
};

/// Number of bucket slots: one for every possible result of
/// `get_bucket_index` (at most `usize::BITS`), plus the initial bucket.
const BUCKETS: usize = (usize::BITS + 1) as usize;

/// An `Option`-like type that guarantees that a fully zeroed value is a valid
/// `None` variant.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
enum COption<T> {
    // TODO(alexkirsz) We need a way to guarantee that a fully zeroed value is
    // a valid `None` variant. This is theoretically possible when the wrapped
    // type has no valid value that can be represented by all zeros, but there
    // is no way to enforce this at the type level. For now, we just use a
    // custom option type with explicit discriminant for the `None` variant.
    // The issue with this implementation is that it disables niche
    // optimization.
    None = 0,
    Some(T),
}

impl<T> Default for COption<T> {
    fn default() -> Self {
        Self::None
    }
}

impl<T> COption<T> {
    /// Returns a boxed slice of the given size filled with the `None`
    /// variant.
    fn new_none_slice(size: usize) -> Box<[Self]> {
        // Construct every element explicitly instead of relying on the
        // unstable `Box::new_zeroed_slice` + `assume_init`: this is fully
        // safe, needs no `T: Clone`/`T: Default` bound, and produces the
        // same all-`None` slice.
        (0..size).map(|_| COption::None).collect()
    }

    /// Returns a reference to the contained value, or `None` if it is
    /// `None`.
    fn as_option_ref(&self) -> Option<&T> {
        match self {
            COption::None => None,
            COption::Some(t) => Some(t),
        }
    }
}

/// A concurrent vector whose elements never move once written: storage is a
/// fixed array of exponentially growing buckets, allocated on demand and
/// never reallocated, so `&T` references handed out by `get`/`insert` keep a
/// stable address for the lifetime of the vec.
pub struct NoMoveVec<T, const INITIAL_CAPACITY_BITS: u32 = 6> {
    // Each slot pairs the bucket's base pointer (null until the bucket is
    // allocated) with a mutex that serializes bucket allocation.
    buckets: [(AtomicPtr<COption<T>>, Mutex<()>); BUCKETS],
}

/// Maps a global element index to the index of the bucket storing it.
/// Indices below `2^INITIAL_CAPACITY_BITS` land in bucket 0; beyond that,
/// one bucket per additional significant bit of the index.
fn get_bucket_index<const INITIAL_CAPACITY_BITS: u32>(idx: usize) -> u32 {
    (usize::BITS - idx.leading_zeros()).saturating_sub(INITIAL_CAPACITY_BITS)
}

/// Number of elements in the bucket with the given index. Bucket 0 holds
/// `2^INITIAL_CAPACITY_BITS` elements; bucket `n > 0` holds
/// `2^(INITIAL_CAPACITY_BITS + n - 1)`.
fn get_bucket_size<const INITIAL_CAPACITY_BITS: u32>(bucket_index: u32) -> usize {
    if bucket_index != 0 {
        1 << (bucket_index + INITIAL_CAPACITY_BITS - 1)
    } else {
        1 << INITIAL_CAPACITY_BITS
    }
}

/// Offset of a global element index within its bucket: clears the top set
/// bit that selected the bucket (a no-op for bucket 0).
fn get_index_in_bucket<const INITIAL_CAPACITY_BITS: u32>(idx: usize, bucket_index: u32) -> usize {
    if bucket_index != 0 {
        idx ^ (1 << (bucket_index + INITIAL_CAPACITY_BITS - 1))
    } else {
        idx
    }
}
fn allocate_bucket(bucket_index: u32) -> *mut COption { let size = get_bucket_size::(bucket_index); let slice = COption::::new_none_slice(size); Box::into_raw(slice) as *mut COption } impl Default for NoMoveVec { fn default() -> Self { Self::new() } } impl NoMoveVec { pub fn new() -> Self { let mut buckets = [null_mut(); BUCKETS]; buckets[0] = allocate_bucket::(0); let buckets = buckets.map(|p| (AtomicPtr::new(p), Mutex::new(()))); NoMoveVec { buckets } } pub fn get(&self, idx: usize) -> Option<&T> { let bucket_idx = get_bucket_index::(idx); let bucket_ptr = unsafe { self.buckets.get_unchecked(bucket_idx as usize) } .0 .load(Ordering::Acquire); if bucket_ptr.is_null() { return None; } let index = get_index_in_bucket::(idx, bucket_idx); unsafe { &*bucket_ptr.add(index) }.as_option_ref() } /// # Safety /// There must not be a concurrent operation to this idx pub unsafe fn insert(&self, idx: usize, value: T) -> &T { let bucket_idx = get_bucket_index::(idx); let bucket = unsafe { self.buckets.get_unchecked(bucket_idx as usize) }; // SAFETY: This is safe to be relaxed as the bucket will never become null // again. We perform a acquire load when it's null. 
let mut bucket_ptr = bucket.0.load(Ordering::Relaxed); if bucket_ptr.is_null() { bucket_ptr = bucket.0.load(Ordering::Acquire); if bucket_ptr.is_null() { let lock = bucket.1.lock(); let guarded_bucket_ptr = bucket.0.load(Ordering::Acquire); if guarded_bucket_ptr.is_null() { let new_bucket = allocate_bucket::(bucket_idx); bucket_ptr = match bucket.0.compare_exchange( null_mut(), new_bucket, Ordering::AcqRel, Ordering::Relaxed, ) { Ok(_) => new_bucket, Err(current_bucket) => { drop(unsafe { Box::from_raw(new_bucket) }); current_bucket } }; drop(lock); } else { bucket_ptr = guarded_bucket_ptr; } } } let index = get_index_in_bucket::(idx, bucket_idx); let item = unsafe { &mut *bucket_ptr.add(index) }; *item = COption::Some(value); // To sync with any acquire load of the bucket ptr bucket.0.store(bucket_ptr, Ordering::Release); item.as_option_ref().unwrap() } } impl Drop for NoMoveVec { fn drop(&mut self) { for (bucket_index, (bucket, _)) in self.buckets.iter_mut().enumerate() { if bucket_index < (usize::BITS + 1 - INITIAL_CAPACITY_BITS) as usize { let bucket_size = get_bucket_size::(bucket_index as u32); let bucket_ptr = *bucket.get_mut(); if !bucket_ptr.is_null() { drop(unsafe { Box::from_raw(from_raw_parts_mut(bucket_ptr, bucket_size)) }); } } } } } #[cfg(test)] mod tests { use super::NoMoveVec; #[test] fn basic_operations() { let v = NoMoveVec::<(usize, usize)>::new(); assert_eq!(v.get(0), None); assert_eq!(v.get(1), None); assert_eq!(v.get(8), None); assert_eq!(v.get(9), None); assert_eq!(v.get(15), None); assert_eq!(v.get(16), None); assert_eq!(v.get(100), None); assert_eq!(v.get(1000), None); for i in 0..1000 { unsafe { v.insert(i, (i, i)); } assert_eq!(v.get(i), Some(&(i, i))); } for i in 0..1000 { assert_eq!(v.get(i), Some(&(i, i))); } assert_eq!(v.get(1001), None); unsafe { v.insert(1000000, (0, 0)); } assert_eq!(v.get(1000000), Some(&(0, 0))); assert_eq!(v.get(10000), None); } }