// indexmap/map/core.rs

//! This is the core implementation that doesn't depend on the hasher at all.
//!
//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
//!
//! It's cleaner to separate them out; then the compiler checks that we are not
//! using `Hash` at all in these methods.
//!
//! However, we should probably not let this show in the public API or docs.

mod entry;
mod extract;

pub mod raw_entry_v1;

use alloc::vec::{self, Vec};
use core::mem;
use core::ops::RangeBounds;
use hashbrown::hash_table;

use crate::util::simplify_range;
use crate::{Bucket, Equivalent, HashValue, TryReserveError};

type Indices = hash_table::HashTable<usize>;
type Entries<K, V> = Vec<Bucket<K, V>>;

pub use entry::{Entry, IndexedEntry, OccupiedEntry, VacantEntry};
pub(crate) use extract::ExtractCore;

/// Core of the map that does not depend on S
#[derive(Debug)]
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index.
    indices: Indices,
    /// entries is a dense vec maintaining entry order.
    entries: Entries<K, V>,
}
/// Mutable references to the parts of an `IndexMapCore`.
///
/// `HashTable::find_entry` takes hold of `&mut indices`, so we have to borrow our
/// `&mut entries` separately, and there's no way to go back to a `&mut IndexMapCore`.
/// So this type is used to implement methods on the split references, and
/// `IndexMapCore` can also call those to avoid duplication.
struct RefMut<'a, K, V> {
    indices: &'a mut Indices,
    entries: &'a mut Entries<K, V>,
}

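/// Returns a hasher closure for the indices table: given a stored index, it
/// looks up that entry's precomputed hash in `entries`.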
#[inline(always)]
fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

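/// Returns an equality closure for the indices table: given a stored index, it
/// compares `key` against that entry's key via `Equivalent`.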
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a [Bucket<K, V>],
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

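/// Remove the table entry matching both `hash` and the exact `index` value.
/// Panics in debug builds if no such entry exists.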
#[inline]
fn erase_index(table: &mut Indices, hash: HashValue, index: usize) {
    if let Ok(entry) = table.find_entry(hash.get(), move |&i| i == index) {
        entry.remove();
    } else if cfg!(debug_assertions) {
        panic!("index not found");
    }
}

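/// Find the table entry matching `hash` with the index `old`, and update it to `new`.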
#[inline]
fn update_index(table: &mut Indices, hash: HashValue, old: usize, new: usize) {
    let index = table
        .find_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

/// Inserts many entries into the indices table without reallocating,
/// and without regard for duplication.
///
/// ***Panics*** if there is not sufficient capacity already.
fn insert_bulk_no_grow<K, V>(indices: &mut Indices, entries: &[Bucket<K, V>]) {
    assert!(indices.capacity() - indices.len() >= entries.len());
    for entry in entries {
        indices.insert_unique(entry.hash.get(), indices.len(), |_| unreachable!());
    }
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let mut new = Self::new();
        new.clone_from(self);
        new
    }

    fn clone_from(&mut self, other: &Self) {
        self.indices.clone_from(&other.indices);
        if self.entries.capacity() < other.entries.len() {
            // If we must resize, match the indices capacity.
            let additional = other.entries.len() - self.entries.len();
            self.borrow_mut().reserve_entries(additional);
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> IndexMapCore<K, V> {
    /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
    const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();

    #[inline]
    pub(crate) const fn new() -> Self {
        IndexMapCore {
            indices: Indices::new(),
            entries: Vec::new(),
        }
    }

    #[inline]
    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
        RefMut::new(&mut self.indices, &mut self.entries)
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: Indices::with_capacity(n),
            entries: Vec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn into_entries(self) -> Entries<K, V> {
        self.entries
    }

    #[inline]
    pub(crate) fn as_entries(&self) -> &[Bucket<K, V>] {
        &self.entries
    }

    #[inline]
    pub(crate) fn as_entries_mut(&mut self) -> &mut [Bucket<K, V>] {
        &mut self.entries
    }

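    /// Mutate the entries slice in place (e.g. to reorder it), then rebuild the
    /// hash indices to match the new entry positions.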
    pub(crate) fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut [Bucket<K, V>]),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        debug_assert_eq!(self.entries.len(), self.indices.len());
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        Ord::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

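    /// Shorten the map to its first `len` entries, dropping the rest.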
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

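    /// Remove the entries in `range`, returning a draining iterator over them;
    /// the indices are adjusted up front, before `Vec::drain` does its work.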
    #[track_caller]
    pub(crate) fn drain<R>(&mut self, range: R) -> vec::Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    #[cfg(feature = "rayon")]
    pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
    where
        K: Send,
        V: Send,
        R: RangeBounds<usize>,
    {
        use rayon::iter::ParallelDrainRange;
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.par_drain(range)
    }

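    /// Split the map at `at`, keeping entries `..at` and returning a new core
    /// containing entries `at..` with freshly built indices.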
    #[track_caller]
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        let len = self.entries.len();
        assert!(
            at <= len,
            "index out of bounds: the len is {len} but the index is {at}. Expected index <= len"
        );

        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        Self { indices, entries }
    }

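    /// Split out `range`: `self` keeps the entries before it, the returned core
    /// receives the entries after it, and the drained range itself is returned
    /// as an iterator.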
    #[track_caller]
    pub(crate) fn split_splice<R>(&mut self, range: R) -> (Self, vec::IntoIter<Bucket<K, V>>)
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.len());
        self.erase_indices(range.start, self.entries.len());
        let entries = self.entries.split_off(range.end);
        let drained = self.entries.split_off(range.start);

        let mut indices = Indices::with_capacity(entries.len());
        insert_bulk_no_grow(&mut indices, &entries);
        (Self { indices, entries }, drained.into_iter())
    }

    /// Append from another map without checking whether items already exist.
    pub(crate) fn append_unchecked(&mut self, other: &mut Self) {
        self.reserve(other.len());
        insert_bulk_no_grow(&mut self.indices, &other.entries);
        self.entries.append(&mut other.entries);
        other.indices.clear();
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.borrow_mut().reserve_entries(additional);
        }
    }

    /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn reserve_exact(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.entries.reserve_exact(additional);
    }

    /// Try to reserve capacity for `additional` more key-value pairs.
    pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        // Only grow entries if necessary, since we also round up capacity.
        if additional > self.entries.capacity() - self.entries.len() {
            self.try_reserve_entries(additional)
        } else {
            Ok(())
        }
    }

    /// Try to reserve entries capacity, rounded up to match the indices
    fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
        // Use a soft-limit on the maximum capacity, but if the caller explicitly
        // requested more, do it and let them have the resulting error.
        let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
        let try_add = new_capacity - self.entries.len();
        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
            return Ok(());
        }
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
    pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
        self.indices
            .try_reserve(additional, get_hash(&self.entries))
            .map_err(TryReserveError::from_hashbrown)?;
        self.entries
            .try_reserve_exact(additional)
            .map_err(TryReserveError::from_alloc)
    }

    /// Shrink the capacity of the map with a lower bound
    pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
        self.indices
            .shrink_to(min_capacity, get_hash(&self.entries));
        self.entries.shrink_to(min_capacity);
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.find(hash.get(), eq).copied()
    }

    /// Append a key-value pair to `entries`,
    /// *without* checking whether it already exists.
    fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.borrow_mut().reserve_entries(1);
        }
        self.entries.push(Bucket { hash, key, value });
    }

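    /// Insert a key-value pair, returning its index along with the replaced
    /// value if the key was already present.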
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                (i, Some(mem::replace(&mut self.entries[i].value, value)))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Same as `insert_full`, except it also replaces the key
    pub(crate) fn replace_full(
        &mut self,
        hash: HashValue,
        key: K,
        value: V,
    ) -> (usize, Option<(K, V)>)
    where
        K: Eq,
    {
        let eq = equivalent(&key, &self.entries);
        let hasher = get_hash(&self.entries);
        match self.indices.entry(hash.get(), eq, hasher) {
            hash_table::Entry::Occupied(entry) => {
                let i = *entry.get();
                let entry = &mut self.entries[i];
                let kv = (
                    mem::replace(&mut entry.key, key),
                    mem::replace(&mut entry.value, value),
                );
                (i, Some(kv))
            }
            hash_table::Entry::Vacant(entry) => {
                let i = self.entries.len();
                entry.insert(i);
                self.push_entry(hash, key, value);
                debug_assert_eq!(self.indices.len(), self.entries.len());
                (i, None)
            }
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().shift_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    #[inline]
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().shift_remove_index(index)
    }

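    /// Move the entry at `from` to position `to`, shifting every entry in between.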
    #[inline]
    #[track_caller]
    pub(super) fn move_index(&mut self, from: usize, to: usize) {
        self.borrow_mut().move_index(from, to);
    }

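    /// Swap the positions of the entries at `a` and `b`, updating their indices.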
    #[inline]
    #[track_caller]
    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
        self.borrow_mut().swap_indices(a, b);
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.find_entry(hash.get(), eq) {
            Ok(entry) => {
                let (index, _) = entry.remove();
                let (key, value) = self.borrow_mut().swap_remove_finish(index);
                Some((index, key, value))
            }
            Err(_) => None,
        }
    }

    /// Remove an entry by swapping it with the last
    #[inline]
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        self.borrow_mut().swap_remove_index(index)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let (init, shifted_entries) = self.entries.split_at(end);
        let (start_entries, erased_entries) = init.split_at(start);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.capacity() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices, then shifted indices
            insert_bulk_no_grow(&mut self.indices, start_entries);
            insert_bulk_no_grow(&mut self.indices, shifted_entries);
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            let offset = end - start;
            self.indices.retain(move |i| {
                if *i >= end {
                    *i -= offset;
                    true
                } else {
                    *i < start
                }
            });
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

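    /// Keep only the entries for which `keep` returns `true`, preserving their
    /// relative order, then rebuild the hash indices if anything was removed.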
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        self.entries
            .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
        if self.entries.len() < self.indices.len() {
            self.rebuild_hash_table();
        }
    }

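    /// Clear the indices and re-insert one index per entry, in entry order.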
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        insert_bulk_no_grow(&mut self.indices, &self.entries);
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices; we can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in &mut self.indices {
            *i = len - *i - 1;
        }
    }
}

/// Reserve entries capacity, rounded up to match the indices (via `try_capacity`).
fn reserve_entries<K, V>(entries: &mut Entries<K, V>, additional: usize, try_capacity: usize) {
    // Use a soft-limit on the maximum capacity, but if the caller explicitly
    // requested more, do it and let them have the resulting panic.
    let try_capacity = try_capacity.min(IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY);
    let try_add = try_capacity - entries.len();
    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
        return;
    }
    entries.reserve_exact(additional);
}

impl<'a, K, V> RefMut<'a, K, V> {
    #[inline]
    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
        Self { indices, entries }
    }

    /// Reserve entries capacity, rounded up to match the indices
    #[inline]
    fn reserve_entries(&mut self, additional: usize) {
        reserve_entries(self.entries, additional, self.indices.capacity());
    }

    /// Insert a key-value pair in `entries`,
    /// *without* checking whether it already exists.
    fn insert_unique(self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
        let i = self.indices.len();
        debug_assert_eq!(i, self.entries.len());
        let entry = self
            .indices
            .insert_unique(hash.get(), i, get_hash(self.entries));
        if self.entries.len() == self.entries.capacity() {
            // We can't call `indices.capacity()` while this `entry` has borrowed it, so we'll have
            // to amortize growth on our own. It's still an improvement over the basic `Vec::push`
            // doubling though, since we also consider `MAX_ENTRIES_CAPACITY`.
            reserve_entries(self.entries, 1, 2 * self.entries.capacity());
        }
        self.entries.push(Bucket { hash, key, value });
        OccupiedEntry::new(self.entries, entry)
    }

    /// Insert a key-value pair in `entries` at a particular index,
    /// *without* checking whether it already exists.
    fn shift_insert_unique(&mut self, index: usize, hash: HashValue, key: K, value: V) {
        let end = self.indices.len();
        assert!(index <= end);
        // Increment others first so we don't have duplicate indices.
        self.increment_indices(index, end);
        let entries = &*self.entries;
        self.indices.insert_unique(hash.get(), index, move |&i| {
            // Adjust for the incremented indices to find hashes.
            debug_assert_ne!(i, index);
            let i = if i < index { i } else { i - 1 };
            entries[i].hash.get()
        });
        if self.entries.len() == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::insert` just double it.
            self.reserve_entries(1);
        }
        self.entries.insert(index, Bucket { hash, key, value });
    }

    /// Remove an entry by shifting all entries that follow it
    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Correct indices that point to the entries that followed the removed entry.
        self.decrement_indices(index + 1, self.entries.len());

        // Use Vec::remove to actually remove the entry.
        let entry = self.entries.remove(index);
        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Decrement all indices in the range `start..end`.
    ///
    /// The index `start - 1` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn decrement_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i -= 1;
                }
            }
        } else {
            // Find each entry in range to shift its index.
            for (i, entry) in (start..end).zip(shifted_entries) {
                update_index(self.indices, entry.hash, i, i - 1);
            }
        }
    }

    /// Increment all indices in the range `start..end`.
    ///
    /// The index `end` should not exist in `self.indices`.
    /// All entries should still be in their original positions.
    fn increment_indices(&mut self, start: usize, end: usize) {
        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let shifted_entries = &self.entries[start..end];
        if shifted_entries.len() > self.indices.capacity() / 2 {
            // Shift all indices in range.
            for i in &mut *self.indices {
                if start <= *i && *i < end {
                    *i += 1;
                }
            }
        } else {
            // Find each entry in range to shift its index, updated in reverse so
            // we never have duplicated indices that might have a hash collision.
            for (i, entry) in (start..end).zip(shifted_entries).rev() {
                update_index(self.indices, entry.hash, i, i + 1);
            }
        }
    }

    #[track_caller]
    fn move_index(&mut self, from: usize, to: usize) {
        let from_hash = self.entries[from].hash;
        let _ = self.entries[to]; // explicit bounds check
        if from != to {
            // Use a sentinel index so other indices don't collide.
            update_index(self.indices, from_hash, from, usize::MAX);

            // Update all other indices and rotate the entry positions.
            if from < to {
                self.decrement_indices(from + 1, to + 1);
                self.entries[from..=to].rotate_left(1);
            } else if to < from {
                self.increment_indices(to, from);
                self.entries[to..=from].rotate_right(1);
            }

            // Change the sentinel index to its final position.
            update_index(self.indices, from_hash, usize::MAX, to);
        }
    }

    #[track_caller]
    fn swap_indices(&mut self, a: usize, b: usize) {
        // If they're equal and in-bounds, there's nothing to do.
        if a == b && a < self.entries.len() {
            return;
        }

        // We'll get a "nice" bounds-check from indexing `entries`,
        // and then we expect to find it in the table as well.
        match self.indices.get_many_mut(
            [self.entries[a].hash.get(), self.entries[b].hash.get()],
            move |i, &x| if i == 0 { x == a } else { x == b },
        ) {
            [Some(ref_a), Some(ref_b)] => {
                mem::swap(ref_a, ref_b);
                self.entries.swap(a, b);
            }
            _ => panic!("indices not found"),
        }
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
    assert_send_sync::<IndexedEntry<'_, i32, i32>>();
    assert_send_sync::<raw_entry_v1::RawEntryMut<'_, i32, i32, ()>>();
}
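
// A minimal sanity-check sketch (not from upstream) of the index bookkeeping,
// using the key itself as a stand-in hash value. It assumes `HashValue` can be
// constructed directly from within the crate, as it is elsewhere in this crate.
#[test]
fn insert_and_remove_keep_indices_in_sync() {
    let mut map = IndexMapCore::new();
    for i in 0..5 {
        let (index, old) = map.insert_full(HashValue(i), i, i * 10);
        assert_eq!(index, i);
        assert!(old.is_none());
    }

    // `swap_remove` moves the last entry into the removed slot.
    assert_eq!(map.swap_remove_full(HashValue(1), &1_usize), Some((1, 1, 10)));
    assert_eq!(map.as_entries()[1].key, 4);

    // `shift_remove` keeps the remaining entries in relative order.
    assert_eq!(map.shift_remove_full(HashValue(2), &2_usize), Some((2, 2, 20)));
    let keys: Vec<usize> = map.as_entries().iter().map(|b| b.key).collect();
    assert_eq!(keys, [0, 4, 3]);
}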