Commit cfad758

Merge pull request #424 from cuviper/buckets
Use the bucket API from hashbrown v0.16.1
2 parents 0e68f8a + a96b9c7 · commit cfad758

File tree: 7 files changed, +241 −286 lines

Cargo.toml

Lines changed: 2 additions & 2 deletions

```diff
@@ -1,7 +1,7 @@
 [package]
 name = "indexmap"
 edition = "2021"
-version = "2.12.0"
+version = "2.12.1"
 documentation = "https://docs.rs/indexmap/"
 repository = "https://github.com/indexmap-rs/indexmap"
 license = "Apache-2.0 OR MIT"
@@ -15,7 +15,7 @@ bench = false
 
 [dependencies]
 equivalent = { version = "1.0", default-features = false }
-hashbrown = { version = "0.16", default-features = false }
+hashbrown = { version = "0.16.1", default-features = false }
 
 arbitrary = { version = "1.0", optional = true, default-features = false }
 quickcheck = { version = "1.0", optional = true, default-features = false }
```
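
Note on the requirement bump: in Cargo, `"0.16.1"` is shorthand for the caret requirement `^0.16.1`, i.e. `>=0.16.1, <0.17.0`, so raising the floor from `0.16` is what guarantees the bucket API used below is actually present at build time.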

RELEASES.md

Lines changed: 4 additions & 0 deletions

```diff
@@ -1,5 +1,9 @@
 # Releases
 
+## 2.12.1 (2025-11-20)
+
+- Simplified a lot of internals using `hashbrown`'s new bucket API.
+
 ## 2.12.0 (2025-10-17)
 
 - **MSRV**: Rust 1.82.0 or later is now required.
```

src/map.rs

Lines changed: 2 additions & 5 deletions

```diff
@@ -736,7 +736,7 @@ where
     /// Computes in **O(1)** time (amortized average).
     pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
         let hash = self.hash(&key);
-        self.core.entry(hash, key)
+        Entry::new(&mut self.core, hash, key)
     }
 
     /// Creates a splicing iterator that replaces the specified range in the map
@@ -1452,10 +1452,7 @@ impl<K, V, S> IndexMap<K, V, S> {
     ///
     /// Computes in **O(1)** time.
     pub fn get_index_entry(&mut self, index: usize) -> Option<IndexedEntry<'_, K, V>> {
-        if index >= self.len() {
-            return None;
-        }
-        Some(IndexedEntry::new(&mut self.core, index))
+        IndexedEntry::new(&mut self.core, index)
     }
 
     /// Get an array of `N` key-value pairs by `N` indices
```
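
Neither public signature changes here: `entry` still returns `Entry<'_, K, V>`, and `get_index_entry` still returns an `Option`; the bounds check simply moves into `IndexedEntry::new`, which now evidently returns `Option` itself. A quick sanity sketch of the contracts these methods keep, against the published `IndexMap` API (assuming `IndexedEntry::key`, which indexmap provides):

```rust
use indexmap::IndexMap;

fn main() {
    let mut map: IndexMap<&str, i32> = IndexMap::new();

    // `entry` still resolves to a vacant or occupied slot in O(1) average time;
    // only its internal construction changed (`Entry::new` vs. `core.entry`).
    *map.entry("a").or_insert(0) += 1;
    map.entry("b").or_insert(10);

    // `get_index_entry` still yields `None` out of bounds; the check simply
    // moved from this method into `IndexedEntry::new`.
    if let Some(entry) = map.get_index_entry(0) {
        assert_eq!(*entry.key(), "a");
    }
    assert!(map.get_index_entry(99).is_none());
}
```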

src/map/core.rs

Lines changed: 58 additions & 135 deletions

```diff
@@ -35,17 +35,6 @@ pub(crate) struct IndexMapCore<K, V> {
     entries: Entries<K, V>,
 }
 
-/// Mutable references to the parts of an `IndexMapCore`.
-///
-/// When using `HashTable::find_entry`, that takes hold of `&mut indices`, so we have to borrow our
-/// `&mut entries` separately, and there's no way to go back to a `&mut IndexMapCore`. So this type
-/// is used to implement methods on the split references, and `IndexMapCore` can also call those to
-/// avoid duplication.
-struct RefMut<'a, K, V> {
-    indices: &'a mut Indices,
-    entries: &'a mut Entries<K, V>,
-}
-
 #[inline(always)]
 fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + use<'_, K, V> {
     move |&i| entries[i].hash.get()
@@ -103,7 +92,7 @@ where
         if self.entries.capacity() < other.entries.len() {
             // If we must resize, match the indices capacity.
             let additional = other.entries.len() - self.entries.len();
-            self.borrow_mut().reserve_entries(additional);
+            self.reserve_entries(additional);
         }
         self.entries.clone_from(&other.entries);
     }
@@ -121,11 +110,6 @@ impl<K, V> IndexMapCore<K, V> {
         }
     }
 
-    #[inline]
-    fn borrow_mut(&mut self) -> RefMut<'_, K, V> {
-        RefMut::new(&mut self.indices, &mut self.entries)
-    }
-
     #[inline]
     pub(crate) fn with_capacity(n: usize) -> Self {
         IndexMapCore {
@@ -247,7 +231,7 @@ impl<K, V> IndexMapCore<K, V> {
         self.indices.reserve(additional, get_hash(&self.entries));
         // Only grow entries if necessary, since we also round up capacity.
         if additional > self.entries.capacity() - self.entries.len() {
-            self.borrow_mut().reserve_entries(additional);
+            self.reserve_entries(additional);
         }
     }
 
@@ -327,7 +311,7 @@ impl<K, V> IndexMapCore<K, V> {
         if self.entries.len() == self.entries.capacity() {
             // Reserve our own capacity synced to the indices,
             // rather than letting `Vec::push` just double it.
-            self.borrow_mut().reserve_entries(1);
+            self.reserve_entries(1);
         }
         self.entries.push(Bucket { hash, key, value });
     }
```
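
The deleted `RefMut` doc comment above is the crux of this PR: an entry guard from `HashTable::find_entry` keeps the `indices` field mutably borrowed, and while it lives no `&mut self` method can be called on the owning struct, so the old code pre-split the two fields into a `RefMut` pair and hung its helpers off that type. A self-contained toy illustration of the borrow conflict (names are illustrative, not indexmap's real types):

```rust
struct Core {
    indices: Vec<usize>,  // stand-in for the hashbrown table
    entries: Vec<String>, // stand-in for the entry vector
}

impl Core {
    fn finish(&mut self) {} // helper that needs all of `&mut self`

    fn demo(&mut self) {
        // The guard borrows only the `indices` field...
        let guard: &mut usize = self.indices.last_mut().unwrap();
        // ...but a method call re-borrows *all* of `self`, so this won't compile:
        // self.finish(); // error[E0499]: cannot borrow `*self` as mutable twice
        *guard = 0;

        // Disjoint field borrows are fine, which is exactly what the old
        // `RefMut { indices, entries }` type packaged up:
        let (indices, entries) = (&mut self.indices, &mut self.entries);
        entries.push(String::new());
        indices.push(entries.len() - 1);
    }
}

fn main() {
    Core { indices: vec![0], entries: vec![String::new()] }.demo();
}
```

The bucket API sidesteps all of this by handing back a plain `usize` slot handle instead of a borrowing guard, which is why the split-reference type and its forwarding wrappers below can simply be deleted.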
```diff
@@ -385,45 +369,15 @@ impl<K, V> IndexMapCore<K, V> {
         }
     }
 
-    /// Replaces the key at the given index,
-    /// *without* checking whether it already exists.
-    #[track_caller]
-    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
-        self.borrow_mut().replace_index_unique(index, hash, key).0
-    }
-
     /// Remove an entry by shifting all entries that follow it
     pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
     where
         Q: ?Sized + Equivalent<K>,
     {
         let eq = equivalent(key, &self.entries);
-        match self.indices.find_entry(hash.get(), eq) {
-            Ok(entry) => {
-                let (index, _) = entry.remove();
-                let (key, value) = self.borrow_mut().shift_remove_finish(index);
-                Some((index, key, value))
-            }
-            Err(_) => None,
-        }
-    }
-
-    /// Remove an entry by shifting all entries that follow it
-    #[inline]
-    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
-        self.borrow_mut().shift_remove_index(index)
-    }
-
-    #[inline]
-    #[track_caller]
-    pub(super) fn move_index(&mut self, from: usize, to: usize) {
-        self.borrow_mut().move_index(from, to);
-    }
-
-    #[inline]
-    #[track_caller]
-    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
-        self.borrow_mut().swap_indices(a, b);
+        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
+        let (key, value) = self.shift_remove_finish(index);
+        Some((index, key, value))
     }
 
     /// Remove an entry by swapping it with the last
@@ -432,20 +386,9 @@ impl<K, V> IndexMapCore<K, V> {
         Q: ?Sized + Equivalent<K>,
     {
         let eq = equivalent(key, &self.entries);
-        match self.indices.find_entry(hash.get(), eq) {
-            Ok(entry) => {
-                let (index, _) = entry.remove();
-                let (key, value) = self.borrow_mut().swap_remove_finish(index);
-                Some((index, key, value))
-            }
-            Err(_) => None,
-        }
-    }
-
-    /// Remove an entry by swapping it with the last
-    #[inline]
-    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
-        self.borrow_mut().swap_remove_index(index)
+        let (index, _) = self.indices.find_entry(hash.get(), eq).ok()?.remove();
+        let (key, value) = self.swap_remove_finish(index);
+        Some((index, key, value))
     }
 
     /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
```
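
Besides dropping the `borrow_mut()` hops, the two removal hunks above collapse an explicit `match` on `find_entry`'s `Result` into an `.ok()?` chain; the behavior is identical. A reduced sketch of that same rewrite against `hashbrown::HashTable`, whose `find_entry` returns `Result<OccupiedEntry, AbsentEntry>`:

```rust
use hashbrown::HashTable;

// Before: match both arms of the Result by hand.
fn remove_verbose(table: &mut HashTable<usize>, hash: u64, want: usize) -> Option<usize> {
    match table.find_entry(hash, |&i| i == want) {
        Ok(entry) => {
            let (value, _vacant) = entry.remove();
            Some(value)
        }
        Err(_absent) => None,
    }
}

// After: `.ok()?` turns the Result into an Option and early-returns on a miss.
fn remove_terse(table: &mut HashTable<usize>, hash: u64, want: usize) -> Option<usize> {
    let (value, _vacant) = table.find_entry(hash, |&i| i == want).ok()?.remove();
    Some(value)
}

fn main() {
    let mut table = HashTable::new();
    let _ = table.insert_unique(7, 42usize, |_| 7);
    assert_eq!(remove_terse(&mut table, 7, 42), Some(42));
    assert_eq!(remove_verbose(&mut table, 7, 42), None); // already gone
}
```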
```diff
@@ -524,70 +467,44 @@ impl<K, V> IndexMapCore<K, V> {
             *i = len - *i - 1;
         }
     }
-}
-
-/// Reserve entries capacity, rounded up to match the indices (via `try_capacity`).
-fn reserve_entries<K, V>(entries: &mut Entries<K, V>, additional: usize, try_capacity: usize) {
-    // Use a soft-limit on the maximum capacity, but if the caller explicitly
-    // requested more, do it and let them have the resulting panic.
-    let try_capacity = try_capacity.min(IndexMapCore::<K, V>::MAX_ENTRIES_CAPACITY);
-    let try_add = try_capacity - entries.len();
-    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
-        return;
-    }
-    entries.reserve_exact(additional);
-}
-
-impl<'a, K, V> RefMut<'a, K, V> {
-    #[inline]
-    fn new(indices: &'a mut Indices, entries: &'a mut Entries<K, V>) -> Self {
-        Self { indices, entries }
-    }
 
     /// Reserve entries capacity, rounded up to match the indices
     #[inline]
     fn reserve_entries(&mut self, additional: usize) {
-        reserve_entries(self.entries, additional, self.indices.capacity());
+        // Use a soft-limit on the maximum capacity, but if the caller explicitly
+        // requested more, do it and let them have the resulting panic.
+        let try_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+        let try_add = try_capacity - self.entries.len();
+        if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+            return;
+        }
+        self.entries.reserve_exact(additional);
     }
 
     /// Insert a key-value pair in `entries`,
     /// *without* checking whether it already exists.
-    fn insert_unique(self, hash: HashValue, key: K, value: V) -> OccupiedEntry<'a, K, V> {
+    pub(super) fn insert_unique(&mut self, hash: HashValue, key: K, value: V) -> &mut Bucket<K, V> {
         let i = self.indices.len();
         debug_assert_eq!(i, self.entries.len());
-        let entry = self
-            .indices
-            .insert_unique(hash.get(), i, get_hash(self.entries));
-        if self.entries.len() == self.entries.capacity() {
-            // We can't call `indices.capacity()` while this `entry` has borrowed it, so we'll have
-            // to amortize growth on our own. It's still an improvement over the basic `Vec::push`
-            // doubling though, since we also consider `MAX_ENTRIES_CAPACITY`.
-            reserve_entries(self.entries, 1, 2 * self.entries.capacity());
-        }
-        self.entries.push(Bucket { hash, key, value });
-        OccupiedEntry::new(self.entries, entry)
+        self.indices
+            .insert_unique(hash.get(), i, get_hash(&self.entries));
+        self.push_entry(hash, key, value);
+        &mut self.entries[i]
     }
 
     /// Replaces the key at the given index,
     /// *without* checking whether it already exists.
     #[track_caller]
-    fn replace_index_unique(
-        self,
-        index: usize,
-        hash: HashValue,
-        key: K,
-    ) -> (K, OccupiedEntry<'a, K, V>) {
+    pub(crate) fn replace_index_unique(&mut self, index: usize, hash: HashValue, key: K) -> K {
         // NB: This removal and insertion isn't "no grow" (with unreachable hasher)
         // because hashbrown's tombstones might force a resize anyway.
-        erase_index(self.indices, self.entries[index].hash, index);
-        let table_entry = self
-            .indices
+        erase_index(&mut self.indices, self.entries[index].hash, index);
+        self.indices
             .insert_unique(hash.get(), index, get_hash(&self.entries));
 
         let entry = &mut self.entries[index];
         entry.hash = hash;
-        let old_key = mem::replace(&mut entry.key, key);
-        (old_key, OccupiedEntry::new(self.entries, table_entry))
+        mem::replace(&mut entry.key, key)
     }
 
     /// Insert a key-value pair in `entries` at a particular index,
```
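
The inlining above is behavior-preserving: `reserve_entries` still opportunistically reserves up to the index table's capacity (under the `MAX_ENTRIES_CAPACITY` soft cap) when that exceeds the request and the allocation succeeds, and otherwise reserves exactly what the caller asked for. The same strategy on a plain `Vec`, with `MAX` as an arbitrary stand-in for `MAX_ENTRIES_CAPACITY` and `saturating_sub` added for standalone safety:

```rust
const MAX: usize = usize::MAX / 16; // hypothetical stand-in for MAX_ENTRIES_CAPACITY

fn reserve_soft(entries: &mut Vec<u64>, additional: usize, indices_capacity: usize) {
    // Aim for the (capped) indices capacity in one shot...
    let try_capacity = Ord::min(indices_capacity, MAX);
    let try_add = try_capacity.saturating_sub(entries.len());
    if try_add > additional && entries.try_reserve_exact(try_add).is_ok() {
        return;
    }
    // ...otherwise reserve exactly what was requested; if the caller
    // explicitly asked for more than the soft cap allows, let this panic.
    entries.reserve_exact(additional);
}

fn main() {
    let mut v = Vec::new();
    reserve_soft(&mut v, 10, 64);
    assert!(v.capacity() >= 64); // rounded up to match the indices
}
```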
```diff
@@ -613,10 +530,10 @@ impl<'a, K, V> RefMut<'a, K, V> {
     }
 
     /// Remove an entry by shifting all entries that follow it
-    fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
         match self.entries.get(index) {
             Some(entry) => {
-                erase_index(self.indices, entry.hash, index);
+                erase_index(&mut self.indices, entry.hash, index);
                 Some(self.shift_remove_finish(index))
             }
             None => None,
@@ -636,10 +553,10 @@ impl<'a, K, V> RefMut<'a, K, V> {
     }
 
     /// Remove an entry by swapping it with the last
-    fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
         match self.entries.get(index) {
             Some(entry) => {
-                erase_index(self.indices, entry.hash, index);
+                erase_index(&mut self.indices, entry.hash, index);
                 Some(self.swap_remove_finish(index))
             }
             None => None,
@@ -659,7 +576,7 @@ impl<'a, K, V> RefMut<'a, K, V> {
             // was not last element
             // examine new element in `index` and find it in indices
             let last = self.entries.len();
-            update_index(self.indices, entry.hash, last, index);
+            update_index(&mut self.indices, entry.hash, last, index);
         }
 
         (entry.key, entry.value)
@@ -674,15 +591,15 @@ impl<'a, K, V> RefMut<'a, K, V> {
         let shifted_entries = &self.entries[start..end];
         if shifted_entries.len() > self.indices.capacity() / 2 {
             // Shift all indices in range.
-            for i in &mut *self.indices {
+            for i in &mut self.indices {
                 if start <= *i && *i < end {
                     *i -= 1;
                 }
             }
         } else {
             // Find each entry in range to shift its index.
             for (i, entry) in (start..end).zip(shifted_entries) {
-                update_index(self.indices, entry.hash, i, i - 1);
+                update_index(&mut self.indices, entry.hash, i, i - 1);
             }
         }
     }
@@ -696,7 +613,7 @@ impl<'a, K, V> RefMut<'a, K, V> {
         let shifted_entries = &self.entries[start..end];
         if shifted_entries.len() > self.indices.capacity() / 2 {
             // Shift all indices in range.
-            for i in &mut *self.indices {
+            for i in &mut self.indices {
                 if start <= *i && *i < end {
                     *i += 1;
                 }
@@ -705,43 +622,49 @@ impl<'a, K, V> RefMut<'a, K, V> {
             // Find each entry in range to shift its index, updated in reverse so
             // we never have duplicated indices that might have a hash collision.
             for (i, entry) in (start..end).zip(shifted_entries).rev() {
-                update_index(self.indices, entry.hash, i, i + 1);
+                update_index(&mut self.indices, entry.hash, i, i + 1);
             }
         }
     }
 
     #[track_caller]
-    fn move_index(&mut self, from: usize, to: usize) {
+    pub(super) fn move_index(&mut self, from: usize, to: usize) {
         let from_hash = self.entries[from].hash;
-        let _ = self.entries[to]; // explicit bounds check
         if from != to {
-            // Use a sentinel index so other indices don't collide.
-            update_index(self.indices, from_hash, from, usize::MAX);
-
-            // Update all other indices and rotate the entry positions.
-            if from < to {
-                self.decrement_indices(from + 1, to + 1);
-                self.entries[from..=to].rotate_left(1);
-            } else if to < from {
-                self.increment_indices(to, from);
-                self.entries[to..=from].rotate_right(1);
-            }
+            let _ = self.entries[to]; // explicit bounds check
+
+            // Find the bucket index first so we won't lose it among other updated indices.
+            let bucket = self
+                .indices
+                .find_bucket_index(from_hash.get(), move |&i| i == from)
+                .expect("index not found");
 
-            // Change the sentinel index to its final position.
-            update_index(self.indices, from_hash, usize::MAX, to);
+            self.move_index_inner(from, to);
+            *self.indices.get_bucket_mut(bucket).unwrap() = to;
+        }
+    }
+
+    fn move_index_inner(&mut self, from: usize, to: usize) {
+        // Update all other indices and rotate the entry positions.
+        if from < to {
+            self.decrement_indices(from + 1, to + 1);
+            self.entries[from..=to].rotate_left(1);
+        } else if to < from {
+            self.increment_indices(to, from);
+            self.entries[to..=from].rotate_right(1);
         }
     }
 
     #[track_caller]
-    fn swap_indices(&mut self, a: usize, b: usize) {
+    pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
         // If they're equal and in-bounds, there's nothing to do.
         if a == b && a < self.entries.len() {
             return;
         }
 
         // We'll get a "nice" bounds-check from indexing `entries`,
         // and then we expect to find it in the table as well.
-        match self.indices.get_many_mut(
+        match self.indices.get_disjoint_mut(
             [self.entries[a].hash.get(), self.entries[b].hash.get()],
             move |i, &x| if i == 0 { x == a } else { x == b },
         ) {
```
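
The `move_index` rewrite shows the bucket API's payoff most directly. The old code parked the moved entry's index at a `usize::MAX` sentinel so the bulk increment/decrement pass couldn't confuse it with a neighbor, then patched the sentinel afterward, costing two extra hash lookups; the new code grabs a stable slot handle once and writes through it at the end. A reduced sketch of that pattern on a bare `HashTable<usize>`, using `find_bucket_index`/`get_bucket_mut` as they appear in this diff (bucket indices stay valid only while no insertion or removal can resize the table):

```rust
use hashbrown::HashTable;

/// Toy version of the new `move_index` shape: re-point the entry holding
/// `from` at `to` while every value in `from+1..=to` shifts down by one.
/// As in indexmap, `hash` belongs to the keyed entry, not the usize payload,
/// so rewriting the payload doesn't invalidate the table.
fn move_value(table: &mut HashTable<usize>, hash: u64, from: usize, to: usize) {
    // 1. Pin down the slot first. A bucket index is a plain `usize`, not a
    //    borrowing guard, so the table stays free for mutation below.
    let bucket = table
        .find_bucket_index(hash, |&i| i == from)
        .expect("value not found");

    // 2. Bulk-update the other values; no sentinel is needed even if this
    //    pass momentarily makes another value collide with `from` or `to`.
    for i in table.iter_mut() {
        if from < *i && *i <= to {
            *i -= 1;
        }
    }

    // 3. Write the final value straight into the remembered slot.
    *table.get_bucket_mut(bucket).unwrap() = to;
}
```

The truncated `swap_indices` hunk at the end is the corresponding rename on the two-slot lookup, `get_many_mut` → `get_disjoint_mut`, with the same closure-based matching as before.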
