indexmap-1.2.0/.gitignore010064400017510001751000000000221324036072100135220ustar0000000000000000target Cargo.lock indexmap-1.2.0/.travis.yml010064400017510001751000000015101352703340100136460ustar0000000000000000language: rust sudo: false matrix: include: # MSRV is lower for non-dev builds - rust: 1.18.0 env: - SKIP_TEST=1 - rust: 1.30.0 - rust: stable env: - FEATURES='serde-1' - rust: stable env: - FEATURES='rayon' - rust: beta - rust: nightly env: - TEST_BENCH=1 - rust: nightly env: - FEATURES='test_low_transition_point' branches: only: - master script: - | cargo build --verbose --features "$FEATURES" && if [ -z "$SKIP_TEST" ]; then cargo test --verbose --features "$FEATURES" && cargo test --release --verbose --features "$FEATURES" fi && if [ -n "$TEST_BENCH" ]; then cargo test -v --benches --no-run --features "$FEATURES" fi && cargo doc --verbose --features "$FEATURES" indexmap-1.2.0/Cargo.toml.orig010064400017510001751000000025651353516752400144540ustar0000000000000000[package] name = "indexmap" version = "1.2.0" authors = [ "bluss", "Josh Stone " ] documentation = "https://docs.rs/indexmap/" repository = "https://github.com/bluss/indexmap" license = "Apache-2.0/MIT" description = """ A hash table with consistent order and fast iteration. The indexmap is a hash table where the iteration order of the key-value pairs is independent of the hash values of the keys. It has the usual hash table functionality, it preserves insertion order except after removals, and it allows lookup of its elements by either hash table key or numerical index. A corresponding hash set type is also provided. This crate was initially published under the name ordermap, but it was renamed to indexmap. 
""" keywords = ["hashmap"] categories = ["data-structures"] [lib] bench = false [dependencies] serde = { version = "1.0", optional = true } rayon = { version = "1.0", optional = true } [dev-dependencies] itertools = "0.8" rand = "0.6" quickcheck = { version = "0.8", default-features = false } fnv = "1.0" lazy_static = "1.3" serde_test = "1.0.99" [features] # Serialization with serde 1.0 serde-1 = ["serde"] # for testing only, of course test_low_transition_point = [] test_debug = [] [profile.bench] debug = true [package.metadata.release] no-dev-version = true tag-name = "{{version}}" [package.metadata.docs.rs] features = ["serde-1", "rayon"] indexmap-1.2.0/Cargo.toml0000644000000037110000000000000107000ustar00# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies # # If you believe there's an error in this file please file an # issue against the rust-lang/cargo repository. If you're # editing this file be aware that the upstream Cargo.toml # will likely look very different (and much more reasonable) [package] name = "indexmap" version = "1.2.0" authors = ["bluss", "Josh Stone "] description = "A hash table with consistent order and fast iteration.\n\nThe indexmap is a hash table where the iteration order of the key-value\npairs is independent of the hash values of the keys. It has the usual\nhash table functionality, it preserves insertion order except after\nremovals, and it allows lookup of its elements by either hash table key\nor numerical index. 
A corresponding hash set type is also provided.\n\nThis crate was initially published under the name ordermap, but it was renamed to\nindexmap.\n" documentation = "https://docs.rs/indexmap/" keywords = ["hashmap"] categories = ["data-structures"] license = "Apache-2.0/MIT" repository = "https://github.com/bluss/indexmap" [package.metadata.docs.rs] features = ["serde-1", "rayon"] [package.metadata.release] no-dev-version = true tag-name = "{{version}}" [profile.bench] debug = true [lib] bench = false [dependencies.rayon] version = "1.0" optional = true [dependencies.serde] version = "1.0" optional = true [dev-dependencies.fnv] version = "1.0" [dev-dependencies.itertools] version = "0.8" [dev-dependencies.lazy_static] version = "1.3" [dev-dependencies.quickcheck] version = "0.8" default-features = false [dev-dependencies.rand] version = "0.6" [dev-dependencies.serde_test] version = "1.0.99" [features] serde-1 = ["serde"] test_debug = [] test_low_transition_point = [] indexmap-1.2.0/LICENSE-APACHE010064400017510001751000000251371324036072100134740ustar0000000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. indexmap-1.2.0/LICENSE-MIT010064400017510001751000000020311324036072100131700ustar0000000000000000Copyright (c) 2016--2017 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. indexmap-1.2.0/README.rst010064400017510001751000000256601353516767400132630ustar0000000000000000indexmap ======== |build_status|_ |crates|_ |docs|_ |rustc|_ .. |crates| image:: https://img.shields.io/crates/v/indexmap.svg .. _crates: https://crates.io/crates/indexmap .. 
|build_status| image:: https://travis-ci.org/bluss/indexmap.svg .. _build_status: https://travis-ci.org/bluss/indexmap .. |docs| image:: https://docs.rs/indexmap/badge.svg .. _docs: https://docs.rs/indexmap .. |rustc| image:: https://img.shields.io/badge/rust-1.18%2B-orange.svg .. _rustc: https://img.shields.io/badge/rust-1.18%2B-orange.svg A safe, pure-Rust hash table which preserves insertion order. This crate implements compact map and set data-structures, where the iteration order of the keys is independent from their hash or value. It preserves insertion order (except after removals), and it allows lookup of entries by either hash table key or numerical index. Note: this crate was originally released under the name ``ordermap``, but it was renamed to ``indexmap`` to better reflect its features. Background ========== This was inspired by Python 3.6's new dict implementation (which remembers the insertion order and is fast to iterate, and is compact in memory). Some of those features were translated to Rust, and some were not. The result was indexmap, a hash table that has following properties: - Order is **independent of hash function** and hash values of keys. - Fast to iterate. - Indexed in compact space. - Preserves insertion order **as long** as you don't call ``.remove()``. - Uses robin hood hashing just like Rust's libstd ``HashMap`` used to do (before std switched to hashbrown). - It's the usual backwards shift deletion, but only on the index vector, so it's cheaper because it's moving less memory around. Does not implement (Yet) ------------------------ - ``.reserve()`` exists but does not have a complete implementation Performance ----------- ``IndexMap`` derives a couple of performance facts directly from how it is constructed, which is roughly: Two vectors, the first, sparse, with hashes and key-value indices, and the second, dense, the key-value pairs. - Iteration is very fast since it is on the dense key-values. 
- Removal is fast since it moves memory areas only in the first vector, and uses a single swap in the second vector. - Lookup is fast-ish because the hashes and indices are densely stored. Lookup also is slow-ish since hashes and key-value pairs are stored in separate places. (Visible when cpu caches size is limiting.) - In practice, ``IndexMap`` has been tested out as the hashmap in rustc in PR45282_ and the performance was roughly on par across the whole workload. - If you want the properties of ``IndexMap``, or its strongest performance points fits your workload, it might be the best hash table implementation. .. _PR45282: https://github.com/rust-lang/rust/pull/45282 Interesting Features -------------------- - Insertion order is preserved (``.swap_remove()`` perturbs the order, like the method name says). - Implements ``.pop() -> Option<(K, V)>`` in O(1) time. - ``IndexMap::new()`` is empty and uses no allocation until you insert something. - Lookup key-value pairs by index and vice versa. - No ``unsafe``. - Supports ``IndexMut``. Where to go from here? ---------------------- - Ideas and PRs for how to implement insertion-order preserving remove (for example tombstones) are welcome. The plan is to split the crate into two hash table implementations a) the current compact index space version and b) the full insertion order version. Ideas that we already did ------------------------- - It can be an *indexable* ordered map in the current fashion (This was implemented in 0.2.0, for potential use as a graph datastructure). - Idea for more cache efficient lookup (This was implemented in 0.1.2). Current ``indices: Vec``. ``Pos`` is interpreted as ``(u32, u32)`` more or less when ``.raw_capacity()`` fits in 32 bits. ``Pos`` then stores both the lower half of the hash and the entry index. This means that the hash values in ``Bucket`` don't need to be accessed while scanning for an entry. 
Recent Changes ============== - 1.2.0 - Plain ``.remove()`` now has a deprecation message, it informs the user about picking one of the removal functions ``swap_remove`` and ``shift_remove`` which have different performance and order semantics. Plain ``.remove()`` will not be removed, the warning message and method will remain until further. - Add new method ``shift_remove`` for order preserving removal on the map, and ``shift_take`` for the corresponding operation on the set. - Add methods ``swap_remove``, ``swap_remove_entry`` to ``Entry``. - Fix indexset/indexmap to support full paths, like ``indexmap::indexmap!()`` - Internal improvements: fix warnings, deprecations and style lints - 1.1.0 - Added optional feature `"rayon"` that adds parallel iterator support to `IndexMap` and `IndexSet` using Rayon. This includes all the regular iterators in parallel versions, and parallel sort. - Implemented ``Clone`` for ``map::{Iter, Keys, Values}`` and ``set::{Difference, Intersection, Iter, SymmetricDifference, Union}`` - Implemented ``Debug`` for ``map::{Entry, IntoIter, Iter, Keys, Values}`` and ``set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}`` - Serde trait ``IntoDeserializer`` are implemented for ``IndexMap`` and ``IndexSet``. - Minimum Rust version requirement increased to Rust 1.30 for development builds. - 1.0.2 - The new methods ``IndexMap::insert_full`` and ``IndexSet::insert_full`` are both like ``insert`` with the index included in the return value. - The new method ``Entry::and_modify`` can be used to modify occupied entries, matching the new methods of ``std`` maps in Rust 1.26. - The new method ``Entry::or_default`` inserts a default value in unoccupied entries, matching the new methods of ``std`` maps in Rust 1.28. - 1.0.1 - Document Rust version policy for the crate (see rustdoc) - 1.0.0 - This is the 1.0 release for ``indexmap``! 
(the crate and datastructure formerly known as “ordermap”) - ``OccupiedEntry::insert`` changed its signature, to use ``&mut self`` for the method receiver, matching the equivalent method for a standard ``HashMap``. Thanks to @dtolnay for finding this bug. - The deprecated old names from ordermap were removed: ``OrderMap``, ``OrderSet``, ``ordermap!{}``, ``orderset!{}``. Use the new ``IndexMap`` etc names instead. - 0.4.1 - Renamed crate to ``indexmap``; the ``ordermap`` crate is now deprecated and the types ``OrderMap/Set`` now have a deprecation notice. - 0.4.0 - This is the last release series for this ``ordermap`` under that name, because the crate is **going to be renamed** to ``indexmap`` (with types ``IndexMap``, ``IndexSet``) and no change in functionality! - The map and its associated structs moved into the ``map`` submodule of the crate, so that the map and set are symmetric + The iterators, ``Entry`` and other structs are now under ``ordermap::map::`` - Internally refactored ``OrderMap`` so that all the main algorithms (insertion, lookup, removal etc) that don't use the ``S`` parameter (the hasher) are compiled without depending on ``S``, which reduces generics bloat. - ``Entry`` no longer has a type parameter ``S``, which is just like the standard ``HashMap``'s entry. - Minimum Rust version requirement increased to Rust 1.18 - 0.3.5 - Documentation improvements - 0.3.4 - The ``.retain()`` methods for ``OrderMap`` and ``OrderSet`` now traverse the elements in order, and the retained elements **keep their order** - Added new methods ``.sort_by()``, ``.sort_keys()`` to ``OrderMap`` and ``.sort_by()``, ``.sort()`` to ``OrderSet``. These methods allow you to sort the maps in place efficiently. - 0.3.3 - Document insertion behaviour better by @lucab - Updated dependences (no feature changes) by @ignatenkobrain - 0.3.2 - Add ``OrderSet`` by @cuviper! - ``OrderMap::drain`` is now (too) a double ended iterator. 
- 0.3.1 - In all ordermap iterators, forward the ``collect`` method to the underlying iterator as well. - Add crates.io categories. - 0.3.0 - The methods ``get_pair``, ``get_pair_index`` were both replaced by ``get_full`` (and the same for the mutable case). - Method ``swap_remove_pair`` replaced by ``swap_remove_full``. - Add trait ``MutableKeys`` for opt-in mutable key access. Mutable key access is only possible through the methods of this extension trait. - Add new trait ``Equivalent`` for key equivalence. This extends the ``Borrow`` trait mechanism for ``OrderMap::get`` in a backwards compatible way, just some minor type inference related issues may become apparent. See `#10`__ for more information. - Implement ``Extend<(&K, &V)>`` by @xfix. __ https://github.com/bluss/ordermap/pull/10 - 0.2.13 - Fix deserialization to support custom hashers by @Techcable. - Add methods ``.index()`` on the entry types by @garro95. - 0.2.12 - Add methods ``.with_hasher()``, ``.hasher()``. - 0.2.11 - Support ``ExactSizeIterator`` for the iterators. By @Binero. - Use ``Box<[Pos]>`` internally, saving a word in the ``OrderMap`` struct. - Serde support, with crate feature ``"serde-1"``. By @xfix. - 0.2.10 - Add iterator ``.drain(..)`` by @stevej. - 0.2.9 - Add method ``.is_empty()`` by @overvenus. - Implement ``PartialEq, Eq`` by @overvenus. - Add method ``.sorted_by()``. - 0.2.8 - Add iterators ``.values()`` and ``.values_mut()``. - Fix compatibility with 32-bit platforms. - 0.2.7 - Add ``.retain()``. - 0.2.6 - Add ``OccupiedEntry::remove_entry`` and other minor entry methods, so that it now has all the features of ``HashMap``'s entries. - 0.2.5 - Improved ``.pop()`` slightly. - 0.2.4 - Improved performance of ``.insert()`` (`#3`__) by @pczarn. __ https://github.com/bluss/ordermap/pull/3 - 0.2.3 - Generalize ``Entry`` for now, so that it works on hashmaps with non-default hasher. 
However, there's a lingering compat issue since libstd ``HashMap`` does not parameterize its entries by the hasher (``S`` typarm). - Special case some iterator methods like ``.nth()``. - 0.2.2 - Disable the verbose ``Debug`` impl by default. - 0.2.1 - Fix doc links and clarify docs. - 0.2.0 - Add more ``HashMap`` methods & compat with its API. - Experimental support for ``.entry()`` (the simplest parts of the API). - Add ``.reserve()`` (placeholder impl). - Add ``.remove()`` as synonym for ``.swap_remove()``. - Changed ``.insert()`` to swap value if the entry already exists, and return ``Option``. - Experimental support as an *indexed* hash map! Added methods ``.get_index()``, ``.get_index_mut()``, ``.swap_remove_index()``, ``.get_pair_index()``, ``.get_pair_index_mut()``. - 0.1.2 - Implement the 32/32 split idea for ``Pos`` which improves cache utilization and lookup performance. - 0.1.1 - Initial release. indexmap-1.2.0/benches/bench.rs010064400017510001751000000417451353367262700146270ustar0000000000000000#![feature(test)] extern crate test; extern crate rand; extern crate fnv; #[macro_use] extern crate lazy_static; use std::hash::Hash; use fnv::FnvHasher; use std::hash::BuildHasherDefault; type FnvBuilder = BuildHasherDefault; use test::Bencher; use test::black_box; extern crate indexmap; use indexmap::IndexMap; use std::collections::HashMap; use std::iter::FromIterator; use rand::rngs::SmallRng; use rand::FromEntropy; use rand::seq::SliceRandom; #[bench] fn new_hashmap(b: &mut Bencher) { b.iter(|| { HashMap::::new() }); } #[bench] fn new_orderedmap(b: &mut Bencher) { b.iter(|| { IndexMap::::new() }); } #[bench] fn with_capacity_10e5_hashmap(b: &mut Bencher) { b.iter(|| { HashMap::::with_capacity(10_000) }); } #[bench] fn with_capacity_10e5_orderedmap(b: &mut Bencher) { b.iter(|| { IndexMap::::with_capacity(10_000) }); } #[bench] fn insert_hashmap_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { 
map.insert(x, ()); } map }); } #[bench] fn insert_orderedmap_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_hashmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_orderedmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_hashmap_str_10_000(b: &mut Bencher) { let c = 10_000; let ss = Vec::from_iter((0..c).map(|x| x.to_string())); b.iter(|| { let mut map = HashMap::with_capacity(c); for key in &ss { map.insert(&key[..], ()); } map }); } #[bench] fn insert_orderedmap_str_10_000(b: &mut Bencher) { let c = 10_000; let ss = Vec::from_iter((0..c).map(|x| x.to_string())); b.iter(|| { let mut map = IndexMap::with_capacity(c); for key in &ss { map.insert(&key[..], ()); } map }); } #[bench] fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) { let c = 10_000; let value = [0u64; 10]; b.iter(|| { let mut map = HashMap::with_capacity(c); for i in 0..c { map.insert(i, value); } map }); } #[bench] fn insert_orderedmap_int_bigvalue_10_000(b: &mut Bencher) { let c = 10_000; let value = [0u64; 10]; b.iter(|| { let mut map = IndexMap::with_capacity(c); for i in 0..c { map.insert(i, value); } map }); } #[bench] fn insert_hashmap_100_000(b: &mut Bencher) { let c = 100_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_orderedmap_100_000(b: &mut Bencher) { let c = 100_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn insert_hashmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } 
#[bench] fn insert_orderedmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x, ()); } map }); } #[bench] fn entry_hashmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.entry(x).or_insert(()); } map }); } #[bench] fn entry_orderedmap_150(b: &mut Bencher) { let c = 150; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.entry(x).or_insert(()); } map }); } #[bench] fn iter_sum_hashmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let len = c - c/10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { map.keys().sum::() }); } #[bench] fn iter_sum_orderedmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c/10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { map.keys().sum::() }); } #[bench] fn iter_black_box_hashmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let len = c - c/10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { for &key in map.keys() { black_box(key); } }); } #[bench] fn iter_black_box_orderedmap_10_000(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let len = c - c/10; for x in 0..len { map.insert(x, ()); } assert_eq!(map.len(), len); b.iter(|| { for &key in map.keys() { black_box(key); } }); } fn shuffled_keys(iter: I) -> Vec where I: IntoIterator { let mut v = Vec::from_iter(iter); let mut rng = SmallRng::from_entropy(); v.shuffle(&mut rng); v } #[bench] fn lookup_hashmap_10_000_exist(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in 5000..c { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn 
lookup_hashmap_10_000_noexist(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in c..15000 { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn lookup_orderedmap_10_000_exist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in 5000..c { found += map.get(&key).is_some() as i32; } found }); } #[bench] fn lookup_orderedmap_10_000_noexist(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key, 1); } b.iter(|| { let mut found = 0; for key in c..15000 { found += map.get(&key).is_some() as i32; } found }); } // number of items to look up const LOOKUP_MAP_SIZE: u32 = 100_000_u32; const LOOKUP_SAMPLE_SIZE: u32 = 5000; const SORT_MAP_SIZE: usize = 10_000; // use lazy_static so that comparison benchmarks use the exact same inputs lazy_static! { static ref KEYS: Vec = { shuffled_keys(0..LOOKUP_MAP_SIZE) }; } lazy_static! { static ref HMAP_100K: HashMap = { let c = LOOKUP_MAP_SIZE; let mut map = HashMap::with_capacity(c as usize); let keys = &*KEYS; for &key in keys { map.insert(key, key); } map }; } lazy_static! { static ref OMAP_100K: IndexMap = { let c = LOOKUP_MAP_SIZE; let mut map = IndexMap::with_capacity(c as usize); let keys = &*KEYS; for &key in keys { map.insert(key, key); } map }; } lazy_static! { static ref OMAP_SORT_U32: IndexMap = { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(key, key); } map }; } lazy_static! 
{ static ref OMAP_SORT_S: IndexMap = { let mut map = IndexMap::with_capacity(SORT_MAP_SIZE); for &key in &KEYS[..SORT_MAP_SIZE] { map.insert(format!("{:^16x}", &key), String::new()); } map }; } #[bench] fn lookup_hashmap_100_000_multi(b: &mut Bencher) { let map = &*HMAP_100K; b.iter(|| { let mut found = 0; for key in 0..LOOKUP_SAMPLE_SIZE { found += map.get(&key).is_some() as u32; } found }); } #[bench] fn lookup_ordermap_100_000_multi(b: &mut Bencher) { let map = &*OMAP_100K; b.iter(|| { let mut found = 0; for key in 0..LOOKUP_SAMPLE_SIZE { found += map.get(&key).is_some() as u32; } found }); } // inorder: Test looking up keys in the same order as they were inserted #[bench] fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) { let map = &*HMAP_100K; let keys = &*KEYS; b.iter(|| { let mut found = 0; for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { found += map.get(key).is_some() as u32; } found }); } #[bench] fn lookup_ordermap_100_000_inorder_multi(b: &mut Bencher) { let map = &*OMAP_100K; let keys = &*KEYS; b.iter(|| { let mut found = 0; for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] { found += map.get(key).is_some() as u32; } found }); } #[bench] fn lookup_hashmap_100_000_single(b: &mut Bencher) { let map = &*HMAP_100K; let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); b.iter(|| { let key = iter.next().unwrap(); map.get(&key).is_some() }); } #[bench] fn lookup_ordermap_100_000_single(b: &mut Bencher) { let map = &*OMAP_100K; let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle(); b.iter(|| { let key = iter.next().unwrap(); map.get(&key).is_some() }); } const GROW_SIZE: usize = 100_000; type GrowKey = u32; // Test grow/resize without preallocation #[bench] fn grow_fnv_hashmap_100_000(b: &mut Bencher) { b.iter(|| { let mut map: HashMap<_, _, FnvBuilder> = HashMap::default(); for x in 0..GROW_SIZE { map.insert(x as GrowKey, x as GrowKey); } map }); } #[bench] fn grow_fnv_ordermap_100_000(b: &mut Bencher) { b.iter(|| { let mut 
map: IndexMap<_, _, FnvBuilder> = IndexMap::default(); for x in 0..GROW_SIZE { map.insert(x as GrowKey, x as GrowKey); } map }); } const MERGE: u64 = 10_000; #[bench] fn hashmap_merge_simple(b: &mut Bencher) { let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); b.iter(|| { let mut merged = first_map.clone(); merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); merged }); } #[bench] fn hashmap_merge_shuffle(b: &mut Bencher) { let first_map: HashMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: HashMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); let mut v = Vec::new(); let mut rng = SmallRng::from_entropy(); b.iter(|| { let mut merged = first_map.clone(); v.extend(second_map.iter().map(|(&k, &v)| (k, v))); v.shuffle(&mut rng); merged.extend(v.drain(..)); merged }); } #[bench] fn ordermap_merge_simple(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); b.iter(|| { let mut merged = first_map.clone(); merged.extend(second_map.iter().map(|(&k, &v)| (k, v))); merged }); } #[bench] fn ordermap_merge_shuffle(b: &mut Bencher) { let first_map: IndexMap = (0..MERGE).map(|i| (i, ())).collect(); let second_map: IndexMap = (MERGE..MERGE * 2).map(|i| (i, ())).collect(); let mut v = Vec::new(); let mut rng = SmallRng::from_entropy(); b.iter(|| { let mut merged = first_map.clone(); v.extend(second_map.iter().map(|(&k, &v)| (k, v))); v.shuffle(&mut rng); merged.extend(v.drain(..)); merged }); } #[bench] fn swap_remove_ordermap_100_000(b: &mut Bencher) { let map = OMAP_100K.clone(); let mut keys = Vec::from_iter(map.keys().cloned()); let mut rng = SmallRng::from_entropy(); keys.shuffle(&mut rng); b.iter(|| { let mut map = map.clone(); for key in &keys { map.swap_remove(key); } assert_eq!(map.len(), 0); map }); } #[bench] fn shift_remove_ordermap_100_000_few(b: &mut Bencher) { 
let map = OMAP_100K.clone(); let mut keys = Vec::from_iter(map.keys().cloned()); let mut rng = SmallRng::from_entropy(); keys.shuffle(&mut rng); keys.truncate(50); b.iter(|| { let mut map = map.clone(); for key in &keys { map.shift_remove(key); } assert_eq!(map.len(), OMAP_100K.len() - keys.len()); map }); } #[bench] fn shift_remove_ordermap_2_000_full(b: &mut Bencher) { let mut keys = KEYS[..2_000].to_vec(); let mut map = IndexMap::with_capacity(keys.len()); for &key in &keys { map.insert(key, key); } let mut rng = SmallRng::from_entropy(); keys.shuffle(&mut rng); b.iter(|| { let mut map = map.clone(); for key in &keys { map.shift_remove(key); } assert_eq!(map.len(), 0); map }); } #[bench] fn pop_ordermap_100_000(b: &mut Bencher) { let map = OMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); while !map.is_empty() { map.pop(); } assert_eq!(map.len(), 0); map }); } #[bench] fn few_retain_ordermap_100_000(b: &mut Bencher) { let map = OMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 7 == 0); map }); } #[bench] fn few_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 7 == 0); map }); } #[bench] fn half_retain_ordermap_100_000(b: &mut Bencher) { let map = OMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 2 == 0); map }); } #[bench] fn half_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 2 == 0); map }); } #[bench] fn many_retain_ordermap_100_000(b: &mut Bencher) { let map = OMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 100 != 0); map }); } #[bench] fn many_retain_hashmap_100_000(b: &mut Bencher) { let map = HMAP_100K.clone(); b.iter(|| { let mut map = map.clone(); map.retain(|k, _| *k % 100 != 0); map }); } // simple sort impl for comparison pub fn simple_sort(m: &mut IndexMap) { let mut ordered: 
Vec<_> = m.drain(..).collect(); ordered.sort_by(|left, right| left.0.cmp(&right.0)); m.extend(ordered); } #[bench] fn ordermap_sort_s(b: &mut Bencher) { let map = OMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); map.sort_keys(); map }); } #[bench] fn ordermap_simple_sort_s(b: &mut Bencher) { let map = OMAP_SORT_S.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); simple_sort(&mut map); map }); } #[bench] fn ordermap_sort_u32(b: &mut Bencher) { let map = OMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); map.sort_keys(); map }); } #[bench] fn ordermap_simple_sort_u32(b: &mut Bencher) { let map = OMAP_SORT_U32.clone(); // there's a map clone there, but it's still useful to profile this b.iter(|| { let mut map = map.clone(); simple_sort(&mut map); map }); } // measure the fixed overhead of cloning in sort benchmarks #[bench] fn ordermap_clone_for_sort_s(b: &mut Bencher) { let map = OMAP_SORT_S.clone(); b.iter(|| { map.clone() }); } #[bench] fn ordermap_clone_for_sort_u32(b: &mut Bencher) { let map = OMAP_SORT_U32.clone(); b.iter(|| { map.clone() }); } indexmap-1.2.0/benches/faststring.rs010064400017510001751000000076261353367262700157340ustar0000000000000000#![feature(test)] extern crate test; extern crate rand; extern crate lazy_static; use test::Bencher; extern crate indexmap; use indexmap::IndexMap; use std::collections::HashMap; use std::iter::FromIterator; use rand::thread_rng; use rand::seq::SliceRandom; use std::hash::{Hash, Hasher}; use std::borrow::Borrow; use std::ops::Deref; #[derive(PartialEq, Eq, Copy, Clone)] pub struct OneShot(pub T); impl Hash for OneShot { fn hash(&self, h: &mut H) { h.write(self.0.as_bytes()) } } impl<'a, S> From<&'a S> for &'a OneShot where S: AsRef { fn from(s: &'a S) -> Self { let s: &str = s.as_ref(); unsafe { 
&*(s as *const str as *const OneShot) } } } impl Hash for OneShot { fn hash(&self, h: &mut H) { h.write(self.0.as_bytes()) } } impl Borrow> for OneShot { fn borrow(&self) -> &OneShot { <&OneShot>::from(&self.0) } } impl Deref for OneShot { type Target = T; fn deref(&self) -> &T { &self.0 } } fn shuffled_keys(iter: I) -> Vec where I: IntoIterator { let mut v = Vec::from_iter(iter); let mut rng = thread_rng(); v.shuffle(&mut rng); v } #[bench] fn insert_hashmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = HashMap::with_capacity(c); for x in 0..c { map.insert(OneShot(x.to_string()), ()); } map }); } #[bench] fn insert_orderedmap_string_10_000(b: &mut Bencher) { let c = 10_000; b.iter(|| { let mut map = IndexMap::with_capacity(c); for x in 0..c { map.insert(x.to_string(), ()); } map }); } #[bench] fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(key.to_string(), 1); } let lookups = (5000..c).map(|x| x.to_string()).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) { let c = 10_000; let mut map = HashMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(OneShot(key.to_string()), 1); } let lookups = (5000..c).map(|x| OneShot(x.to_string())).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_ordermap_10_000_exist_string(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { 
map.insert(key.to_string(), 1); } let lookups = (5000..c).map(|x| x.to_string()).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } #[bench] fn lookup_ordermap_10_000_exist_string_oneshot(b: &mut Bencher) { let c = 10_000; let mut map = IndexMap::with_capacity(c); let keys = shuffled_keys(0..c); for &key in &keys { map.insert(OneShot(key.to_string()), 1); } let lookups = (5000..c).map(|x| OneShot(x.to_string())).collect::>(); b.iter(|| { let mut found = 0; for key in &lookups { found += map.get(key).is_some() as i32; } found }); } indexmap-1.2.0/src/equivalent.rs010064400017510001751000000013451324036072100150550ustar0000000000000000 use std::borrow::Borrow; /// Key equivalence trait. /// /// This trait allows hash table lookup to be customized. /// It has one blanket implementation that uses the regular `Borrow` solution, /// just like `HashMap` and `BTreeMap` do, so that you can pass `&str` to lookup /// into a map with `String` keys and so on. /// /// # Contract /// /// The implementor **must** hash like `K`, if it is hashable. pub trait Equivalent { /// Compare self to `key` and return `true` if they are equal. fn equivalent(&self, key: &K) -> bool; } impl Equivalent for Q where Q: Eq, K: Borrow, { #[inline] fn equivalent(&self, key: &K) -> bool { *self == *key.borrow() } } indexmap-1.2.0/src/lib.rs010064400017510001751000000046631353476170100134650ustar0000000000000000 #![deny(unsafe_code)] #![doc(html_root_url = "https://docs.rs/indexmap/1/")] //! [`IndexMap`] is a hash table where the iteration order of the key-value //! pairs is independent of the hash values of the keys. //! //! [`IndexSet`] is a corresponding hash set using the same implementation and //! with similar properties. //! //! [`IndexMap`]: map/struct.IndexMap.html //! [`IndexSet`]: set/struct.IndexSet.html //! //! //! ## Rust Version //! //! This version of indexmap requires Rust 1.18 or later, or 1.30+ for //! 
development builds. //! //! The indexmap 1.x release series will use a carefully considered version //! upgrade policy, where in a later 1.x version, we will raise the minimum //! required Rust version. #[macro_use] mod macros; #[cfg(feature = "serde-1")] mod serde; mod util; mod equivalent; mod mutable_keys; pub mod set; pub mod map; // Placed after `map` and `set` so new `rayon` methods on the types // are documented after the "normal" methods. #[cfg(feature = "rayon")] mod rayon; pub use equivalent::Equivalent; pub use map::IndexMap; pub use set::IndexSet; // shared private items /// Hash value newtype. Not larger than usize, since anything larger /// isn't used for selecting position anyway. #[derive(Copy, Debug)] struct HashValue(usize); impl HashValue { #[inline(always)] fn get(self) -> usize { self.0 } } impl Clone for HashValue { #[inline] fn clone(&self) -> Self { *self } } impl PartialEq for HashValue { #[inline] fn eq(&self, rhs: &Self) -> bool { self.0 == rhs.0 } } #[derive(Copy, Clone, Debug)] struct Bucket { hash: HashValue, key: K, value: V, } impl Bucket { // field accessors -- used for `f` instead of closures in `.map(f)` fn key_ref(&self) -> &K { &self.key } fn value_ref(&self) -> &V { &self.value } fn value_mut(&mut self) -> &mut V { &mut self.value } fn key(self) -> K { self.key } fn key_value(self) -> (K, V) { (self.key, self.value) } fn refs(&self) -> (&K, &V) { (&self.key, &self.value) } fn ref_mut(&mut self) -> (&K, &mut V) { (&self.key, &mut self.value) } fn muts(&mut self) -> (&mut K, &mut V) { (&mut self.key, &mut self.value) } } trait Entries { type Entry; fn into_entries(self) -> Vec; fn as_entries(&self) -> &[Self::Entry]; fn as_entries_mut(&mut self) -> &mut [Self::Entry]; fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Self::Entry]); } indexmap-1.2.0/src/macros.rs010064400017510001751000000062451353155442400142000ustar0000000000000000 #[macro_export(local_inner_macros)] /// Create an `IndexMap` from a list of key-value pairs 
/// /// ## Example /// /// ``` /// #[macro_use] extern crate indexmap; /// # fn main() { /// /// let map = indexmap!{ /// "a" => 1, /// "b" => 2, /// }; /// assert_eq!(map["a"], 1); /// assert_eq!(map["b"], 2); /// assert_eq!(map.get("c"), None); /// /// // "a" is the first key /// assert_eq!(map.keys().next(), Some(&"a")); /// # } /// ``` macro_rules! indexmap { (@single $($x:tt)*) => (()); (@count $($rest:expr),*) => (<[()]>::len(&[$(indexmap!(@single $rest)),*])); ($($key:expr => $value:expr,)+) => { indexmap!($($key => $value),+) }; ($($key:expr => $value:expr),*) => { { let _cap = indexmap!(@count $($key),*); let mut _map = $crate::IndexMap::with_capacity(_cap); $( _map.insert($key, $value); )* _map } }; } #[macro_export(local_inner_macros)] /// Create an `IndexSet` from a list of values /// /// ## Example /// /// ``` /// #[macro_use] extern crate indexmap; /// # fn main() { /// /// let set = indexset!{ /// "a", /// "b", /// }; /// assert!(set.contains("a")); /// assert!(set.contains("b")); /// assert!(!set.contains("c")); /// /// // "a" is the first value /// assert_eq!(set.iter().next(), Some(&"a")); /// # } /// ``` macro_rules! indexset { (@single $($x:tt)*) => (()); (@count $($rest:expr),*) => (<[()]>::len(&[$(indexset!(@single $rest)),*])); ($($value:expr,)+) => { indexset!($($value),+) }; ($($value:expr),*) => { { let _cap = indexset!(@count $($value),*); let mut _set = $crate::IndexSet::with_capacity(_cap); $( _set.insert($value); )* _set } }; } // generate all the Iterator methods by just forwarding to the underlying // self.iter and mapping its element. macro_rules! 
iterator_methods { // $map_elt is the mapping function from the underlying iterator's element // same mapping function for both options and iterators ($map_elt:expr) => { fn next(&mut self) -> Option { self.iter.next().map($map_elt) } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn count(self) -> usize { self.iter.len() } fn nth(&mut self, n: usize) -> Option { self.iter.nth(n).map($map_elt) } fn last(mut self) -> Option { self.next_back() } fn collect(self) -> C where C: FromIterator { // NB: forwarding this directly to standard iterators will // allow it to leverage unstable traits like `TrustedLen`. self.iter.map($map_elt).collect() } } } macro_rules! double_ended_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element // same mapping function for both options and iterators ($map_elt:expr) => { fn next_back(&mut self) -> Option { self.iter.next_back().map($map_elt) } } } indexmap-1.2.0/src/map.rs010064400017510001751000002241001353516575300134700ustar0000000000000000//! `IndexMap` is a hash table where the iteration order of the key-value //! pairs is independent of the hash values of the keys. pub use mutable_keys::MutableKeys; #[cfg(feature = "rayon")] pub use ::rayon::map as rayon; use std::hash::Hash; use std::hash::BuildHasher; use std::hash::Hasher; use std::iter::FromIterator; use std::collections::hash_map::RandomState; use std::ops::RangeFull; use std::cmp::{max, Ordering}; use std::fmt; use std::mem::{replace}; use std::marker::PhantomData; use util::{third, ptrdistance, enumerate}; use equivalent::Equivalent; use { Bucket, Entries, HashValue, }; fn hash_elem_using(build: &B, k: &K) -> HashValue { let mut h = build.build_hasher(); k.hash(&mut h); HashValue(h.finish() as usize) } /// A possibly truncated hash value. 
/// #[derive(Debug)] struct ShortHash(usize, PhantomData); impl ShortHash { /// Pretend this is a full HashValue, which /// is completely ok w.r.t determining bucket index /// /// - Sz = u32: 32-bit hash is enough to select bucket index /// - Sz = u64: hash is not truncated fn into_hash(self) -> HashValue { HashValue(self.0) } } impl Copy for ShortHash { } impl Clone for ShortHash { #[inline] fn clone(&self) -> Self { *self } } impl PartialEq for ShortHash { #[inline] fn eq(&self, rhs: &Self) -> bool { self.0 == rhs.0 } } // Compare ShortHash == HashValue by truncating appropriately // if applicable before the comparison impl PartialEq for ShortHash where Sz: Size { #[inline] fn eq(&self, rhs: &HashValue) -> bool { if Sz::is_64_bit() { self.0 == rhs.0 } else { lo32(self.0 as u64) == lo32(rhs.0 as u64) } } } impl From> for HashValue { fn from(x: ShortHash) -> Self { HashValue(x.0) } } /// `Pos` is stored in the `indices` array and it points to the index of a /// `Bucket` in self.core.entries. /// /// Pos can be interpreted either as a 64-bit index, or as a 32-bit index and /// a 32-bit hash. /// /// Storing the truncated hash next to the index saves loading the hash from the /// entry, increasing the cache efficiency. /// /// Note that the lower 32 bits of the hash is enough to compute desired /// position and probe distance in a hash map with less than 2**32 buckets. /// /// The IndexMap will simply query its **current raw capacity** to see what its /// current size class is, and dispatch to the 32-bit or 64-bit lookup code as /// appropriate. 
Only the growth code needs some extra logic to handle the /// transition from one class to another #[derive(Copy)] struct Pos { index: u64, } impl Clone for Pos { #[inline(always)] fn clone(&self) -> Self { *self } } impl fmt::Debug for Pos { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self.pos() { Some(i) => write!(f, "Pos({} / {:x})", i, self.index), None => write!(f, "Pos(None)"), } } } impl Pos { #[inline] fn none() -> Self { Pos { index: !0 } } #[inline] fn is_none(&self) -> bool { self.index == !0 } /// Return the index part of the Pos value inside `Some(_)` if the position /// is not none, otherwise return `None`. #[inline] fn pos(&self) -> Option { if self.index == !0 { None } else { Some(lo32(self.index as u64)) } } /// Set the index part of the Pos value to `i` #[inline] fn set_pos(&mut self, i: usize) where Sz: Size, { debug_assert!(!self.is_none()); if Sz::is_64_bit() { self.index = i as u64; } else { self.index = i as u64 | ((self.index >> 32) << 32) } } #[inline] fn with_hash(i: usize, hash: HashValue) -> Self where Sz: Size { if Sz::is_64_bit() { Pos { index: i as u64, } } else { Pos { index: i as u64 | ((hash.0 as u64) << 32) } } } /// “Resolve” the Pos into a combination of its index value and /// a proxy value to the hash (whether it contains the hash or not /// depends on the size class of the hash map). #[inline] fn resolve(&self) -> Option<(usize, ShortHashProxy)> where Sz: Size { if Sz::is_64_bit() { if !self.is_none() { Some((self.index as usize, ShortHashProxy::new(0))) } else { None } } else { if !self.is_none() { let (i, hash) = split_lo_hi(self.index); Some((i as usize, ShortHashProxy::new(hash as usize))) } else { None } } } /// Like resolve, but the Pos **must** be non-none. Return its index. 
#[inline] fn resolve_existing_index(&self) -> usize where Sz: Size { debug_assert!(!self.is_none(), "datastructure inconsistent: none where valid Pos expected"); if Sz::is_64_bit() { self.index as usize } else { let (i, _) = split_lo_hi(self.index); i as usize } } } #[inline] fn lo32(x: u64) -> usize { (x & 0xFFFF_FFFF) as usize } // split into low, hi parts #[inline] fn split_lo_hi(x: u64) -> (u32, u32) { (x as u32, (x >> 32) as u32) } // Possibly contains the truncated hash value for an entry, depending on // the size class. struct ShortHashProxy(usize, PhantomData); impl ShortHashProxy where Sz: Size { fn new(x: usize) -> Self { ShortHashProxy(x, PhantomData) } /// Get the hash from either `self` or from a lookup into `entries`, /// depending on `Sz`. fn get_short_hash(&self, entries: &[Bucket], index: usize) -> ShortHash { if Sz::is_64_bit() { ShortHash(entries[index].hash.0, PhantomData) } else { ShortHash(self.0, PhantomData) } } } /// A hash table where the iteration order of the key-value pairs is independent /// of the hash values of the keys. /// /// The interface is closely compatible with the standard `HashMap`, but also /// has additional features. /// /// # Order /// /// The key-value pairs have a consistent order that is determined by /// the sequence of insertion and removal calls on the map. The order does /// not depend on the keys or the hash function at all. /// /// All iterators traverse the map in *the order*. /// /// The insertion order is preserved, with **notable exceptions** like the /// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of /// course result in a new order, depending on the sorting order. /// /// # Indices /// /// The key-value pairs are indexed in a compact range without holes in the /// range `0..self.len()`. For example, the method `.get_full` looks up the /// index for a key, and the method `.get_index` looks up the key-value pair by /// index. 
/// /// # Examples /// /// ``` /// use indexmap::IndexMap; /// /// // count the frequency of each letter in a sentence. /// let mut letters = IndexMap::new(); /// for ch in "a short treatise on fungi".chars() { /// *letters.entry(ch).or_insert(0) += 1; /// } /// /// assert_eq!(letters[&'s'], 2); /// assert_eq!(letters[&'t'], 3); /// assert_eq!(letters[&'u'], 1); /// assert_eq!(letters.get(&'y'), None); /// ``` #[derive(Clone)] pub struct IndexMap { core: OrderMapCore, hash_builder: S, } // core of the map that does not depend on S #[derive(Clone)] struct OrderMapCore { pub(crate) mask: usize, /// indices are the buckets. indices.len() == raw capacity pub(crate) indices: Box<[Pos]>, /// entries is a dense vec of entries in their order. entries.len() == len pub(crate) entries: Vec>, } #[inline(always)] fn desired_pos(mask: usize, hash: HashValue) -> usize { hash.0 & mask } impl Entries for IndexMap { type Entry = Bucket; fn into_entries(self) -> Vec { self.core.entries } fn as_entries(&self) -> &[Self::Entry] { &self.core.entries } fn as_entries_mut(&mut self) -> &mut [Self::Entry] { &mut self.core.entries } fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Self::Entry]) { let side_index = self.core.save_hash_index(); f(&mut self.core.entries); self.core.restore_hash_index(side_index); } } /// The number of steps that `current` is forward of the desired position for hash #[inline(always)] fn probe_distance(mask: usize, hash: HashValue, current: usize) -> usize { current.wrapping_sub(desired_pos(mask, hash)) & mask } enum Inserted { Done, Swapped { prev_value: V }, RobinHood { probe: usize, old_pos: Pos, } } impl fmt::Debug for IndexMap where K: fmt::Debug + Hash + Eq, V: fmt::Debug, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_map().entries(self.iter()).finish()?; if cfg!(not(feature = "test_debug")) { return Ok(()); } writeln!(f)?; for (i, index) in enumerate(&*self.core.indices) { write!(f, "{}: {:?}", i, index)?; if let 
Some(pos) = index.pos() { let hash = self.core.entries[pos].hash; let key = &self.core.entries[pos].key; let desire = desired_pos(self.core.mask, hash); write!(f, ", desired={}, probe_distance={}, key={:?}", desire, probe_distance(self.core.mask, hash, i), key)?; } writeln!(f)?; } writeln!(f, "cap={}, raw_cap={}, entries.cap={}", self.capacity(), self.raw_capacity(), self.core.entries.capacity())?; Ok(()) } } #[inline] fn usable_capacity(cap: usize) -> usize { cap - cap / 4 } #[inline] fn to_raw_capacity(n: usize) -> usize { n + n / 3 } // this could not be captured in an efficient iterator macro_rules! probe_loop { ($probe_var: ident < $len: expr, $body: expr) => { loop { if $probe_var < $len { $body $probe_var += 1; } else { $probe_var = 0; } } } } impl IndexMap { /// Create a new map. (Does not allocate.) pub fn new() -> Self { Self::with_capacity(0) } /// Create a new map with capacity for `n` key-value pairs. (Does not /// allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity(n: usize) -> Self { Self::with_capacity_and_hasher(n, <_>::default()) } } impl IndexMap { /// Create a new map with capacity for `n` key-value pairs. (Does not /// allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self where S: BuildHasher { if n == 0 { IndexMap { core: OrderMapCore { mask: 0, indices: Box::new([]), entries: Vec::new(), }, hash_builder, } } else { let raw = to_raw_capacity(n); let raw_cap = max(raw.next_power_of_two(), 8); IndexMap { core: OrderMapCore { mask: raw_cap.wrapping_sub(1), indices: vec![Pos::none(); raw_cap].into_boxed_slice(), entries: Vec::with_capacity(usable_capacity(raw_cap)), }, hash_builder, } } } /// Return the number of key-value pairs in the map. /// /// Computes in **O(1)** time. pub fn len(&self) -> usize { self.core.len() } /// Returns true if the map contains no elements. /// /// Computes in **O(1)** time. 
pub fn is_empty(&self) -> bool { self.len() == 0 } /// Create a new map with `hash_builder` pub fn with_hasher(hash_builder: S) -> Self where S: BuildHasher { Self::with_capacity_and_hasher(0, hash_builder) } /// Return a reference to the map's `BuildHasher`. pub fn hasher(&self) -> &S where S: BuildHasher { &self.hash_builder } /// Computes in **O(1)** time. pub fn capacity(&self) -> usize { self.core.capacity() } #[inline] fn size_class_is_64bit(&self) -> bool { self.core.size_class_is_64bit() } #[inline(always)] fn raw_capacity(&self) -> usize { self.core.raw_capacity() } } impl OrderMapCore { // Return whether we need 32 or 64 bits to specify a bucket or entry index #[cfg(not(feature = "test_low_transition_point"))] fn size_class_is_64bit(&self) -> bool { usize::max_value() > u32::max_value() as usize && self.raw_capacity() >= u32::max_value() as usize } // for testing #[cfg(feature = "test_low_transition_point")] fn size_class_is_64bit(&self) -> bool { self.raw_capacity() >= 64 } #[inline(always)] fn raw_capacity(&self) -> usize { self.indices.len() } } /// Trait for the "size class". Either u32 or u64 depending on the index /// size needed to address an entry's indes in self.core.entries. trait Size { fn is_64_bit() -> bool; fn is_same_size() -> bool { Self::is_64_bit() == T::is_64_bit() } } impl Size for u32 { #[inline] fn is_64_bit() -> bool { false } } impl Size for u64 { #[inline] fn is_64_bit() -> bool { true } } /// Call self.method(args) with `::` or `::` depending on `self` /// size class. /// /// The u32 or u64 is *prepended* to the type parameter list! macro_rules! dispatch_32_vs_64 { // self.methodname with other explicit type params, // size is prepended ($self_:ident . $method:ident::<$($t:ty),*>($($arg:expr),*)) => { if $self_.size_class_is_64bit() { $self_.$method::($($arg),*) } else { $self_.$method::($($arg),*) } }; // self.methodname with only one type param, the size. ($self_:ident . 
$method:ident ($($arg:expr),*)) => { if $self_.size_class_is_64bit() { $self_.$method::($($arg),*) } else { $self_.$method::($($arg),*) } }; // functionname with size_class_is_64bit as the first argument, only one // type param, the size. ($self_:ident => $function:ident ($($arg:expr),*)) => { if $self_.size_class_is_64bit() { $function::($($arg),*) } else { $function::($($arg),*) } }; } /// Entry for an existing key-value pair or a vacant location to /// insert one. pub enum Entry<'a, K: 'a, V: 'a> { /// Existing slot with equivalent key. Occupied(OccupiedEntry<'a, K, V>), /// Vacant slot (no equivalent key in the map). Vacant(VacantEntry<'a, K, V>), } impl<'a, K, V> Entry<'a, K, V> { /// Computes in **O(1)** time (amortized average). pub fn or_insert(self, default: V) -> &'a mut V { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(default), } } /// Computes in **O(1)** time (amortized average). pub fn or_insert_with(self, call: F) -> &'a mut V where F: FnOnce() -> V, { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(call()), } } pub fn key(&self) -> &K { match *self { Entry::Occupied(ref entry) => entry.key(), Entry::Vacant(ref entry) => entry.key(), } } /// Return the index where the key-value pair exists or will be inserted. pub fn index(&self) -> usize { match *self { Entry::Occupied(ref entry) => entry.index(), Entry::Vacant(ref entry) => entry.index(), } } /// Modifies the entry if it is occupied. pub fn and_modify(self, f: F) -> Self where F: FnOnce(&mut V), { match self { Entry::Occupied(mut o) => { f(o.get_mut()); Entry::Occupied(o) } x => x, } } /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable /// reference to it. Otherwise a mutable reference to an already existent value is returned. /// /// Computes in **O(1)** time (amortized average). 
pub fn or_default(self) -> &'a mut V where V: Default { match self { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => entry.insert(V::default()), } } } impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for Entry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Entry::Vacant(ref v) => { f.debug_tuple("Entry") .field(v) .finish() } Entry::Occupied(ref o) => { f.debug_tuple("Entry") .field(o) .finish() } } } } /// A view into an occupied entry in a `IndexMap`. /// It is part of the [`Entry`] enum. /// /// [`Entry`]: enum.Entry.html pub struct OccupiedEntry<'a, K: 'a, V: 'a> { map: &'a mut OrderMapCore, key: K, probe: usize, index: usize, } impl<'a, K, V> OccupiedEntry<'a, K, V> { pub fn key(&self) -> &K { &self.key } pub fn get(&self) -> &V { &self.map.entries[self.index].value } pub fn get_mut(&mut self) -> &mut V { &mut self.map.entries[self.index].value } /// Put the new key in the occupied entry's key slot pub(crate) fn replace_key(self) -> K { let old_key = &mut self.map.entries[self.index].key; replace(old_key, self.key) } /// Return the index of the key-value pair pub fn index(&self) -> usize { self.index } pub fn into_mut(self) -> &'a mut V { &mut self.map.entries[self.index].value } /// Sets the value of the entry to `value`, and returns the entry's old value. pub fn insert(&mut self, value: V) -> V { replace(self.get_mut(), value) } #[deprecated(note = "use `swap_remove` or `shift_remove`")] pub fn remove(self) -> V { self.swap_remove() } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Computes in **O(1)** time (average). 
pub fn swap_remove(self) -> V { self.swap_remove_entry().1 } /// Remove the key, value pair stored in the map for this entry, and return the value. /// /// Like `Vec::remove`, the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove(self) -> V { self.shift_remove_entry().1 } /// Remove and return the key, value pair stored in the map for this entry #[deprecated(note = "use `swap_remove_entry` or `shift_remove_entry`")] pub fn remove_entry(self) -> (K, V) { self.swap_remove_entry() } /// Remove and return the key, value pair stored in the map for this entry /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_entry(self) -> (K, V) { self.map.swap_remove_found(self.probe, self.index) } /// Remove and return the key, value pair stored in the map for this entry /// /// Like `Vec::remove`, the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_entry(self) -> (K, V) { self.map.shift_remove_found(self.probe, self.index) } } impl<'a, K: 'a + fmt::Debug, V: 'a + fmt::Debug> fmt::Debug for OccupiedEntry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) .field("value", self.get()) .finish() } } /// A view into a vacant entry in a `IndexMap`. /// It is part of the [`Entry`] enum. 
/// /// [`Entry`]: enum.Entry.html pub struct VacantEntry<'a, K: 'a, V: 'a> { map: &'a mut OrderMapCore, key: K, hash: HashValue, probe: usize, } impl<'a, K, V> VacantEntry<'a, K, V> { pub fn key(&self) -> &K { &self.key } pub fn into_key(self) -> K { self.key } /// Return the index where the key-value pair will be inserted. pub fn index(&self) -> usize { self.map.len() } pub fn insert(self, value: V) -> &'a mut V { if self.map.size_class_is_64bit() { self.insert_impl::(value) } else { self.insert_impl::(value) } } fn insert_impl(self, value: V) -> &'a mut V where Sz: Size { let index = self.map.entries.len(); self.map.entries.push(Bucket { hash: self.hash, key: self.key, value }); let old_pos = Pos::with_hash::(index, self.hash); self.map.insert_phase_2::(self.probe, old_pos); &mut {self.map}.entries[index].value } } impl<'a, K: 'a + fmt::Debug, V: 'a> fmt::Debug for VacantEntry<'a, K, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_tuple("VacantEntry") .field(self.key()) .finish() } } impl IndexMap where K: Hash + Eq, S: BuildHasher, { // FIXME: reduce duplication (compare with insert) fn entry_phase_1(&mut self, key: K) -> Entry where Sz: Size { let hash = hash_elem_using(&self.hash_builder, &key); self.core.entry_phase_1::(hash, key) } /// Remove all key-value pairs in the map, while preserving its capacity. /// /// Computes in **O(n)** time. pub fn clear(&mut self) { self.core.clear(); } /// Reserve capacity for `additional` more key-value pairs. /// /// FIXME Not implemented fully yet. pub fn reserve(&mut self, additional: usize) { if additional > 0 { self.reserve_one(); } } // First phase: Look for the preferred location for key. // // We will know if `key` is already in the map, before we need to insert it. 
// When we insert they key, it might be that we need to continue displacing // entries (robin hood hashing), in which case Inserted::RobinHood is returned fn insert_phase_1(&mut self, key: K, value: V) -> Inserted where Sz: Size { let hash = hash_elem_using(&self.hash_builder, &key); self.core.insert_phase_1::(hash, key, value) } fn reserve_one(&mut self) { if self.len() == self.capacity() { dispatch_32_vs_64!(self.double_capacity()); } } fn double_capacity(&mut self) where Sz: Size, { self.core.double_capacity::(); } /// Insert a key-value pair in the map. /// /// If an equivalent key already exists in the map: the key remains and /// retains in its place in the order, its corresponding value is updated /// with `value` and the older value is returned inside `Some(_)`. /// /// If no equivalent key existed in the map: the new key-value pair is /// inserted, last in order, and `None` is returned. /// /// Computes in **O(1)** time (amortized average). /// /// See also [`entry`](#method.entry) if you you want to insert *or* modify /// or if you need to get the index of the corresponding key-value pair. pub fn insert(&mut self, key: K, value: V) -> Option { self.reserve_one(); if self.size_class_is_64bit() { match self.insert_phase_1::(key, value) { Inserted::Swapped { prev_value } => Some(prev_value), Inserted::Done => None, Inserted::RobinHood { probe, old_pos } => { self.core.insert_phase_2::(probe, old_pos); None } } } else { match self.insert_phase_1::(key, value) { Inserted::Swapped { prev_value } => Some(prev_value), Inserted::Done => None, Inserted::RobinHood { probe, old_pos } => { self.core.insert_phase_2::(probe, old_pos); None } } } } /// Insert a key-value pair in the map, and get their index. /// /// If an equivalent key already exists in the map: the key remains and /// retains in its place in the order, its corresponding value is updated /// with `value` and the older value is returned inside `(index, Some(_))`. 
/// /// If no equivalent key existed in the map: the new key-value pair is /// inserted, last in order, and `(index, None)` is returned. /// /// Computes in **O(1)** time (amortized average). /// /// See also [`entry`](#method.entry) if you you want to insert *or* modify /// or if you need to get the index of the corresponding key-value pair. pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option) { let entry = self.entry(key); let index = entry.index(); match entry { Entry::Occupied(mut entry) => (index, Some(entry.insert(value))), Entry::Vacant(entry) => { entry.insert(value); (index, None) } } } /// Get the given key’s corresponding entry in the map for insertion and/or /// in-place manipulation. /// /// Computes in **O(1)** time (amortized average). pub fn entry(&mut self, key: K) -> Entry { self.reserve_one(); dispatch_32_vs_64!(self.entry_phase_1(key)) } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter(&self) -> Iter { Iter { iter: self.core.entries.iter() } } /// Return an iterator over the key-value pairs of the map, in their order pub fn iter_mut(&mut self) -> IterMut { IterMut { iter: self.core.entries.iter_mut() } } /// Return an iterator over the keys of the map, in their order pub fn keys(&self) -> Keys { Keys { iter: self.core.entries.iter() } } /// Return an iterator over the values of the map, in their order pub fn values(&self) -> Values { Values { iter: self.core.entries.iter() } } /// Return an iterator over mutable references to the the values of the map, /// in their order pub fn values_mut(&mut self) -> ValuesMut { ValuesMut { iter: self.core.entries.iter_mut() } } /// Return `true` if an equivalent to `key` exists in the map. /// /// Computes in **O(1)** time (average). pub fn contains_key(&self, key: &Q) -> bool where Q: Hash + Equivalent, { self.find(key).is_some() } /// Return a reference to the value stored for `key`, if it is present, /// else `None`. 
/// /// Computes in **O(1)** time (average). pub fn get(&self, key: &Q) -> Option<&V> where Q: Hash + Equivalent, { self.get_full(key).map(third) } /// Return item index, key and value pub fn get_full(&self, key: &Q) -> Option<(usize, &K, &V)> where Q: Hash + Equivalent, { if let Some((_, found)) = self.find(key) { let entry = &self.core.entries[found]; Some((found, &entry.key, &entry.value)) } else { None } } pub fn get_mut(&mut self, key: &Q) -> Option<&mut V> where Q: Hash + Equivalent, { self.get_full_mut(key).map(third) } pub fn get_full_mut(&mut self, key: &Q) -> Option<(usize, &K, &mut V)> where Q: Hash + Equivalent, { self.get_full_mut2_impl(key).map(|(i, k, v)| (i, &*k, v)) } pub(crate) fn get_full_mut2_impl(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> where Q: Hash + Equivalent, { if let Some((_, found)) = self.find(key) { let entry = &mut self.core.entries[found]; Some((found, &mut entry.key, &mut entry.value)) } else { None } } /// Return probe (indices) and position (entries) pub(crate) fn find(&self, key: &Q) -> Option<(usize, usize)> where Q: Hash + Equivalent, { if self.is_empty() { return None; } let h = hash_elem_using(&self.hash_builder, key); self.core.find_using(h, move |entry| { Q::equivalent(key, &entry.key) }) } /// NOTE: Same as .swap_remove /// /// Computes in **O(1)** time (average). #[deprecated(note = "use `swap_remove`")] pub fn remove(&mut self, key: &Q) -> Option where Q: Hash + Equivalent, { self.swap_remove(key) } /// Remove the key-value pair equivalent to `key` and return /// its value. /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). 
pub fn swap_remove(&mut self, key: &Q) -> Option where Q: Hash + Equivalent, { self.swap_remove_full(key).map(third) } /// Remove the key-value pair equivalent to `key` and return it and /// the index it had. /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(1)** time (average). pub fn swap_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> where Q: Hash + Equivalent, { let (probe, found) = match self.find(key) { None => return None, Some(t) => t, }; let (k, v) = self.core.swap_remove_found(probe, found); Some((found, k, v)) } /// Remove the key-value pair equivalent to `key` and return /// its value. /// /// Like `Vec::remove`, the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(n)** time (average). pub fn shift_remove(&mut self, key: &Q) -> Option where Q: Hash + Equivalent, { self.shift_remove_full(key).map(third) } /// Remove the key-value pair equivalent to `key` and return it and /// the index it had. /// /// Like `Vec::remove`, the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `key` is not in map. /// /// Computes in **O(n)** time (average). pub fn shift_remove_full(&mut self, key: &Q) -> Option<(usize, K, V)> where Q: Hash + Equivalent, { let (probe, found) = match self.find(key) { None => return None, Some(t) => t, }; let (k, v) = self.core.shift_remove_found(probe, found); Some((found, k, v)) } /// Remove the last key-value pair /// /// Computes in **O(1)** time (average). 
pub fn pop(&mut self) -> Option<(K, V)> { self.core.pop_impl() } /// Scan through each key-value pair in the map and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). pub fn retain(&mut self, mut keep: F) where F: FnMut(&K, &mut V) -> bool, { self.retain_mut(move |k, v| keep(k, v)); } pub(crate) fn retain_mut(&mut self, keep: F) where F: FnMut(&mut K, &mut V) -> bool, { dispatch_32_vs_64!(self.retain_mut_sz::<_>(keep)); } fn retain_mut_sz(&mut self, keep: F) where F: FnMut(&mut K, &mut V) -> bool, Sz: Size, { self.core.retain_in_order_impl::(keep); } /// Sort the map’s key-value pairs by the default ordering of the keys. /// /// See `sort_by` for details. pub fn sort_keys(&mut self) where K: Ord, { self.core.sort_by(key_cmp) } /// Sort the map’s key-value pairs in place using the comparison /// function `compare`. /// /// The comparison function receives two key and value pairs to compare (you /// can sort by keys or values or their combination as needed). /// /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is /// the length of the map and *c* the capacity. The sort is stable. pub fn sort_by(&mut self, compare: F) where F: FnMut(&K, &V, &K, &V) -> Ordering, { self.core.sort_by(compare) } /// Sort the key-value pairs of the map and return a by value iterator of /// the key-value pairs with the result. /// /// The sort is stable. pub fn sorted_by(mut self, mut cmp: F) -> IntoIter where F: FnMut(&K, &V, &K, &V) -> Ordering { self.core.entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value)); self.into_iter() } /// Clears the `IndexMap`, returning all key-value pairs as a drain iterator. /// Keeps the allocated memory for reuse. 
pub fn drain(&mut self, range: RangeFull) -> Drain { self.core.clear_indices(); Drain { iter: self.core.entries.drain(range), } } } fn key_cmp(k1: &K, _v1: &V, k2: &K, _v2: &V) -> Ordering where K: Ord { Ord::cmp(k1, k2) } impl IndexMap { /// Get a key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time. pub fn get_index(&self, index: usize) -> Option<(&K, &V)> { self.core.entries.get(index).map(Bucket::refs) } /// Get a key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time. pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> { self.core.entries.get_mut(index).map(Bucket::muts) } /// Remove the key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Like `Vec::swap_remove`, the pair is removed by swapping it with the /// last element of the map and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> { let (probe, found) = match self.core.entries.get(index) .map(|e| self.core.find_existing_entry(e)) { None => return None, Some(t) => t, }; Some(self.core.swap_remove_found(probe, found)) } /// Remove the key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Like `Vec::remove`, the pair is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> { let (probe, found) = match self.core.entries.get(index) .map(|e| self.core.find_existing_entry(e)) { None => return None, Some(t) => t, }; Some(self.core.shift_remove_found(probe, found)) } } // Methods that don't use any properties (Hash / Eq) of K. 
// // It's cleaner to separate them out, then the compiler checks that we are not // using Hash + Eq at all in these methods. // // However, we should probably not let this show in the public API or docs. impl OrderMapCore { fn len(&self) -> usize { self.entries.len() } fn capacity(&self) -> usize { usable_capacity(self.raw_capacity()) } fn clear(&mut self) { self.entries.clear(); self.clear_indices(); } // clear self.indices to the same state as "no elements" fn clear_indices(&mut self) { for pos in self.indices.iter_mut() { *pos = Pos::none(); } } fn first_allocation(&mut self) { debug_assert_eq!(self.len(), 0); let raw_cap = 8usize; self.mask = raw_cap.wrapping_sub(1); self.indices = vec![Pos::none(); raw_cap].into_boxed_slice(); self.entries = Vec::with_capacity(usable_capacity(raw_cap)); } #[inline(never)] // `Sz` is *current* Size class, before grow fn double_capacity(&mut self) where Sz: Size { debug_assert!(self.raw_capacity() == 0 || self.len() > 0); if self.raw_capacity() == 0 { return self.first_allocation(); } // find first ideally placed element -- start of cluster let mut first_ideal = 0; for (i, index) in enumerate(&*self.indices) { if let Some(pos) = index.pos() { if 0 == probe_distance(self.mask, self.entries[pos].hash, i) { first_ideal = i; break; } } } // visit the entries in an order where we can simply reinsert them // into self.indices without any bucket stealing. let new_raw_cap = self.indices.len() * 2; let old_indices = replace(&mut self.indices, vec![Pos::none(); new_raw_cap].into_boxed_slice()); self.mask = new_raw_cap.wrapping_sub(1); // `Sz` is the old size class, and either u32 or u64 is the new for &pos in &old_indices[first_ideal..] 
{ dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); } for &pos in &old_indices[..first_ideal] { dispatch_32_vs_64!(self.reinsert_entry_in_order::(pos)); } let more = self.capacity() - self.len(); self.entries.reserve_exact(more); } // write to self.indices // read from self.entries at `pos` // // reinserting rewrites all `Pos` entries anyway. This handles transitioning // from u32 to u64 size class if needed by using the two type parameters. fn reinsert_entry_in_order(&mut self, pos: Pos) where SzNew: Size, SzOld: Size, { if let Some((i, hash_proxy)) = pos.resolve::() { // only if the size class is conserved can we use the short hash let entry_hash = if SzOld::is_same_size::() { hash_proxy.get_short_hash(&self.entries, i).into_hash() } else { self.entries[i].hash }; // find first empty bucket and insert there let mut probe = desired_pos(self.mask, entry_hash); probe_loop!(probe < self.indices.len(), { if self.indices[probe].is_none() { // empty bucket, insert here self.indices[probe] = Pos::with_hash::(i, entry_hash); return; } }); } } fn pop_impl(&mut self) -> Option<(K, V)> { let (probe, found) = match self.entries.last() .map(|e| self.find_existing_entry(e)) { None => return None, Some(t) => t, }; debug_assert_eq!(found, self.entries.len() - 1); Some(self.swap_remove_found(probe, found)) } // FIXME: reduce duplication (compare with insert) fn entry_phase_1(&mut self, hash: HashValue, key: K) -> Entry where Sz: Size, K: Eq, { let mut probe = desired_pos(self.mask, hash); let mut dist = 0; debug_assert!(self.len() < self.raw_capacity()); probe_loop!(probe < self.indices.len(), { if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { let entry_hash = hash_proxy.get_short_hash(&self.entries, i); // if existing element probed less than us, swap let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe); if their_dist < dist { // robin hood: steal the spot if it's better for us return Entry::Vacant(VacantEntry { map: self, hash: hash, 
key: key, probe: probe, }); } else if entry_hash == hash && self.entries[i].key == key { return Entry::Occupied(OccupiedEntry { map: self, key: key, probe: probe, index: i, }); } } else { // empty bucket, insert here return Entry::Vacant(VacantEntry { map: self, hash: hash, key: key, probe: probe, }); } dist += 1; }); } // First phase: Look for the preferred location for key. // // We will know if `key` is already in the map, before we need to insert it. // When we insert they key, it might be that we need to continue displacing // entries (robin hood hashing), in which case Inserted::RobinHood is returned fn insert_phase_1(&mut self, hash: HashValue, key: K, value: V) -> Inserted where Sz: Size, K: Eq, { let mut probe = desired_pos(self.mask, hash); let mut dist = 0; let insert_kind; debug_assert!(self.len() < self.raw_capacity()); probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some((i, hash_proxy)) = pos.resolve::() { let entry_hash = hash_proxy.get_short_hash(&self.entries, i); // if existing element probed less than us, swap let their_dist = probe_distance(self.mask, entry_hash.into_hash(), probe); if their_dist < dist { // robin hood: steal the spot if it's better for us let index = self.entries.len(); insert_kind = Inserted::RobinHood { probe: probe, old_pos: Pos::with_hash::(index, hash), }; break; } else if entry_hash == hash && self.entries[i].key == key { return Inserted::Swapped { prev_value: replace(&mut self.entries[i].value, value), }; } } else { // empty bucket, insert here let index = self.entries.len(); *pos = Pos::with_hash::(index, hash); insert_kind = Inserted::Done; break; } dist += 1; }); self.entries.push(Bucket { hash, key, value }); insert_kind } /// phase 2 is post-insert where we forward-shift `Pos` in the indices. 
fn insert_phase_2(&mut self, mut probe: usize, mut old_pos: Pos) where Sz: Size { probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if pos.is_none() { *pos = old_pos; break; } else { old_pos = replace(pos, old_pos); } }); } /// Return probe (indices) and position (entries) fn find_using(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> where F: Fn(&Bucket) -> bool, { dispatch_32_vs_64!(self.find_using_impl::<_>(hash, key_eq)) } fn find_using_impl(&self, hash: HashValue, key_eq: F) -> Option<(usize, usize)> where F: Fn(&Bucket) -> bool, Sz: Size, { debug_assert!(self.len() > 0); let mut probe = desired_pos(self.mask, hash); let mut dist = 0; probe_loop!(probe < self.indices.len(), { if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { let entry_hash = hash_proxy.get_short_hash(&self.entries, i); if dist > probe_distance(self.mask, entry_hash.into_hash(), probe) { // give up when probe distance is too long return None; } else if entry_hash == hash && key_eq(&self.entries[i]) { return Some((probe, i)); } } else { return None; } dist += 1; }); } /// Find `entry` which is already placed inside self.entries; /// return its probe and entry index. 
fn find_existing_entry(&self, entry: &Bucket) -> (usize, usize) { debug_assert!(self.len() > 0); let hash = entry.hash; let actual_pos = ptrdistance(&self.entries[0], entry); let probe = dispatch_32_vs_64!(self => find_existing_entry_at(&self.indices, hash, self.mask, actual_pos)); (probe, actual_pos) } /// Remove an entry by shifting all entries that follow it fn shift_remove_found(&mut self, probe: usize, found: usize) -> (K, V) { dispatch_32_vs_64!(self.shift_remove_found_impl(probe, found)) } fn shift_remove_found_impl(&mut self, probe: usize, found: usize) -> (K, V) where Sz: Size { // index `probe` and entry `found` is to be removed // use Vec::remove, but then we need to update the indices that point // to all of the other entries that have to move self.indices[probe] = Pos::none(); let entry = self.entries.remove(found); // correct indices that point to the entries that followed the removed entry. // use a heuristic between a full sweep vs. a `probe_loop!` for every shifted item. 
if self.indices.len() < (self.entries.len() - found) * 2 { // shift all indices greater than `found` for pos in self.indices.iter_mut() { if let Some((i, _)) = pos.resolve::() { if i > found { // shift the index pos.set_pos::(i - 1); } } } } else { // find each following entry to shift its index for (offset, entry) in enumerate(&self.entries[found..]) { let index = found + offset; let mut probe = desired_pos(self.mask, entry.hash); probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some((i, _)) = pos.resolve::() { if i == index + 1 { // found it, shift it pos.set_pos::(index); break; } } }); } } self.backward_shift_after_removal::(probe); (entry.key, entry.value) } /// Remove an entry by swapping it with the last fn swap_remove_found(&mut self, probe: usize, found: usize) -> (K, V) { dispatch_32_vs_64!(self.swap_remove_found_impl(probe, found)) } fn swap_remove_found_impl(&mut self, probe: usize, found: usize) -> (K, V) where Sz: Size { // index `probe` and entry `found` is to be removed // use swap_remove, but then we need to update the index that points // to the other entry that has to move self.indices[probe] = Pos::none(); let entry = self.entries.swap_remove(found); // correct index that points to the entry that had to swap places if let Some(entry) = self.entries.get(found) { // was not last element // examine new element in `found` and find it in indices let mut probe = desired_pos(self.mask, entry.hash); probe_loop!(probe < self.indices.len(), { let pos = &mut self.indices[probe]; if let Some((i, _)) = pos.resolve::() { if i >= self.entries.len() { // found it pos.set_pos::(found); break; } } }); } self.backward_shift_after_removal::(probe); (entry.key, entry.value) } fn backward_shift_after_removal(&mut self, probe_at_remove: usize) where Sz: Size { // backward shift deletion in self.indices // after probe, shift all non-ideally placed indices backward let mut last_probe = probe_at_remove; let mut probe = probe_at_remove 
+ 1; probe_loop!(probe < self.indices.len(), { if let Some((i, hash_proxy)) = self.indices[probe].resolve::() { let entry_hash = hash_proxy.get_short_hash(&self.entries, i); if probe_distance(self.mask, entry_hash.into_hash(), probe) > 0 { self.indices[last_probe] = self.indices[probe]; self.indices[probe] = Pos::none(); } else { break; } } else { break; } last_probe = probe; }); } fn retain_in_order_impl(&mut self, mut keep: F) where F: FnMut(&mut K, &mut V) -> bool, Sz: Size, { // Like Vec::retain in self.entries; for each removed key-value pair, // we clear its corresponding spot in self.indices, and run the // usual backward shift in self.indices. let len = self.entries.len(); let mut n_deleted = 0; for i in 0..len { let will_keep; let hash; { let ent = &mut self.entries[i]; hash = ent.hash; will_keep = keep(&mut ent.key, &mut ent.value); }; let probe = find_existing_entry_at::(&self.indices, hash, self.mask, i); if !will_keep { n_deleted += 1; self.indices[probe] = Pos::none(); self.backward_shift_after_removal::(probe); } else if n_deleted > 0 { self.indices[probe].set_pos::(i - n_deleted); self.entries.swap(i - n_deleted, i); } } self.entries.truncate(len - n_deleted); } fn sort_by(&mut self, mut compare: F) where F: FnMut(&K, &V, &K, &V) -> Ordering, { let side_index = self.save_hash_index(); self.entries.sort_by(move |ei, ej| compare(&ei.key, &ei.value, &ej.key, &ej.value)); self.restore_hash_index(side_index); } fn save_hash_index(&mut self) -> Vec { // Temporarily use the hash field in a bucket to store the old index. // Save the old hash values in `side_index`. Then we can sort // `self.entries` in place. Vec::from_iter(enumerate(&mut self.entries).map(|(i, elt)| { replace(&mut elt.hash, HashValue(i)).get() })) } fn restore_hash_index(&mut self, mut side_index: Vec) { // Write back the hash values from side_index and fill `side_index` with // a mapping from the old to the new index instead. 
for (i, ent) in enumerate(&mut self.entries) { let old_index = ent.hash.get(); ent.hash = HashValue(replace(&mut side_index[old_index], i)); } // Apply new index to self.indices dispatch_32_vs_64!(self => apply_new_index(&mut self.indices, &side_index)); fn apply_new_index(indices: &mut [Pos], new_index: &[usize]) where Sz: Size { for pos in indices { if let Some((i, _)) = pos.resolve::() { pos.set_pos::(new_index[i]); } } } } } /// Find, in the indices, an entry that already exists at a known position /// inside self.entries in the IndexMap. /// /// This is effectively reverse lookup, from the entries into the hash buckets. /// /// Return the probe index (into self.indices) /// /// + indices: The self.indices of the map, /// + hash: The full hash value from the bucket /// + mask: self.mask. /// + entry_index: The index of the entry in self.entries fn find_existing_entry_at(indices: &[Pos], hash: HashValue, mask: usize, entry_index: usize) -> usize where Sz: Size, { let mut probe = desired_pos(mask, hash); probe_loop!(probe < indices.len(), { // the entry *must* be present; if we hit a Pos::none this was not true // and there is a debug assertion in resolve_existing_index for that. let i = indices[probe].resolve_existing_index::(); if i == entry_index { return probe; } }); } use std::slice::Iter as SliceIter; use std::slice::IterMut as SliceIterMut; use std::vec::IntoIter as VecIntoIter; /// An iterator over the keys of a `IndexMap`. /// /// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its /// documentation for more. 
///
/// [`keys`]: struct.IndexMap.html#method.keys
/// [`IndexMap`]: struct.IndexMap.html
pub struct Keys<'a, K: 'a, V: 'a> {
    pub(crate) iter: SliceIter<'a, Bucket<K, V>>,
}

impl<'a, K, V> Iterator for Keys<'a, K, V> {
    type Item = &'a K;

    iterator_methods!(Bucket::key_ref);
}

impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
    fn next_back(&mut self) -> Option<&'a K> {
        self.iter.next_back().map(Bucket::key_ref)
    }
}

impl<'a, K, V> ExactSizeIterator for Keys<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Keys<'a, K, V> {
    fn clone(&self) -> Keys<'a, K, V> {
        Keys { iter: self.iter.clone() }
    }
}

impl<'a, K: fmt::Debug, V> fmt::Debug for Keys<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list()
            .entries(self.clone())
            .finish()
    }
}

/// An iterator over the values of a `IndexMap`.
///
/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`values`]: struct.IndexMap.html#method.values
/// [`IndexMap`]: struct.IndexMap.html
pub struct Values<'a, K: 'a, V: 'a> {
    iter: SliceIter<'a, Bucket<K, V>>,
}

impl<'a, K, V> Iterator for Values<'a, K, V> {
    type Item = &'a V;

    iterator_methods!(Bucket::value_ref);
}

impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().map(Bucket::value_ref)
    }
}

impl<'a, K, V> ExactSizeIterator for Values<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Values<'a, K, V> {
    fn clone(&self) -> Values<'a, K, V> {
        Values { iter: self.iter.clone() }
    }
}

impl<'a, K, V: fmt::Debug> fmt::Debug for Values<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list()
            .entries(self.clone())
            .finish()
    }
}

/// A mutable iterator over the values of a `IndexMap`.
///
/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`values_mut`]: struct.IndexMap.html#method.values_mut
/// [`IndexMap`]: struct.IndexMap.html
pub struct ValuesMut<'a, K: 'a, V: 'a> {
    iter: SliceIterMut<'a, Bucket<K, V>>,
}

impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
    type Item = &'a mut V;

    iterator_methods!(Bucket::value_mut);
}

impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().map(Bucket::value_mut)
    }
}

impl<'a, K, V> ExactSizeIterator for ValuesMut<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

/// An iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`iter`]: struct.IndexMap.html#method.iter
/// [`IndexMap`]: struct.IndexMap.html
pub struct Iter<'a, K: 'a, V: 'a> {
    iter: SliceIter<'a, Bucket<K, V>>,
}

impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);

    iterator_methods!(Bucket::refs);
}

impl<'a, K, V> DoubleEndedIterator for Iter<'a, K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().map(Bucket::refs)
    }
}

impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Iter<'a, K, V> {
    fn clone(&self) -> Iter<'a, K, V> {
        Iter { iter: self.iter.clone() }
    }
}

impl<'a, K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list()
            .entries(self.clone())
            .finish()
    }
}

/// A mutable iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut
/// [`IndexMap`]: struct.IndexMap.html
pub struct IterMut<'a, K: 'a, V: 'a> {
    iter: SliceIterMut<'a, Bucket<K, V>>,
}

impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    iterator_methods!(Bucket::ref_mut);
}

impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().map(Bucket::ref_mut)
    }
}

impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

/// An owning iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`IndexMap`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.IndexMap.html#method.into_iter
/// [`IndexMap`]: struct.IndexMap.html
pub struct IntoIter<K, V> {
    pub(crate) iter: VecIntoIter<Bucket<K, V>>,
}

impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);

    iterator_methods!(Bucket::key_value);
}

impl<'a, K, V> DoubleEndedIterator for IntoIter<K, V> {
    fn next_back(&mut self) -> Option<Self::Item> {
        self.iter.next_back().map(Bucket::key_value)
    }
}

impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let iter = self.iter.as_slice().iter().map(Bucket::refs);
        f.debug_list().entries(iter).finish()
    }
}

/// A draining iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its
/// documentation for more.
/// /// [`drain`]: struct.IndexMap.html#method.drain /// [`IndexMap`]: struct.IndexMap.html pub struct Drain<'a, K, V> where K: 'a, V: 'a { pub(crate) iter: ::std::vec::Drain<'a, Bucket> } impl<'a, K, V> Iterator for Drain<'a, K, V> { type Item = (K, V); iterator_methods!(Bucket::key_value); } impl<'a, K, V> DoubleEndedIterator for Drain<'a, K, V> { double_ended_iterator_methods!(Bucket::key_value); } impl<'a, K, V, S> IntoIterator for &'a IndexMap where K: Hash + Eq, S: BuildHasher, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl<'a, K, V, S> IntoIterator for &'a mut IndexMap where K: Hash + Eq, S: BuildHasher, { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() } } impl IntoIterator for IndexMap where K: Hash + Eq, S: BuildHasher, { type Item = (K, V); type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { iter: self.core.entries.into_iter(), } } } use std::ops::{Index, IndexMut}; impl<'a, K, V, Q: ?Sized, S> Index<&'a Q> for IndexMap where Q: Hash + Equivalent, K: Hash + Eq, S: BuildHasher, { type Output = V; /// ***Panics*** if `key` is not present in the map. fn index(&self, key: &'a Q) -> &V { if let Some(v) = self.get(key) { v } else { panic!("IndexMap: key not found") } } } /// Mutable indexing allows changing / updating values of key-value /// pairs that are already present. /// /// You can **not** insert new pairs with index syntax, use `.insert()`. impl<'a, K, V, Q: ?Sized, S> IndexMut<&'a Q> for IndexMap where Q: Hash + Equivalent, K: Hash + Eq, S: BuildHasher, { /// ***Panics*** if `key` is not present in the map. 
fn index_mut(&mut self, key: &'a Q) -> &mut V { if let Some(v) = self.get_mut(key) { v } else { panic!("IndexMap: key not found") } } } impl FromIterator<(K, V)> for IndexMap where K: Hash + Eq, S: BuildHasher + Default, { /// Create an `IndexMap` from the sequence of key-value pairs in the /// iterable. /// /// `from_iter` uses the same logic as `extend`. See /// [`extend`](#method.extend) for more details. fn from_iter>(iterable: I) -> Self { let iter = iterable.into_iter(); let (low, _) = iter.size_hint(); let mut map = Self::with_capacity_and_hasher(low, <_>::default()); map.extend(iter); map } } impl Extend<(K, V)> for IndexMap where K: Hash + Eq, S: BuildHasher, { /// Extend the map with all key-value pairs in the iterable. /// /// This is equivalent to calling [`insert`](#method.insert) for each of /// them in order, which means that for keys that already existed /// in the map, their value is updated but it keeps the existing order. /// /// New keys are inserted inserted in the order in the sequence. If /// equivalents of a key occur more than once, the last corresponding value /// prevails. fn extend>(&mut self, iterable: I) { for (k, v) in iterable { self.insert(k, v); } } } impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap where K: Hash + Eq + Copy, V: Copy, S: BuildHasher, { /// Extend the map with all key-value pairs in the iterable. /// /// See the first extend method for more details. 
fn extend>(&mut self, iterable: I) { self.extend(iterable.into_iter().map(|(&key, &value)| (key, value))); } } impl Default for IndexMap where S: BuildHasher + Default, { /// Return an empty `IndexMap` fn default() -> Self { Self::with_capacity_and_hasher(0, S::default()) } } impl PartialEq> for IndexMap where K: Hash + Eq, V1: PartialEq, S1: BuildHasher, S2: BuildHasher { fn eq(&self, other: &IndexMap) -> bool { if self.len() != other.len() { return false; } self.iter().all(|(key, value)| other.get(key).map_or(false, |v| *value == *v)) } } impl Eq for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher { } #[cfg(test)] mod tests { use super::*; use util::enumerate; #[test] fn it_works() { let mut map = IndexMap::new(); assert_eq!(map.is_empty(), true); map.insert(1, ()); map.insert(1, ()); assert_eq!(map.len(), 1); assert!(map.get(&1).is_some()); assert_eq!(map.is_empty(), false); } #[test] fn new() { let map = IndexMap::::new(); println!("{:?}", map); assert_eq!(map.capacity(), 0); assert_eq!(map.len(), 0); assert_eq!(map.is_empty(), true); } #[test] fn insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(map.len(), i); map.insert(elt, elt); assert_eq!(map.len(), i + 1); assert_eq!(map.get(&elt), Some(&elt)); assert_eq!(map[&elt], elt); } println!("{:?}", map); for &elt in ¬_present { assert!(map.get(&elt).is_none()); } } #[test] fn insert_full() { let insert = vec![9, 2, 7, 1, 4, 6, 13]; let present = vec![1, 6, 2]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(map.len(), i); let (index, existing) = map.insert_full(elt, elt); assert_eq!(existing, None); assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); assert_eq!(map.len(), i + 1); } let len = map.len(); for &elt in &present { let (index, existing) = map.insert_full(elt, elt); assert_eq!(existing, Some(elt)); 
assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0)); assert_eq!(map.len(), len); } } #[test] fn insert_2() { let mut map = IndexMap::with_capacity(16); let mut keys = vec![]; keys.extend(0..16); keys.extend(128..267); for &i in &keys { let old_map = map.clone(); map.insert(i, ()); for key in old_map.keys() { if map.get(key).is_none() { println!("old_map: {:?}", old_map); println!("map: {:?}", map); panic!("did not find {} in map", key); } } } for &i in &keys { assert!(map.get(&i).is_some(), "did not find {}", i); } } #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, ()); } assert_eq!(map.keys().count(), map.len()); assert_eq!(map.keys().count(), insert.len()); for (a, b) in insert.iter().zip(map.keys()) { assert_eq!(a, b); } for (i, k) in (0..insert.len()).zip(map.keys()) { assert_eq!(map.get_index(i).unwrap().0, k); } } #[test] fn grow() { let insert = [0, 4, 2, 12, 8, 7, 11]; let not_present = [1, 3, 6, 9, 10]; let mut map = IndexMap::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(map.len(), i); map.insert(elt, elt); assert_eq!(map.len(), i + 1); assert_eq!(map.get(&elt), Some(&elt)); assert_eq!(map[&elt], elt); } println!("{:?}", map); for &elt in &insert { map.insert(elt * 10, elt); } for &elt in &insert { map.insert(elt * 100, elt); } for (i, &elt) in insert.iter().cycle().enumerate().take(100) { map.insert(elt * 100 + i as i32, elt); } println!("{:?}", map); for &elt in ¬_present { assert!(map.get(&elt).is_none()); } } #[test] fn remove() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, elt); } assert_eq!(map.keys().count(), map.len()); assert_eq!(map.keys().count(), insert.len()); for (a, b) in insert.iter().zip(map.keys()) { assert_eq!(a, b); } let remove_fail = [99, 77]; let remove = [4, 12, 8, 7]; for &key in &remove_fail { 
assert!(map.swap_remove_full(&key).is_none()); } println!("{:?}", map); for &key in &remove { //println!("{:?}", map); let index = map.get_full(&key).unwrap().0; assert_eq!(map.swap_remove_full(&key), Some((index, key, key))); } println!("{:?}", map); for key in &insert { assert_eq!(map.get(key).is_some(), !remove.contains(key)); } assert_eq!(map.len(), insert.len() - remove.len()); assert_eq!(map.keys().count(), insert.len() - remove.len()); } #[test] fn remove_to_empty() { let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 }; map.swap_remove(&5).unwrap(); map.swap_remove(&4).unwrap(); map.swap_remove(&0).unwrap(); assert!(map.is_empty()); } #[test] fn swap_remove_index() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut map = IndexMap::new(); for &elt in &insert { map.insert(elt, elt * 2); } let mut vector = insert.to_vec(); let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; // check that the same swap remove sequence on vec and map // have the same result. for &rm in remove_sequence { let out_vec = vector.swap_remove(rm); let (out_map, _) = map.swap_remove_index(rm).unwrap(); assert_eq!(out_vec, out_map); } assert_eq!(vector.len(), map.len()); for (a, b) in vector.iter().zip(map.keys()) { assert_eq!(a, b); } } #[test] fn partial_eq_and_eq() { let mut map_a = IndexMap::new(); map_a.insert(1, "1"); map_a.insert(2, "2"); let mut map_b = map_a.clone(); assert_eq!(map_a, map_b); map_b.swap_remove(&1); assert_ne!(map_a, map_b); let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.to_owned())).collect(); assert_ne!(map_a, map_c); assert_ne!(map_c, map_a); } #[test] fn extend() { let mut map = IndexMap::new(); map.extend(vec![(&1, &2), (&3, &4)]); map.extend(vec![(5, 6)]); assert_eq!(map.into_iter().collect::>(), vec![(1, 2), (3, 4), (5, 6)]); } #[test] fn entry() { let mut map = IndexMap::new(); map.insert(1, "1"); map.insert(2, "2"); { let e = map.entry(3); assert_eq!(e.index(), 2); let e = e.or_insert("3"); assert_eq!(e, &"3"); } 
let e = map.entry(2); assert_eq!(e.index(), 1); assert_eq!(e.key(), &2); match e { Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"), Entry::Vacant(_) => panic!() } assert_eq!(e.or_insert("4"), &"2"); } #[test] fn entry_and_modify() { let mut map = IndexMap::new(); map.insert(1, "1"); map.entry(1).and_modify(|x| *x = "2"); assert_eq!(Some(&"2"), map.get(&1)); map.entry(2).and_modify(|x| *x = "doesn't exist"); assert_eq!(None, map.get(&2)); } #[test] fn entry_or_default() { let mut map = IndexMap::new(); #[derive(Debug, PartialEq)] enum TestEnum { DefaultValue, NonDefaultValue, } impl Default for TestEnum { fn default() -> Self { TestEnum::DefaultValue } } map.insert(1, TestEnum::NonDefaultValue); assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default()); assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default()); } #[test] fn keys() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let keys: Vec<_> = map.keys().cloned().collect(); assert_eq!(keys.len(), 3); assert!(keys.contains(&1)); assert!(keys.contains(&2)); assert!(keys.contains(&3)); } #[test] fn values() { let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')]; let map: IndexMap<_, _> = vec.into_iter().collect(); let values: Vec<_> = map.values().cloned().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&'a')); assert!(values.contains(&'b')); assert!(values.contains(&'c')); } #[test] fn values_mut() { let vec = vec![(1, 1), (2, 2), (3, 3)]; let mut map: IndexMap<_, _> = vec.into_iter().collect(); for value in map.values_mut() { *value *= 2 } let values: Vec<_> = map.values().cloned().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&2)); assert!(values.contains(&4)); assert!(values.contains(&6)); } } indexmap-1.2.0/src/mutable_keys.rs010064400017510001751000000042201337460514400153700ustar0000000000000000 use std::hash::Hash; use std::hash::BuildHasher; use super::{IndexMap, Equivalent}; pub struct PrivateMarker { } 
/// Opt-in mutable access to keys. /// /// These methods expose `&mut K`, mutable references to the key as it is stored /// in the map. /// You are allowed to modify the keys in the hashmap **if the modifcation /// does not change the key’s hash and equality**. /// /// If keys are modified erronously, you can no longer look them up. /// This is sound (memory safe) but a logical error hazard (just like /// implementing PartialEq, Eq, or Hash incorrectly would be). /// /// `use` this trait to enable its methods for `IndexMap`. pub trait MutableKeys { type Key; type Value; /// Return item index, mutable reference to key and value fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut Self::Key, &mut Self::Value)> where Q: Hash + Equivalent; /// Scan through each key-value pair in the map and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). fn retain2(&mut self, keep: F) where F: FnMut(&mut Self::Key, &mut Self::Value) -> bool; /// This method is not useful in itself – it is there to “seal” the trait /// for external implementation, so that we can add methods without /// causing breaking changes. fn __private_marker(&self) -> PrivateMarker; } /// Opt-in mutable access to keys. /// /// See [`MutableKeys`](trait.MutableKeys.html) for more information. impl MutableKeys for IndexMap where K: Eq + Hash, S: BuildHasher, { type Key = K; type Value = V; fn get_full_mut2(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)> where Q: Hash + Equivalent, { self.get_full_mut2_impl(key) } fn retain2(&mut self, keep: F) where F: FnMut(&mut K, &mut V) -> bool, { self.retain_mut(keep) } fn __private_marker(&self) -> PrivateMarker { PrivateMarker { } } } indexmap-1.2.0/src/rayon/map.rs010064400017510001751000000336211353367262700146270ustar0000000000000000//! Parallel iterator types for `IndexMap` with [rayon](https://docs.rs/rayon/1.0/rayon). 
//!
//! You will rarely need to interact with this module directly unless you need to name one of the
//! iterator types.
//!
//! Requires crate feature `"rayon"`

use super::collect;
use super::rayon::prelude::*;
use super::rayon::iter::plumbing::{Consumer, UnindexedConsumer, ProducerCallback};

use std::cmp::Ordering;
use std::fmt;
use std::hash::Hash;
use std::hash::BuildHasher;

use Bucket;
use Entries;
use IndexMap;

/// Requires crate feature `"rayon"`.
impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
    where K: Hash + Eq + Send,
          V: Send,
          S: BuildHasher,
{
    type Item = (K, V);
    type Iter = IntoParIter<K, V>;

    fn into_par_iter(self) -> Self::Iter {
        IntoParIter {
            entries: self.into_entries(),
        }
    }
}

/// A parallel owning iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`into_par_iter`] method on [`IndexMap`]
/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more.
///
/// [`into_par_iter`]: ../struct.IndexMap.html#method.into_par_iter
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct IntoParIter<K, V> {
    entries: Vec<Bucket<K, V>>,
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoParIter<K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let iter = self.entries.iter().map(Bucket::refs);
        f.debug_list().entries(iter).finish()
    }
}

impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
    type Item = (K, V);

    parallel_iterator_methods!(Bucket::key_value);
}

impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
    indexed_parallel_iterator_methods!(Bucket::key_value);
}

/// Requires crate feature `"rayon"`.
impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
    where K: Hash + Eq + Sync,
          V: Sync,
          S: BuildHasher,
{
    type Item = (&'a K, &'a V);
    type Iter = ParIter<'a, K, V>;

    fn into_par_iter(self) -> Self::Iter {
        ParIter {
            entries: self.as_entries(),
        }
    }
}

/// A parallel iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`par_iter`] method on [`IndexMap`]
/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more.
///
/// [`par_iter`]: ../struct.IndexMap.html#method.par_iter
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct ParIter<'a, K: 'a, V: 'a> {
    entries: &'a [Bucket<K, V>],
}

impl<'a, K, V> Clone for ParIter<'a, K, V> {
    fn clone(&self) -> ParIter<'a, K, V> {
        ParIter { ..*self }
    }
}

impl<'a, K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIter<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let iter = self.entries.iter().map(Bucket::refs);
        f.debug_list().entries(iter).finish()
    }
}

impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
    type Item = (&'a K, &'a V);

    parallel_iterator_methods!(Bucket::refs);
}

impl<'a, K: Sync, V: Sync> IndexedParallelIterator for ParIter<'a, K, V> {
    indexed_parallel_iterator_methods!(Bucket::refs);
}

/// Requires crate feature `"rayon"`.
impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
    where K: Hash + Eq + Sync + Send,
          V: Send,
          S: BuildHasher,
{
    type Item = (&'a K, &'a mut V);
    type Iter = ParIterMut<'a, K, V>;

    fn into_par_iter(self) -> Self::Iter {
        ParIterMut {
            entries: self.as_entries_mut(),
        }
    }
}

/// A parallel mutable iterator over the entries of a `IndexMap`.
///
/// This `struct` is created by the [`par_iter_mut`] method on [`IndexMap`]
/// (provided by rayon's `IntoParallelRefMutIterator` trait). See its documentation for more.
///
/// [`par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct ParIterMut<'a, K: 'a, V: 'a> {
    entries: &'a mut [Bucket<K, V>],
}

impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    parallel_iterator_methods!(Bucket::ref_mut);
}

impl<'a, K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'a, K, V> {
    indexed_parallel_iterator_methods!(Bucket::ref_mut);
}

/// Requires crate feature `"rayon"`.
impl<K, V, S> IndexMap<K, V, S>
    where K: Hash + Eq + Sync,
          V: Sync,
          S: BuildHasher,
{
    /// Return a parallel iterator over the keys of the map.
    ///
    /// While parallel iterators can process items in any order, their relative order
    /// in the map is still preserved for operations like `reduce` and `collect`.
    pub fn par_keys(&self) -> ParKeys<K, V> {
        ParKeys {
            entries: self.as_entries(),
        }
    }

    /// Return a parallel iterator over the values of the map.
    ///
    /// While parallel iterators can process items in any order, their relative order
    /// in the map is still preserved for operations like `reduce` and `collect`.
    pub fn par_values(&self) -> ParValues<K, V> {
        ParValues {
            entries: self.as_entries(),
        }
    }

    /// Returns `true` if `self` contains all of the same key-value pairs as `other`,
    /// regardless of each map's indexed order, determined in parallel.
    pub fn par_eq<V2, S2>(&self, other: &IndexMap<K, V2, S2>) -> bool
        where V: PartialEq<V2>,
              V2: Sync,
              S2: BuildHasher + Sync
    {
        self.len() == other.len() &&
            self.par_iter().all(move |(key, value)| {
                other.get(key).map_or(false, |v| *value == *v)
            })
    }
}

/// A parallel iterator over the keys of a `IndexMap`.
///
/// This `struct` is created by the [`par_keys`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`par_keys`]: ../struct.IndexMap.html#method.par_keys
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct ParKeys<'a, K: 'a, V: 'a> {
    entries: &'a [Bucket<K, V>],
}

impl<'a, K, V> Clone for ParKeys<'a, K, V> {
    fn clone(&self) -> ParKeys<'a, K, V> {
        ParKeys { ..*self }
    }
}

impl<'a, K: fmt::Debug, V> fmt::Debug for ParKeys<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let iter = self.entries.iter().map(Bucket::key_ref);
        f.debug_list().entries(iter).finish()
    }
}

impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
    type Item = &'a K;

    parallel_iterator_methods!(Bucket::key_ref);
}

impl<'a, K: Sync, V: Sync> IndexedParallelIterator for ParKeys<'a, K, V> {
    indexed_parallel_iterator_methods!(Bucket::key_ref);
}

/// A parallel iterator over the values of a `IndexMap`.
///
/// This `struct` is created by the [`par_values`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`par_values`]: ../struct.IndexMap.html#method.par_values
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct ParValues<'a, K: 'a, V: 'a> {
    entries: &'a [Bucket<K, V>],
}

impl<'a, K, V> Clone for ParValues<'a, K, V> {
    fn clone(&self) -> ParValues<'a, K, V> {
        ParValues { ..*self }
    }
}

impl<'a, K, V: fmt::Debug> fmt::Debug for ParValues<'a, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let iter = self.entries.iter().map(Bucket::value_ref);
        f.debug_list().entries(iter).finish()
    }
}

impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
    type Item = &'a V;

    parallel_iterator_methods!(Bucket::value_ref);
}

impl<'a, K: Sync, V: Sync> IndexedParallelIterator for ParValues<'a, K, V> {
    indexed_parallel_iterator_methods!(Bucket::value_ref);
}

/// Requires crate feature `"rayon"`.
impl<K, V, S> IndexMap<K, V, S>
    where K: Hash + Eq + Send,
          V: Send,
          S: BuildHasher,
{
    /// Return a parallel iterator over mutable references to the values of the map
    ///
    /// While parallel iterators can process items in any order, their relative order
    /// in the map is still preserved for operations like `reduce` and `collect`.
    pub fn par_values_mut(&mut self) -> ParValuesMut<K, V> {
        ParValuesMut {
            entries: self.as_entries_mut(),
        }
    }

    /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys.
    pub fn par_sort_keys(&mut self)
        where K: Ord,
    {
        self.with_entries(|entries| {
            entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key));
        });
    }

    /// Sort the map’s key-value pairs in place and in parallel, using the comparison
    /// function `compare`.
    ///
    /// The comparison function receives two key and value pairs to compare (you
    /// can sort by keys or values or their combination as needed).
    pub fn par_sort_by<F>(&mut self, cmp: F)
        where F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
    {
        self.with_entries(|entries| {
            entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
        });
    }

    /// Sort the key-value pairs of the map in parallel and return a by value parallel
    /// iterator of the key-value pairs with the result.
    pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<K, V>
        where F: Fn(&K, &V, &K, &V) -> Ordering + Sync
    {
        let mut entries = self.into_entries();
        entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
        IntoParIter { entries }
    }
}

/// A parallel mutable iterator over the values of a `IndexMap`.
///
/// This `struct` is created by the [`par_values_mut`] method on [`IndexMap`]. See its
/// documentation for more.
///
/// [`par_values_mut`]: ../struct.IndexMap.html#method.par_values_mut
/// [`IndexMap`]: ../struct.IndexMap.html
pub struct ParValuesMut<'a, K: 'a, V: 'a> {
    entries: &'a mut [Bucket<K, V>],
}

impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
    type Item = &'a mut V;

    parallel_iterator_methods!(Bucket::value_mut);
}

impl<'a, K: Send, V: Send> IndexedParallelIterator for ParValuesMut<'a, K, V> {
    indexed_parallel_iterator_methods!(Bucket::value_mut);
}

/// Requires crate feature `"rayon"`.
impl<K, V, S> FromParallelIterator<(K, V)> for IndexMap<K, V, S>
    where K: Eq + Hash + Send,
          V: Send,
          S: BuildHasher + Default + Send,
{
    fn from_par_iter<I>(iter: I) -> Self
        where I: IntoParallelIterator<Item = (K, V)>
    {
        let list = collect(iter);
        let len = list.iter().map(Vec::len).sum();
        let mut map = Self::with_capacity_and_hasher(len, S::default());
        for vec in list {
            map.extend(vec);
        }
        map
    }
}

/// Requires crate feature `"rayon"`.
impl<K, V, S> ParallelExtend<(K, V)> for IndexMap<K, V, S>
    where K: Eq + Hash + Send,
          V: Send,
          S: BuildHasher + Send,
{
    fn par_extend<I>(&mut self, iter: I)
        where I: IntoParallelIterator<Item = (K, V)>
    {
        for vec in collect(iter) {
            self.extend(vec);
        }
    }
}

/// Requires crate feature `"rayon"`.
impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap<K, V, S>
    where K: Copy + Eq + Hash + Send + Sync,
          V: Copy + Send + Sync,
          S: BuildHasher + Send,
{
    fn par_extend<I>(&mut self, iter: I)
        where I: IntoParallelIterator<Item = (&'a K, &'a V)>
    {
        for vec in collect(iter) {
            self.extend(vec);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn insert_order() {
        let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
        let mut map = IndexMap::new();

        for &elt in &insert {
            map.insert(elt, ());
        }

        assert_eq!(map.par_keys().count(), map.len());
        assert_eq!(map.par_keys().count(), insert.len());
        insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| {
            assert_eq!(a, b);
        });
        (0..insert.len()).into_par_iter().zip(map.par_keys()).for_each(|(i, k)| {
            assert_eq!(map.get_index(i).unwrap().0, k);
        });
    }

    #[test]
    fn partial_eq_and_eq() {
        let mut map_a = IndexMap::new();
        map_a.insert(1, "1");
        map_a.insert(2, "2");
        let mut map_b = map_a.clone();
        assert!(map_a.par_eq(&map_b));
        map_b.swap_remove(&1);
        assert!(!map_a.par_eq(&map_b));
        map_b.insert(3, "3");
        assert!(!map_a.par_eq(&map_b));

        let map_c: IndexMap<_, String> = map_b.into_par_iter().map(|(k, v)| (k, v.to_owned())).collect();
        assert!(!map_a.par_eq(&map_c));
        assert!(!map_c.par_eq(&map_a));
    }

    #[test]
    fn extend() {
        let mut map = IndexMap::new();
        map.par_extend(vec![(&1, &2), (&3, &4)]);
        map.par_extend(vec![(5, 6)]);
        assert_eq!(map.into_par_iter().collect::<Vec<_>>(), vec![(1, 2), (3, 4), (5, 6)]);
    }

    #[test]
    fn keys() {
        let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
        let map: IndexMap<_, _> = vec.into_par_iter().collect();
        let keys: Vec<_> = map.par_keys().cloned().collect();
        assert_eq!(keys.len(), 3);
        assert!(keys.contains(&1));
        assert!(keys.contains(&2));
        assert!(keys.contains(&3));
    }

    #[test]
    fn values() {
        let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
        let map: IndexMap<_, _> = vec.into_par_iter().collect();
        let values: Vec<_> = map.par_values().cloned().collect();
        assert_eq!(values.len(), 3);
        assert!(values.contains(&'a'));
        assert!(values.contains(&'b'));
assert!(values.contains(&'c')); } #[test] fn values_mut() { let vec = vec![(1, 1), (2, 2), (3, 3)]; let mut map: IndexMap<_, _> = vec.into_par_iter().collect(); map.par_values_mut().for_each(|value| { *value *= 2 }); let values: Vec<_> = map.par_values().cloned().collect(); assert_eq!(values.len(), 3); assert!(values.contains(&2)); assert!(values.contains(&4)); assert!(values.contains(&6)); } } indexmap-1.2.0/src/rayon/mod.rs010064400017510001751000000044741352703340100146150ustar0000000000000000 extern crate rayon; use self::rayon::prelude::*; use std::collections::LinkedList; // generate `ParallelIterator` methods by just forwarding to the underlying // self.entries and mapping its elements. macro_rules! parallel_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element ($map_elt:expr) => { fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer { self.entries.into_par_iter() .map($map_elt) .drive_unindexed(consumer) } // NB: This allows indexed collection, e.g. directly into a `Vec`, but the // underlying iterator must really be indexed. We should remove this if we // start having tombstones that must be filtered out. fn opt_len(&self) -> Option { Some(self.entries.len()) } } } // generate `IndexedParallelIterator` methods by just forwarding to the underlying // self.entries and mapping its elements. macro_rules! indexed_parallel_iterator_methods { // $map_elt is the mapping function from the underlying iterator's element ($map_elt:expr) => { fn drive(self, consumer: C) -> C::Result where C: Consumer { self.entries.into_par_iter() .map($map_elt) .drive(consumer) } fn len(&self) -> usize { self.entries.len() } fn with_producer(self, callback: CB) -> CB::Output where CB: ProducerCallback { self.entries.into_par_iter() .map($map_elt) .with_producer(callback) } } } pub mod map; pub mod set; // This form of intermediate collection is also how Rayon collects `HashMap`. 
// Note that the order will also be preserved! fn collect(iter: I) -> LinkedList> { iter.into_par_iter() .fold(Vec::new, |mut vec, elem| { vec.push(elem); vec }) .map(|vec| { let mut list = LinkedList::new(); list.push_back(vec); list }) .reduce(LinkedList::new, |mut list1, mut list2| { list1.append(&mut list2); list1 }) } indexmap-1.2.0/src/rayon/set.rs010064400017510001751000000457231353367262700146530ustar0000000000000000//! Parallel iterator types for `IndexSet` with [rayon](https://docs.rs/rayon/1.0/rayon). //! //! You will rarely need to interact with this module directly unless you need to name one of the //! iterator types. //! //! Requires crate feature `"rayon"`. use super::collect; use super::rayon::prelude::*; use super::rayon::iter::plumbing::{Consumer, UnindexedConsumer, ProducerCallback}; use std::cmp::Ordering; use std::fmt; use std::hash::Hash; use std::hash::BuildHasher; use Entries; use IndexSet; type Bucket = ::Bucket; /// Requires crate feature `"rayon"`. impl IntoParallelIterator for IndexSet where T: Hash + Eq + Send, S: BuildHasher, { type Item = T; type Iter = IntoParIter; fn into_par_iter(self) -> Self::Iter { IntoParIter { entries: self.into_entries(), } } } /// A parallel owning iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`into_par_iter`] method on [`IndexSet`] /// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more. 
/// /// [`IndexSet`]: ../struct.IndexSet.html /// [`into_par_iter`]: ../struct.IndexSet.html#method.into_par_iter pub struct IntoParIter { entries: Vec>, } impl fmt::Debug for IntoParIter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let iter = self.entries.iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl ParallelIterator for IntoParIter { type Item = T; parallel_iterator_methods!(Bucket::key); } impl IndexedParallelIterator for IntoParIter { indexed_parallel_iterator_methods!(Bucket::key); } /// Requires crate feature `"rayon"`. impl<'a, T, S> IntoParallelIterator for &'a IndexSet where T: Hash + Eq + Sync, S: BuildHasher, { type Item = &'a T; type Iter = ParIter<'a, T>; fn into_par_iter(self) -> Self::Iter { ParIter { entries: self.as_entries(), } } } /// A parallel iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`par_iter`] method on [`IndexSet`] /// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more. /// /// [`IndexSet`]: ../struct.IndexSet.html /// [`par_iter`]: ../struct.IndexSet.html#method.par_iter pub struct ParIter<'a, T: 'a> { entries: &'a [Bucket], } impl<'a, T> Clone for ParIter<'a, T> { fn clone(&self) -> Self { ParIter { ..*self } } } impl<'a, T: fmt::Debug> fmt::Debug for ParIter<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let iter = self.entries.iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { type Item = &'a T; parallel_iterator_methods!(Bucket::key_ref); } impl<'a, T: Sync> IndexedParallelIterator for ParIter<'a, T> { indexed_parallel_iterator_methods!(Bucket::key_ref); } /// Requires crate feature `"rayon"`. impl IndexSet where T: Hash + Eq + Sync, S: BuildHasher + Sync, { /// Return a parallel iterator over the values that are in `self` but not `other`. 
/// /// While parallel iterators can process items in any order, their relative order /// in the `self` set is still preserved for operations like `reduce` and `collect`. pub fn par_difference<'a, S2>(&'a self, other: &'a IndexSet) -> ParDifference<'a, T, S, S2> where S2: BuildHasher + Sync { ParDifference { set1: self, set2: other, } } /// Return a parallel iterator over the values that are in `self` or `other`, /// but not in both. /// /// While parallel iterators can process items in any order, their relative order /// in the sets is still preserved for operations like `reduce` and `collect`. /// Values from `self` are produced in their original order, followed by /// values from `other` in their original order. pub fn par_symmetric_difference<'a, S2>(&'a self, other: &'a IndexSet) -> ParSymmetricDifference<'a, T, S, S2> where S2: BuildHasher + Sync { ParSymmetricDifference { set1: self, set2: other, } } /// Return a parallel iterator over the values that are in both `self` and `other`. /// /// While parallel iterators can process items in any order, their relative order /// in the `self` set is still preserved for operations like `reduce` and `collect`. pub fn par_intersection<'a, S2>(&'a self, other: &'a IndexSet) -> ParIntersection<'a, T, S, S2> where S2: BuildHasher + Sync { ParIntersection { set1: self, set2: other, } } /// Return a parallel iterator over all values that are in `self` or `other`. /// /// While parallel iterators can process items in any order, their relative order /// in the sets is still preserved for operations like `reduce` and `collect`. /// Values from `self` are produced in their original order, followed by /// values that are unique to `other` in their original order. 
pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet) -> ParUnion<'a, T, S, S2> where S2: BuildHasher + Sync { ParUnion { set1: self, set2: other, } } /// Returns `true` if `self` contains all of the same values as `other`, /// regardless of each set's indexed order, determined in parallel. pub fn par_eq(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync { self.len() == other.len() && self.par_is_subset(other) } /// Returns `true` if `self` has no elements in common with `other`, /// determined in parallel. pub fn par_is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync { if self.len() <= other.len() { self.par_iter().all(move |value| !other.contains(value)) } else { other.par_iter().all(move |value| !self.contains(value)) } } /// Returns `true` if all elements of `other` are contained in `self`, /// determined in parallel. pub fn par_is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync { other.par_is_subset(self) } /// Returns `true` if all elements of `self` are contained in `other`, /// determined in parallel. pub fn par_is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher + Sync { self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value)) } } /// A parallel iterator producing elements in the difference of `IndexSet`s. /// /// This `struct` is created by the [`par_difference`] method on [`IndexSet`]. /// See its documentation for more. 
/// /// [`IndexSet`]: ../struct.IndexSet.html /// [`par_difference`]: ../struct.IndexSet.html#method.par_difference pub struct ParDifference<'a, T: 'a, S1: 'a, S2: 'a> { set1: &'a IndexSet, set2: &'a IndexSet, } impl<'a, T, S1, S2> Clone for ParDifference<'a, T, S1, S2> { fn clone(&self) -> Self { ParDifference { ..*self } } } impl<'a, T, S1, S2> fmt::Debug for ParDifference<'a, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.set1.difference(&self.set2)).finish() } } impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2> where T: Hash + Eq + Sync, S1: BuildHasher + Sync, S2: BuildHasher + Sync, { type Item = &'a T; fn drive_unindexed(self, consumer: C) -> C::Result where C: UnindexedConsumer { let Self { set1, set2 } = self; set1.par_iter() .filter(move |&item| !set2.contains(item)) .drive_unindexed(consumer) } } /// A parallel iterator producing elements in the intersection of `IndexSet`s. /// /// This `struct` is created by the [`par_intersection`] method on [`IndexSet`]. /// See its documentation for more. 
///
/// [`IndexSet`]: ../struct.IndexSet.html
/// [`par_intersection`]: ../struct.IndexSet.html#method.par_intersection
pub struct ParIntersection<'a, T: 'a, S1: 'a, S2: 'a> {
    set1: &'a IndexSet<T, S1>,
    set2: &'a IndexSet<T, S2>,
}

impl<'a, T, S1, S2> Clone for ParIntersection<'a, T, S1, S2> {
    fn clone(&self) -> Self {
        ParIntersection { ..*self }
    }
}

impl<'a, T, S1, S2> fmt::Debug for ParIntersection<'a, T, S1, S2>
    where T: fmt::Debug + Eq + Hash,
          S1: BuildHasher,
          S2: BuildHasher,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Debug via the sequential intersection iterator, in `set1` order.
        f.debug_list().entries(self.set1.intersection(&self.set2)).finish()
    }
}

impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2>
    where T: Hash + Eq + Sync,
          S1: BuildHasher + Sync,
          S2: BuildHasher + Sync,
{
    type Item = &'a T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
        where C: UnindexedConsumer<Self::Item>
    {
        let Self { set1, set2 } = self;
        set1.par_iter()
            .filter(move |&item| set2.contains(item))
            .drive_unindexed(consumer)
    }
}

/// A parallel iterator producing elements in the symmetric difference of `IndexSet`s.
///
/// This `struct` is created by the [`par_symmetric_difference`] method on
/// [`IndexSet`]. See its documentation for more.
///
/// [`IndexSet`]: ../struct.IndexSet.html
/// [`par_symmetric_difference`]: ../struct.IndexSet.html#method.par_symmetric_difference
pub struct ParSymmetricDifference<'a, T: 'a, S1: 'a, S2: 'a> {
    set1: &'a IndexSet<T, S1>,
    set2: &'a IndexSet<T, S2>,
}

impl<'a, T, S1, S2> Clone for ParSymmetricDifference<'a, T, S1, S2> {
    fn clone(&self) -> Self {
        ParSymmetricDifference { ..*self }
    }
}

impl<'a, T, S1, S2> fmt::Debug for ParSymmetricDifference<'a, T, S1, S2>
    where T: fmt::Debug + Eq + Hash,
          S1: BuildHasher,
          S2: BuildHasher,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.set1.symmetric_difference(&self.set2)).finish()
    }
}

impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2>
    where T: Hash + Eq + Sync,
          S1: BuildHasher + Sync,
          S2: BuildHasher + Sync,
{
    type Item = &'a T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
        where C: UnindexedConsumer<Self::Item>
    {
        // (set1 - set2) followed by (set2 - set1).
        let Self { set1, set2 } = self;
        set1.par_difference(set2)
            .chain(set2.par_difference(set1))
            .drive_unindexed(consumer)
    }
}

/// A parallel iterator producing elements in the union of `IndexSet`s.
///
/// This `struct` is created by the [`par_union`] method on [`IndexSet`].
/// See its documentation for more.
///
/// [`IndexSet`]: ../struct.IndexSet.html
/// [`par_union`]: ../struct.IndexSet.html#method.par_union
pub struct ParUnion<'a, T: 'a, S1: 'a, S2: 'a> {
    set1: &'a IndexSet<T, S1>,
    set2: &'a IndexSet<T, S2>,
}

impl<'a, T, S1, S2> Clone for ParUnion<'a, T, S1, S2> {
    fn clone(&self) -> Self {
        ParUnion { ..*self }
    }
}

impl<'a, T, S1, S2> fmt::Debug for ParUnion<'a, T, S1, S2>
    where T: fmt::Debug + Eq + Hash,
          S1: BuildHasher,
          S2: BuildHasher,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.set1.union(&self.set2)).finish()
    }
}

impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2>
    where T: Hash + Eq + Sync,
          S1: BuildHasher + Sync,
          S2: BuildHasher + Sync,
{
    type Item = &'a T;

    fn drive_unindexed<C>(self, consumer: C) -> C::Result
        where C: UnindexedConsumer<Self::Item>
    {
        // All of set1, then whatever set2 has that set1 lacks.
        let Self { set1, set2 } = self;
        set1.par_iter()
            .chain(set2.par_difference(set1))
            .drive_unindexed(consumer)
    }
}

/// Requires crate feature `"rayon"`.
impl<T, S> IndexSet<T, S>
    where T: Hash + Eq + Send,
          S: BuildHasher + Send,
{
    /// Sort the set’s values in parallel by their default ordering.
    pub fn par_sort(&mut self)
        where T: Ord,
    {
        self.with_entries(|entries| {
            entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key));
        });
    }

    /// Sort the set’s values in place and in parallel, using the comparison function `compare`.
    pub fn par_sort_by<F>(&mut self, cmp: F)
        where F: Fn(&T, &T) -> Ordering + Sync,
    {
        self.with_entries(|entries| {
            entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
        });
    }

    /// Sort the values of the set in parallel and return a by value parallel iterator of
    /// the values with the result.
    pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<T>
        where F: Fn(&T, &T) -> Ordering + Sync
    {
        let mut entries = self.into_entries();
        entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
        IntoParIter { entries }
    }
}

/// Requires crate feature `"rayon"`.
impl FromParallelIterator for IndexSet where T: Eq + Hash + Send, S: BuildHasher + Default + Send, { fn from_par_iter(iter: I) -> Self where I: IntoParallelIterator { let list = collect(iter); let len = list.iter().map(Vec::len).sum(); let mut set = Self::with_capacity_and_hasher(len, S::default()); for vec in list { set.extend(vec); } set } } /// Requires crate feature `"rayon"`. impl ParallelExtend<(T)> for IndexSet where T: Eq + Hash + Send, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator { for vec in collect(iter) { self.extend(vec); } } } /// Requires crate feature `"rayon"`. impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet where T: Copy + Eq + Hash + Send + Sync, S: BuildHasher + Send, { fn par_extend(&mut self, iter: I) where I: IntoParallelIterator { for vec in collect(iter) { self.extend(vec); } } } #[cfg(test)] mod tests { use super::*; #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.par_iter().count(), set.len()); assert_eq!(set.par_iter().count(), insert.len()); insert.par_iter().zip(&set).for_each(|(a, b)| { assert_eq!(a, b); }); (0..insert.len()).into_par_iter().zip(&set).for_each(|(i, v)| { assert_eq!(set.get_index(i).unwrap(), v); }); } #[test] fn partial_eq_and_eq() { let mut set_a = IndexSet::new(); set_a.insert(1); set_a.insert(2); let mut set_b = set_a.clone(); assert!(set_a.par_eq(&set_b)); set_b.swap_remove(&1); assert!(!set_a.par_eq(&set_b)); set_b.insert(3); assert!(!set_a.par_eq(&set_b)); let set_c: IndexSet<_> = set_b.into_par_iter().collect(); assert!(!set_a.par_eq(&set_c)); assert!(!set_c.par_eq(&set_a)); } #[test] fn extend() { let mut set = IndexSet::new(); set.par_extend(vec![&1, &2, &3, &4]); set.par_extend(vec![5, 6]); assert_eq!(set.into_par_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); } #[test] fn comparisons() { let set_a: IndexSet<_> = (0..3).collect(); let 
set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).collect(); assert!(!set_a.par_is_disjoint(&set_a)); assert!(set_a.par_is_subset(&set_a)); assert!(set_a.par_is_superset(&set_a)); assert!(set_a.par_is_disjoint(&set_b)); assert!(set_b.par_is_disjoint(&set_a)); assert!(!set_a.par_is_subset(&set_b)); assert!(!set_b.par_is_subset(&set_a)); assert!(!set_a.par_is_superset(&set_b)); assert!(!set_b.par_is_superset(&set_a)); assert!(!set_a.par_is_disjoint(&set_c)); assert!(!set_c.par_is_disjoint(&set_a)); assert!(set_a.par_is_subset(&set_c)); assert!(!set_c.par_is_subset(&set_a)); assert!(!set_a.par_is_superset(&set_c)); assert!(set_c.par_is_superset(&set_a)); assert!(!set_c.par_is_disjoint(&set_d)); assert!(!set_d.par_is_disjoint(&set_c)); assert!(!set_c.par_is_subset(&set_d)); assert!(!set_d.par_is_subset(&set_c)); assert!(!set_c.par_is_superset(&set_d)); assert!(!set_d.par_is_superset(&set_c)); } #[test] fn iter_comparisons() { use std::iter::empty; fn check<'a, I1, I2>(iter1: I1, iter2: I2) where I1: ParallelIterator, I2: Iterator, { let v1: Vec<_> = iter1.cloned().collect(); let v2: Vec<_> = iter2.collect(); assert_eq!(v1, v2); } let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).rev().collect(); check(set_a.par_difference(&set_a), empty()); check(set_a.par_symmetric_difference(&set_a), empty()); check(set_a.par_intersection(&set_a), 0..3); check(set_a.par_union(&set_a), 0..3); check(set_a.par_difference(&set_b), 0..3); check(set_b.par_difference(&set_a), 3..6); check(set_a.par_symmetric_difference(&set_b), 0..6); check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3)); check(set_a.par_intersection(&set_b), empty()); check(set_b.par_intersection(&set_a), empty()); check(set_a.par_union(&set_b), 0..6); check(set_b.par_union(&set_a), (3..6).chain(0..3)); check(set_a.par_difference(&set_c), 
empty()); check(set_c.par_difference(&set_a), 3..6); check(set_a.par_symmetric_difference(&set_c), 3..6); check(set_c.par_symmetric_difference(&set_a), 3..6); check(set_a.par_intersection(&set_c), 0..3); check(set_c.par_intersection(&set_a), 0..3); check(set_a.par_union(&set_c), 0..6); check(set_c.par_union(&set_a), 0..6); check(set_c.par_difference(&set_d), 0..3); check(set_d.par_difference(&set_c), (6..9).rev()); check(set_c.par_symmetric_difference(&set_d), (0..3).chain((6..9).rev())); check(set_d.par_symmetric_difference(&set_c), (6..9).rev().chain(0..3)); check(set_c.par_intersection(&set_d), 3..6); check(set_d.par_intersection(&set_c), (3..6).rev()); check(set_c.par_union(&set_d), (0..6).chain((6..9).rev())); check(set_d.par_union(&set_c), (3..9).rev().chain(0..3)); } } indexmap-1.2.0/src/serde.rs010064400017510001751000000102301353367262700140130ustar0000000000000000 extern crate serde; use self::serde::ser::{Serialize, Serializer, SerializeMap, SerializeSeq}; use self::serde::de::{Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor}; use self::serde::de::value::{MapDeserializer, SeqDeserializer}; use std::fmt::{self, Formatter}; use std::hash::{BuildHasher, Hash}; use std::marker::PhantomData; use IndexMap; /// Requires crate feature `"serde-1"` impl Serialize for IndexMap where K: Serialize + Hash + Eq, V: Serialize, S: BuildHasher { fn serialize(&self, serializer: T) -> Result where T: Serializer { let mut map_serializer = serializer.serialize_map(Some(self.len()))?; for (key, value) in self { map_serializer.serialize_entry(key, value)?; } map_serializer.end() } } struct OrderMapVisitor(PhantomData<(K, V, S)>); impl<'de, K, V, S> Visitor<'de> for OrderMapVisitor where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher { type Value = IndexMap; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { write!(formatter, "a map") } fn visit_map(self, mut map: A) -> Result where A: MapAccess<'de> 
{ let mut values = IndexMap::with_capacity_and_hasher(map.size_hint().unwrap_or(0), S::default()); while let Some((key, value)) = map.next_entry()? { values.insert(key, value); } Ok(values) } } /// Requires crate feature `"serde-1"` impl<'de, K, V, S> Deserialize<'de> for IndexMap where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: Default + BuildHasher { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_map(OrderMapVisitor(PhantomData)) } } impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap where K: IntoDeserializer<'de, E> + Eq + Hash, V: IntoDeserializer<'de, E>, S: BuildHasher, E: Error, { type Deserializer = MapDeserializer<'de, ::IntoIter, E>; fn into_deserializer(self) -> Self::Deserializer { MapDeserializer::new(self.into_iter()) } } use IndexSet; /// Requires crate feature `"serde-1"` impl Serialize for IndexSet where T: Serialize + Hash + Eq, S: BuildHasher { fn serialize(&self, serializer: Se) -> Result where Se: Serializer { let mut set_serializer = serializer.serialize_seq(Some(self.len()))?; for value in self { set_serializer.serialize_element(value)?; } set_serializer.end() } } struct OrderSetVisitor(PhantomData<(T, S)>); impl<'de, T, S> Visitor<'de> for OrderSetVisitor where T: Deserialize<'de> + Eq + Hash, S: Default + BuildHasher { type Value = IndexSet; fn expecting(&self, formatter: &mut Formatter) -> fmt::Result { write!(formatter, "a set") } fn visit_seq(self, mut seq: A) -> Result where A: SeqAccess<'de> { let mut values = IndexSet::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), S::default()); while let Some(value) = seq.next_element()? 
{ values.insert(value); } Ok(values) } } /// Requires crate feature `"serde-1"` impl<'de, T, S> Deserialize<'de> for IndexSet where T: Deserialize<'de> + Eq + Hash, S: Default + BuildHasher { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { deserializer.deserialize_seq(OrderSetVisitor(PhantomData)) } } impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet where T: IntoDeserializer<'de, E> + Eq + Hash, S: BuildHasher, E: Error, { type Deserializer = SeqDeserializer<::IntoIter, E>; fn into_deserializer(self) -> Self::Deserializer { SeqDeserializer::new(self.into_iter()) } } indexmap-1.2.0/src/set.rs010064400017510001751000001214661353367262700135220ustar0000000000000000//! A hash set implemented using `IndexMap` #[cfg(feature = "rayon")] pub use ::rayon::set as rayon; use std::cmp::Ordering; use std::collections::hash_map::RandomState; use std::fmt; use std::iter::{FromIterator, Chain}; use std::hash::{Hash, BuildHasher}; use std::ops::RangeFull; use std::ops::{BitAnd, BitOr, BitXor, Sub}; use std::slice; use std::vec; use super::{IndexMap, Equivalent, Entries}; type Bucket = super::Bucket; /// A hash set where the iteration order of the values is independent of their /// hash values. /// /// The interface is closely compatible with the standard `HashSet`, but also /// has additional features. /// /// # Order /// /// The values have a consistent order that is determined by the sequence of /// insertion and removal calls on the set. The order does not depend on the /// values or the hash function at all. Note that insertion order and value /// are not affected if a re-insertion is attempted once an element is /// already present. /// /// All iterators traverse the set *in order*. Set operation iterators like /// `union` produce a concatenated order, as do their matching "bitwise" /// operators. See their documentation for specifics. 
/// /// The insertion order is preserved, with **notable exceptions** like the /// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of /// course result in a new order, depending on the sorting order. /// /// # Indices /// /// The values are indexed in a compact range without holes in the range /// `0..self.len()`. For example, the method `.get_full` looks up the index for /// a value, and the method `.get_index` looks up the value by index. /// /// # Examples /// /// ``` /// use indexmap::IndexSet; /// /// // Collects which letters appear in a sentence. /// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect(); /// /// assert!(letters.contains(&'s')); /// assert!(letters.contains(&'t')); /// assert!(letters.contains(&'u')); /// assert!(!letters.contains(&'y')); /// ``` #[derive(Clone)] pub struct IndexSet { map: IndexMap, } impl Entries for IndexSet { type Entry = Bucket; fn into_entries(self) -> Vec { self.map.into_entries() } fn as_entries(&self) -> &[Self::Entry] { self.map.as_entries() } fn as_entries_mut(&mut self) -> &mut [Self::Entry] { self.map.as_entries_mut() } fn with_entries(&mut self, f: F) where F: FnOnce(&mut [Self::Entry]) { self.map.with_entries(f); } } impl fmt::Debug for IndexSet where T: fmt::Debug + Hash + Eq, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if cfg!(not(feature = "test_debug")) { f.debug_set().entries(self.iter()).finish() } else { // Let the inner `IndexMap` print all of its details f.debug_struct("IndexSet").field("map", &self.map).finish() } } } impl IndexSet { /// Create a new set. (Does not allocate.) pub fn new() -> Self { IndexSet { map: IndexMap::new() } } /// Create a new set with capacity for `n` elements. /// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity(n: usize) -> Self { IndexSet { map: IndexMap::with_capacity(n) } } } impl IndexSet { /// Create a new set with capacity for `n` elements. 
/// (Does not allocate if `n` is zero.) /// /// Computes in **O(n)** time. pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self where S: BuildHasher { IndexSet { map: IndexMap::with_capacity_and_hasher(n, hash_builder) } } /// Return the number of elements in the set. /// /// Computes in **O(1)** time. pub fn len(&self) -> usize { self.map.len() } /// Returns true if the set contains no elements. /// /// Computes in **O(1)** time. pub fn is_empty(&self) -> bool { self.map.is_empty() } /// Create a new set with `hash_builder` pub fn with_hasher(hash_builder: S) -> Self where S: BuildHasher { IndexSet { map: IndexMap::with_hasher(hash_builder) } } /// Return a reference to the set's `BuildHasher`. pub fn hasher(&self) -> &S where S: BuildHasher { self.map.hasher() } /// Computes in **O(1)** time. pub fn capacity(&self) -> usize { self.map.capacity() } } impl IndexSet where T: Hash + Eq, S: BuildHasher, { /// Remove all elements in the set, while preserving its capacity. /// /// Computes in **O(n)** time. pub fn clear(&mut self) { self.map.clear(); } /// FIXME Not implemented fully yet pub fn reserve(&mut self, additional: usize) { self.map.reserve(additional); } /// Insert the value into the set. /// /// If an equivalent item already exists in the set, it returns /// `false` leaving the original value in the set and without /// altering its insertion order. Otherwise, it inserts the new /// item and returns `true`. /// /// Computes in **O(1)** time (amortized average). pub fn insert(&mut self, value: T) -> bool { self.map.insert(value, ()).is_none() } /// Insert the value into the set, and get its index. /// /// If an equivalent item already exists in the set, it returns /// the index of the existing item and `false`, leaving the /// original value in the set and without altering its insertion /// order. Otherwise, it inserts the new item and returns the index /// of the inserted item and `true`. /// /// Computes in **O(1)** time (amortized average). 
pub fn insert_full(&mut self, value: T) -> (usize, bool) { use super::map::Entry::*; match self.map.entry(value) { Occupied(e) => (e.index(), false), Vacant(e) => { let index = e.index(); e.insert(()); (index, true) } } } /// Return an iterator over the values of the set, in their order pub fn iter(&self) -> Iter { Iter { iter: self.map.keys().iter } } /// Return an iterator over the values that are in `self` but not `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn difference<'a, S2>(&'a self, other: &'a IndexSet) -> Difference<'a, T, S2> where S2: BuildHasher { Difference { iter: self.iter(), other, } } /// Return an iterator over the values that are in `self` or `other`, /// but not in both. /// /// Values from `self` are produced in their original order, followed by /// values from `other` in their original order. pub fn symmetric_difference<'a, S2>(&'a self, other: &'a IndexSet) -> SymmetricDifference<'a, T, S, S2> where S2: BuildHasher { SymmetricDifference { iter: self.difference(other).chain(other.difference(self)), } } /// Return an iterator over the values that are in both `self` and `other`. /// /// Values are produced in the same order that they appear in `self`. pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet) -> Intersection<'a, T, S2> where S2: BuildHasher { Intersection { iter: self.iter(), other, } } /// Return an iterator over all values that are in `self` or `other`. /// /// Values from `self` are produced in their original order, followed by /// values that are unique to `other` in their original order. pub fn union<'a, S2>(&'a self, other: &'a IndexSet) -> Union<'a, T, S> where S2: BuildHasher { Union { iter: self.iter().chain(other.difference(self)), } } /// Return `true` if an equivalent to `value` exists in the set. /// /// Computes in **O(1)** time (average). 
pub fn contains(&self, value: &Q) -> bool where Q: Hash + Equivalent, { self.map.contains_key(value) } /// Return a reference to the value stored in the set, if it is present, /// else `None`. /// /// Computes in **O(1)** time (average). pub fn get(&self, value: &Q) -> Option<&T> where Q: Hash + Equivalent, { self.map.get_full(value).map(|(_, x, &())| x) } /// Return item index and value pub fn get_full(&self, value: &Q) -> Option<(usize, &T)> where Q: Hash + Equivalent, { self.map.get_full(value).map(|(i, x, &())| (i, x)) } /// Adds a value to the set, replacing the existing value, if any, that is /// equal to the given one. Returns the replaced value. /// /// Computes in **O(1)** time (average). pub fn replace(&mut self, value: T) -> Option { use super::map::Entry::*; match self.map.entry(value) { Vacant(e) => { e.insert(()); None }, Occupied(e) => Some(e.replace_key()), } } /// FIXME Same as .swap_remove /// /// Computes in **O(1)** time (average). #[deprecated(note = "use `swap_remove` or `shift_remove`")] pub fn remove(&mut self, value: &Q) -> bool where Q: Hash + Equivalent, { self.swap_remove(value) } /// Remove the value from the set, and return `true` if it was present. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `false` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_remove(&mut self, value: &Q) -> bool where Q: Hash + Equivalent, { self.map.swap_remove(value).is_some() } /// Remove the value from the set, and return `true` if it was present. /// /// Like `Vec::remove`, the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `false` if `value` was not in the set. /// /// Computes in **O(n)** time (average). 
pub fn shift_remove(&mut self, value: &Q) -> bool where Q: Hash + Equivalent, { self.map.shift_remove(value).is_some() } /// FIXME Same as .swap_take /// /// Computes in **O(1)** time (average). #[deprecated(note = "use `swap_take` or `shift_take`")] pub fn take(&mut self, value: &Q) -> Option where Q: Hash + Equivalent, { self.swap_take(value) } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `value` was not in the set. /// /// Computes in **O(1)** time (average). pub fn swap_take(&mut self, value: &Q) -> Option where Q: Hash + Equivalent, { self.map.swap_remove_full(value).map(|(_, x, ())| x) } /// Removes and returns the value in the set, if any, that is equal to the /// given one. /// /// Like `Vec::remove`, the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `value` was not in the set. /// /// Computes in **O(n)** time (average). pub fn shift_take(&mut self, value: &Q) -> Option where Q: Hash + Equivalent, { self.map.shift_remove_full(value).map(|(_, x, ())| x) } /// Remove the value from the set return it and the index it had. /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Return `None` if `value` was not in the set. pub fn swap_remove_full(&mut self, value: &Q) -> Option<(usize, T)> where Q: Hash + Equivalent, { self.map.swap_remove_full(value).map(|(i, x, ())| (i, x)) } /// Remove the value from the set return it and the index it had. 
/// /// Like `Vec::remove`, the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Return `None` if `value` was not in the set. pub fn shift_remove_full(&mut self, value: &Q) -> Option<(usize, T)> where Q: Hash + Equivalent, { self.map.shift_remove_full(value).map(|(i, x, ())| (i, x)) } /// Remove the last value /// /// Computes in **O(1)** time (average). pub fn pop(&mut self) -> Option { self.map.pop().map(|(x, ())| x) } /// Scan through each value in the set and keep those where the /// closure `keep` returns `true`. /// /// The elements are visited in order, and remaining elements keep their /// order. /// /// Computes in **O(n)** time (average). pub fn retain(&mut self, mut keep: F) where F: FnMut(&T) -> bool, { self.map.retain(move |x, &mut ()| keep(x)) } /// Sort the set’s values by their default ordering. /// /// See `sort_by` for details. pub fn sort(&mut self) where T: Ord, { self.map.sort_keys() } /// Sort the set’s values in place using the comparison function `compare`. /// /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable. pub fn sort_by(&mut self, mut compare: F) where F: FnMut(&T, &T) -> Ordering, { self.map.sort_by(move |a, _, b, _| compare(a, b)); } /// Sort the values of the set and return a by value iterator of /// the values with the result. /// /// The sort is stable. pub fn sorted_by(self, mut cmp: F) -> IntoIter where F: FnMut(&T, &T) -> Ordering { IntoIter { iter: self.map.sorted_by(move |a, &(), b, &()| cmp(a, b)).iter, } } /// Clears the `IndexSet`, returning all values as a drain iterator. /// Keeps the allocated memory for reuse. pub fn drain(&mut self, range: RangeFull) -> Drain { Drain { iter: self.map.drain(range).iter, } } } impl IndexSet { /// Get a value by index /// /// Valid indices are *0 <= index < self.len()* /// /// Computes in **O(1)** time. 
pub fn get_index(&self, index: usize) -> Option<&T> { self.map.get_index(index).map(|(x, &())| x) } /// Remove the key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Like `Vec::swap_remove`, the value is removed by swapping it with the /// last element of the set and popping it off. **This perturbs /// the postion of what used to be the last element!** /// /// Computes in **O(1)** time (average). pub fn swap_remove_index(&mut self, index: usize) -> Option { self.map.swap_remove_index(index).map(|(x, ())| x) } /// Remove the key-value pair by index /// /// Valid indices are *0 <= index < self.len()* /// /// Like `Vec::remove`, the value is removed by shifting all of the /// elements that follow it, preserving their relative order. /// **This perturbs the index of all of those elements!** /// /// Computes in **O(n)** time (average). pub fn shift_remove_index(&mut self, index: usize) -> Option { self.map.shift_remove_index(index).map(|(x, ())| x) } } /// An owning iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`into_iter`] method on [`IndexSet`] /// (provided by the `IntoIterator` trait). See its documentation for more. /// /// [`IndexSet`]: struct.IndexSet.html /// [`into_iter`]: struct.IndexSet.html#method.into_iter pub struct IntoIter { iter: vec::IntoIter>, } impl Iterator for IntoIter { type Item = T; iterator_methods!(Bucket::key); } impl DoubleEndedIterator for IntoIter { fn next_back(&mut self) -> Option { self.iter.next_back().map(Bucket::key) } } impl ExactSizeIterator for IntoIter { fn len(&self) -> usize { self.iter.len() } } impl fmt::Debug for IntoIter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let iter = self.iter.as_slice().iter().map(Bucket::key_ref); f.debug_list().entries(iter).finish() } } /// An iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`iter`] method on [`IndexSet`]. /// See its documentation for more. 
/// /// [`IndexSet`]: struct.IndexSet.html /// [`iter`]: struct.IndexSet.html#method.iter pub struct Iter<'a, T: 'a> { iter: slice::Iter<'a, Bucket>, } impl<'a, T> Iterator for Iter<'a, T> { type Item = &'a T; iterator_methods!(Bucket::key_ref); } impl<'a, T> DoubleEndedIterator for Iter<'a, T> { fn next_back(&mut self) -> Option { self.iter.next_back().map(Bucket::key_ref) } } impl<'a, T> ExactSizeIterator for Iter<'a, T> { fn len(&self) -> usize { self.iter.len() } } impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Self { Iter { iter: self.iter.clone() } } } impl<'a, T: fmt::Debug> fmt::Debug for Iter<'a, T> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A draining iterator over the items of a `IndexSet`. /// /// This `struct` is created by the [`drain`] method on [`IndexSet`]. /// See its documentation for more. /// /// [`IndexSet`]: struct.IndexSet.html /// [`drain`]: struct.IndexSet.html#method.drain pub struct Drain<'a, T: 'a> { iter: vec::Drain<'a, Bucket>, } impl<'a, T> Iterator for Drain<'a, T> { type Item = T; iterator_methods!(Bucket::key); } impl<'a, T> DoubleEndedIterator for Drain<'a, T> { double_ended_iterator_methods!(Bucket::key); } impl<'a, T, S> IntoIterator for &'a IndexSet where T: Hash + Eq, S: BuildHasher, { type Item = &'a T; type IntoIter = Iter<'a, T>; fn into_iter(self) -> Self::IntoIter { self.iter() } } impl IntoIterator for IndexSet where T: Hash + Eq, S: BuildHasher, { type Item = T; type IntoIter = IntoIter; fn into_iter(self) -> Self::IntoIter { IntoIter { iter: self.map.into_iter().iter, } } } impl FromIterator for IndexSet where T: Hash + Eq, S: BuildHasher + Default, { fn from_iter>(iterable: I) -> Self { let iter = iterable.into_iter().map(|x| (x, ())); IndexSet { map: IndexMap::from_iter(iter) } } } impl Extend for IndexSet where T: Hash + Eq, S: BuildHasher, { fn extend>(&mut self, iterable: I) { let iter = iterable.into_iter().map(|x| (x, ())); 
self.map.extend(iter); } } impl<'a, T, S> Extend<&'a T> for IndexSet where T: Hash + Eq + Copy + 'a, S: BuildHasher, { fn extend>(&mut self, iterable: I) { let iter = iterable.into_iter().cloned(); // FIXME: use `copied` in Rust 1.36 self.extend(iter); } } impl Default for IndexSet where S: BuildHasher + Default, { /// Return an empty `IndexSet` fn default() -> Self { IndexSet { map: IndexMap::default() } } } impl PartialEq> for IndexSet where T: Hash + Eq, S1: BuildHasher, S2: BuildHasher { fn eq(&self, other: &IndexSet) -> bool { self.len() == other.len() && self.is_subset(other) } } impl Eq for IndexSet where T: Eq + Hash, S: BuildHasher { } impl IndexSet where T: Eq + Hash, S: BuildHasher { /// Returns `true` if `self` has no elements in common with `other`. pub fn is_disjoint(&self, other: &IndexSet) -> bool where S2: BuildHasher { if self.len() <= other.len() { self.iter().all(move |value| !other.contains(value)) } else { other.iter().all(move |value| !self.contains(value)) } } /// Returns `true` if all elements of `self` are contained in `other`. pub fn is_subset(&self, other: &IndexSet) -> bool where S2: BuildHasher { self.len() <= other.len() && self.iter().all(move |value| other.contains(value)) } /// Returns `true` if all elements of `other` are contained in `self`. pub fn is_superset(&self, other: &IndexSet) -> bool where S2: BuildHasher { other.is_subset(self) } } /// A lazy iterator producing elements in the difference of `IndexSet`s. /// /// This `struct` is created by the [`difference`] method on [`IndexSet`]. /// See its documentation for more. 
///
/// [`IndexSet`]: struct.IndexSet.html
/// [`difference`]: struct.IndexSet.html#method.difference
pub struct Difference<'a, T: 'a, S: 'a> {
    iter: Iter<'a, T>,
    other: &'a IndexSet<T, S>,
}

impl<'a, T, S> Iterator for Difference<'a, T, S>
    where T: Eq + Hash,
          S: BuildHasher
{
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        // Skip values that are also present in `other`.
        while let Some(item) = self.iter.next() {
            if !self.other.contains(item) {
                return Some(item);
            }
        }
        None
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Everything could be filtered out, so the lower bound is zero.
        (0, self.iter.size_hint().1)
    }
}

impl<'a, T, S> DoubleEndedIterator for Difference<'a, T, S>
    where T: Eq + Hash,
          S: BuildHasher
{
    fn next_back(&mut self) -> Option<Self::Item> {
        while let Some(item) = self.iter.next_back() {
            if !self.other.contains(item) {
                return Some(item);
            }
        }
        None
    }
}

impl<'a, T, S> Clone for Difference<'a, T, S> {
    fn clone(&self) -> Self {
        Difference { iter: self.iter.clone(), ..*self }
    }
}

impl<'a, T, S> fmt::Debug for Difference<'a, T, S>
    where T: fmt::Debug + Eq + Hash,
          S: BuildHasher
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self.clone()).finish()
    }
}

/// A lazy iterator producing elements in the intersection of `IndexSet`s.
///
/// This `struct` is created by the [`intersection`] method on [`IndexSet`].
/// See its documentation for more.
/// /// [`IndexSet`]: struct.IndexSet.html /// [`intersection`]: struct.IndexSet.html#method.intersection pub struct Intersection<'a, T: 'a, S: 'a> { iter: Iter<'a, T>, other: &'a IndexSet, } impl<'a, T, S> Iterator for Intersection<'a, T, S> where T: Eq + Hash, S: BuildHasher { type Item = &'a T; fn next(&mut self) -> Option { while let Some(item) = self.iter.next() { if self.other.contains(item) { return Some(item); } } None } fn size_hint(&self) -> (usize, Option) { (0, self.iter.size_hint().1) } } impl<'a, T, S> DoubleEndedIterator for Intersection<'a, T, S> where T: Eq + Hash, S: BuildHasher { fn next_back(&mut self) -> Option { while let Some(item) = self.iter.next_back() { if self.other.contains(item) { return Some(item); } } None } } impl<'a, T, S> Clone for Intersection<'a, T, S> { fn clone(&self) -> Self { Intersection { iter: self.iter.clone(), ..*self } } } impl<'a, T, S> fmt::Debug for Intersection<'a, T, S> where T: fmt::Debug + Eq + Hash, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A lazy iterator producing elements in the symmetric difference of `IndexSet`s. /// /// This `struct` is created by the [`symmetric_difference`] method on /// [`IndexSet`]. See its documentation for more. 
/// /// [`IndexSet`]: struct.IndexSet.html /// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference pub struct SymmetricDifference<'a, T: 'a, S1: 'a, S2: 'a> { iter: Chain, Difference<'a, T, S1>>, } impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next() } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn fold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B { self.iter.fold(init, f) } } impl<'a, T, S1, S2> DoubleEndedIterator for SymmetricDifference<'a, T, S1, S2> where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn next_back(&mut self) -> Option { self.iter.next_back() } } impl<'a, T, S1, S2> Clone for SymmetricDifference<'a, T, S1, S2> { fn clone(&self) -> Self { SymmetricDifference { iter: self.iter.clone() } } } impl<'a, T, S1, S2> fmt::Debug for SymmetricDifference<'a, T, S1, S2> where T: fmt::Debug + Eq + Hash, S1: BuildHasher, S2: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } /// A lazy iterator producing elements in the union of `IndexSet`s. /// /// This `struct` is created by the [`union`] method on [`IndexSet`]. /// See its documentation for more. 
/// /// [`IndexSet`]: struct.IndexSet.html /// [`union`]: struct.IndexSet.html#method.union pub struct Union<'a, T: 'a, S: 'a> { iter: Chain, Difference<'a, T, S>>, } impl<'a, T, S> Iterator for Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { type Item = &'a T; fn next(&mut self) -> Option { self.iter.next() } fn size_hint(&self) -> (usize, Option) { self.iter.size_hint() } fn fold(self, init: B, f: F) -> B where F: FnMut(B, Self::Item) -> B { self.iter.fold(init, f) } } impl<'a, T, S> DoubleEndedIterator for Union<'a, T, S> where T: Eq + Hash, S: BuildHasher, { fn next_back(&mut self) -> Option { self.iter.next_back() } } impl<'a, T, S> Clone for Union<'a, T, S> { fn clone(&self) -> Self { Union { iter: self.iter.clone() } } } impl<'a, T, S> fmt::Debug for Union<'a, T, S> where T: fmt::Debug + Eq + Hash, S: BuildHasher, { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } impl<'a, 'b, T, S1, S2> BitAnd<&'b IndexSet> for &'a IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set intersection, cloned into a new set. /// /// Values are collected in the same order that they appear in `self`. fn bitand(self, other: &'b IndexSet) -> Self::Output { self.intersection(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> BitOr<&'b IndexSet> for &'a IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set union, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values that are unique to `other` in their original order. 
fn bitor(self, other: &'b IndexSet) -> Self::Output { self.union(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> BitXor<&'b IndexSet> for &'a IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set symmetric-difference, cloned into a new set. /// /// Values from `self` are collected in their original order, followed by /// values from `other` in their original order. fn bitxor(self, other: &'b IndexSet) -> Self::Output { self.symmetric_difference(other).cloned().collect() } } impl<'a, 'b, T, S1, S2> Sub<&'b IndexSet> for &'a IndexSet where T: Eq + Hash + Clone, S1: BuildHasher + Default, S2: BuildHasher, { type Output = IndexSet; /// Returns the set difference, cloned into a new set. /// /// Values are collected in the same order that they appear in `self`. fn sub(self, other: &'b IndexSet) -> Self::Output { self.difference(other).cloned().collect() } } #[cfg(test)] mod tests { use super::*; use util::enumerate; #[test] fn it_works() { let mut set = IndexSet::new(); assert_eq!(set.is_empty(), true); set.insert(1); set.insert(1); assert_eq!(set.len(), 1); assert!(set.get(&1).is_some()); assert_eq!(set.is_empty(), false); } #[test] fn new() { let set = IndexSet::::new(); println!("{:?}", set); assert_eq!(set.capacity(), 0); assert_eq!(set.len(), 0); assert_eq!(set.is_empty(), true); } #[test] fn insert() { let insert = [0, 4, 2, 12, 8, 7, 11, 5]; let not_present = [1, 3, 6, 9, 10]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in ¬_present { assert!(set.get(&elt).is_none()); } } #[test] fn insert_full() { let insert = vec![9, 2, 7, 1, 4, 6, 13]; let present = vec![1, 6, 2]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(set.len(), i); let (index, 
success) = set.insert_full(elt); assert!(success); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), i + 1); } let len = set.len(); for &elt in &present { let (index, success) = set.insert_full(elt); assert!(!success); assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0)); assert_eq!(set.len(), len); } } #[test] fn insert_2() { let mut set = IndexSet::with_capacity(16); let mut values = vec![]; values.extend(0..16); values.extend(128..267); for &i in &values { let old_set = set.clone(); set.insert(i); for value in old_set.iter() { if set.get(value).is_none() { println!("old_set: {:?}", old_set); println!("set: {:?}", set); panic!("did not find {} in set", value); } } } for &i in &values { assert!(set.get(&i).is_some(), "did not find {}", i); } } #[test] fn insert_dup() { let mut elements = vec![0, 2, 4, 6, 8]; let mut set: IndexSet = elements.drain(..).collect(); { let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(i, 0); assert_eq!(*v, 0); } { let inserted = set.insert(0); let (i, v) = set.get_full(&0).unwrap(); assert_eq!(set.len(), 5); assert_eq!(inserted, false); assert_eq!(i, 0); assert_eq!(*v, 0); } } #[test] fn insert_order() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) { assert_eq!(a, b); } for (i, v) in (0..insert.len()).zip(set.iter()) { assert_eq!(set.get_index(i).unwrap(), v); } } #[test] fn grow() { let insert = [0, 4, 2, 12, 8, 7, 11]; let not_present = [1, 3, 6, 9, 10]; let mut set = IndexSet::with_capacity(insert.len()); for (i, &elt) in enumerate(&insert) { assert_eq!(set.len(), i); set.insert(elt); assert_eq!(set.len(), i + 1); assert_eq!(set.get(&elt), Some(&elt)); } println!("{:?}", set); for &elt in &insert { set.insert(elt * 10); } for &elt in &insert { set.insert(elt 
* 100); } for (i, &elt) in insert.iter().cycle().enumerate().take(100) { set.insert(elt * 100 + i as i32); } println!("{:?}", set); for &elt in ¬_present { assert!(set.get(&elt).is_none()); } } #[test] fn remove() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } assert_eq!(set.iter().count(), set.len()); assert_eq!(set.iter().count(), insert.len()); for (a, b) in insert.iter().zip(set.iter()) { assert_eq!(a, b); } let remove_fail = [99, 77]; let remove = [4, 12, 8, 7]; for &value in &remove_fail { assert!(set.swap_remove_full(&value).is_none()); } println!("{:?}", set); for &value in &remove { //println!("{:?}", set); let index = set.get_full(&value).unwrap().0; assert_eq!(set.swap_remove_full(&value), Some((index, value))); } println!("{:?}", set); for value in &insert { assert_eq!(set.get(value).is_some(), !remove.contains(value)); } assert_eq!(set.len(), insert.len() - remove.len()); assert_eq!(set.iter().count(), insert.len() - remove.len()); } #[test] fn swap_remove_index() { let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23]; let mut set = IndexSet::new(); for &elt in &insert { set.insert(elt); } let mut vector = insert.to_vec(); let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1]; // check that the same swap remove sequence on vec and set // have the same result. 
for &rm in remove_sequence { let out_vec = vector.swap_remove(rm); let out_set = set.swap_remove_index(rm).unwrap(); assert_eq!(out_vec, out_set); } assert_eq!(vector.len(), set.len()); for (a, b) in vector.iter().zip(set.iter()) { assert_eq!(a, b); } } #[test] fn partial_eq_and_eq() { let mut set_a = IndexSet::new(); set_a.insert(1); set_a.insert(2); let mut set_b = set_a.clone(); assert_eq!(set_a, set_b); set_b.swap_remove(&1); assert_ne!(set_a, set_b); let set_c: IndexSet<_> = set_b.into_iter().collect(); assert_ne!(set_a, set_c); assert_ne!(set_c, set_a); } #[test] fn extend() { let mut set = IndexSet::new(); set.extend(vec![&1, &2, &3, &4]); set.extend(vec![5, 6]); assert_eq!(set.into_iter().collect::>(), vec![1, 2, 3, 4, 5, 6]); } #[test] fn comparisons() { let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).collect(); assert!(!set_a.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_a)); assert!(set_a.is_superset(&set_a)); assert!(set_a.is_disjoint(&set_b)); assert!(set_b.is_disjoint(&set_a)); assert!(!set_a.is_subset(&set_b)); assert!(!set_b.is_subset(&set_a)); assert!(!set_a.is_superset(&set_b)); assert!(!set_b.is_superset(&set_a)); assert!(!set_a.is_disjoint(&set_c)); assert!(!set_c.is_disjoint(&set_a)); assert!(set_a.is_subset(&set_c)); assert!(!set_c.is_subset(&set_a)); assert!(!set_a.is_superset(&set_c)); assert!(set_c.is_superset(&set_a)); assert!(!set_c.is_disjoint(&set_d)); assert!(!set_d.is_disjoint(&set_c)); assert!(!set_c.is_subset(&set_d)); assert!(!set_d.is_subset(&set_c)); assert!(!set_c.is_superset(&set_d)); assert!(!set_d.is_superset(&set_c)); } #[test] fn iter_comparisons() { use std::iter::empty; fn check<'a, I1, I2>(iter1: I1, iter2: I2) where I1: Iterator, I2: Iterator, { assert!(iter1.cloned().eq(iter2)); } let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = 
(0..6).collect(); let set_d: IndexSet<_> = (3..9).rev().collect(); check(set_a.difference(&set_a), empty()); check(set_a.symmetric_difference(&set_a), empty()); check(set_a.intersection(&set_a), 0..3); check(set_a.union(&set_a), 0..3); check(set_a.difference(&set_b), 0..3); check(set_b.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_b), 0..6); check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3)); check(set_a.intersection(&set_b), empty()); check(set_b.intersection(&set_a), empty()); check(set_a.union(&set_b), 0..6); check(set_b.union(&set_a), (3..6).chain(0..3)); check(set_a.difference(&set_c), empty()); check(set_c.difference(&set_a), 3..6); check(set_a.symmetric_difference(&set_c), 3..6); check(set_c.symmetric_difference(&set_a), 3..6); check(set_a.intersection(&set_c), 0..3); check(set_c.intersection(&set_a), 0..3); check(set_a.union(&set_c), 0..6); check(set_c.union(&set_a), 0..6); check(set_c.difference(&set_d), 0..3); check(set_d.difference(&set_c), (6..9).rev()); check(set_c.symmetric_difference(&set_d), (0..3).chain((6..9).rev())); check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3)); check(set_c.intersection(&set_d), 3..6); check(set_d.intersection(&set_c), (3..6).rev()); check(set_c.union(&set_d), (0..6).chain((6..9).rev())); check(set_d.union(&set_c), (3..9).rev().chain(0..3)); } #[test] fn ops() { let empty = IndexSet::::new(); let set_a: IndexSet<_> = (0..3).collect(); let set_b: IndexSet<_> = (3..6).collect(); let set_c: IndexSet<_> = (0..6).collect(); let set_d: IndexSet<_> = (3..9).rev().collect(); // FIXME: #[allow(clippy::eq_op)] in Rust 1.31 #[cfg_attr(feature = "cargo-clippy", allow(renamed_and_removed_lints, eq_op))] { assert_eq!(&set_a & &set_a, set_a); assert_eq!(&set_a | &set_a, set_a); assert_eq!(&set_a ^ &set_a, empty); assert_eq!(&set_a - &set_a, empty); } assert_eq!(&set_a & &set_b, empty); assert_eq!(&set_b & &set_a, empty); assert_eq!(&set_a | &set_b, set_c); assert_eq!(&set_b | &set_a, set_c); 
assert_eq!(&set_a ^ &set_b, set_c); assert_eq!(&set_b ^ &set_a, set_c); assert_eq!(&set_a - &set_b, set_a); assert_eq!(&set_b - &set_a, set_b); assert_eq!(&set_a & &set_c, set_a); assert_eq!(&set_c & &set_a, set_a); assert_eq!(&set_a | &set_c, set_c); assert_eq!(&set_c | &set_a, set_c); assert_eq!(&set_a ^ &set_c, set_b); assert_eq!(&set_c ^ &set_a, set_b); assert_eq!(&set_a - &set_c, empty); assert_eq!(&set_c - &set_a, set_b); assert_eq!(&set_c & &set_d, set_b); assert_eq!(&set_d & &set_c, set_b); assert_eq!(&set_c | &set_d, &set_a | &set_d); assert_eq!(&set_d | &set_c, &set_a | &set_d); assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b)); assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b)); assert_eq!(&set_c - &set_d, set_a); assert_eq!(&set_d - &set_c, &set_d - &set_b); } } indexmap-1.2.0/src/util.rs010064400017510001751000000006501353516575300136720ustar0000000000000000 use std::iter::Enumerate; use std::mem::size_of; pub fn third(t: (A, B, C)) -> C { t.2 } pub fn enumerate(iterable: I) -> Enumerate where I: IntoIterator { iterable.into_iter().enumerate() } /// return the number of steps from a to b pub fn ptrdistance(a: *const T, b: *const T) -> usize { debug_assert!(a as usize <= b as usize); (b as usize - a as usize) / size_of::() } indexmap-1.2.0/tests/equivalent_trait.rs010064400017510001751000000021011353367262700166420ustar0000000000000000 #[macro_use] extern crate indexmap; use indexmap::Equivalent; use std::hash::Hash; #[derive(Debug, Hash)] pub struct Pair(pub A, pub B); impl PartialEq<(A, B)> for Pair where C: PartialEq, D: PartialEq, { fn eq(&self, rhs: &(A, B)) -> bool { self.0 == rhs.0 && self.1 == rhs.1 } } impl Equivalent for Pair where Pair: PartialEq, A: Hash + Eq, B: Hash + Eq, { fn equivalent(&self, other: &X) -> bool { *self == *other } } #[test] fn test_lookup() { let s = String::from; let map = indexmap! 
{ (s("a"), s("b")) => 1, (s("a"), s("x")) => 2, }; assert!(map.contains_key(&Pair("a", "b"))); assert!(!map.contains_key(&Pair("b", "a"))); } #[test] fn test_string_str() { let s = String::from; let mut map = indexmap! { s("a") => 1, s("b") => 2, s("x") => 3, s("y") => 4, }; assert!(map.contains_key("a")); assert!(!map.contains_key("z")); assert_eq!(map.swap_remove("b"), Some(2)); } indexmap-1.2.0/tests/macros_full_path.rs010064400017510001751000000004051353155442400166010ustar0000000000000000 #[test] fn test_create_map() { let _m = indexmap::indexmap! { 1 => 2, 7 => 1, 2 => 2, 3 => 3, }; } #[test] fn test_create_set() { let _s = indexmap::indexset! { 1, 7, 2, 3, }; } indexmap-1.2.0/tests/quick.rs010064400017510001751000000257751353367262700144240ustar0000000000000000 extern crate indexmap; extern crate itertools; #[macro_use] extern crate quickcheck; extern crate rand; extern crate fnv; use indexmap::IndexMap; use itertools::Itertools; use quickcheck::Arbitrary; use quickcheck::Gen; use rand::Rng; use fnv::FnvHasher; use std::hash::{BuildHasher, BuildHasherDefault}; type FnvBuilder = BuildHasherDefault; type OrderMapFnv = IndexMap; use std::collections::HashSet; use std::collections::HashMap; use std::iter::FromIterator; use std::hash::Hash; use std::fmt::Debug; use std::ops::Deref; use std::cmp::min; use indexmap::map::Entry as OEntry; use std::collections::hash_map::Entry as HEntry; fn set<'a, T: 'a, I>(iter: I) -> HashSet where I: IntoIterator, T: Copy + Hash + Eq { iter.into_iter().cloned().collect() } fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap where I: IntoIterator, T: Copy + Hash + Eq, { IndexMap::from_iter(iter.into_iter().cloned().map(|k| (k, ()))) } quickcheck! 
{ fn contains(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } insert.iter().all(|&key| map.get(&key).is_some()) } fn contains_not(insert: Vec, not: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let nots = &set(¬) - &set(&insert); nots.iter().all(|&key| map.get(&key).is_none()) } fn insert_remove(insert: Vec, remove: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } for &key in &remove { map.swap_remove(&key); } let elements = &set(&insert) - &set(&remove); map.len() == elements.len() && map.iter().count() == elements.len() && elements.iter().all(|k| map.get(k).is_some()) } fn insertion_order(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } itertools::assert_equal(insert.iter().unique(), map.keys()); true } fn pop(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let mut pops = Vec::new(); while let Some((key, _v)) = map.pop() { pops.push(key); } pops.reverse(); itertools::assert_equal(insert.iter().unique(), &pops); true } fn with_cap(cap: usize) -> bool { let map: IndexMap = IndexMap::with_capacity(cap); println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize); map.capacity() >= cap } fn drain(insert: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } let mut clone = map.clone(); let drained = clone.drain(..); for (key, _) in drained { map.swap_remove(&key); } map.is_empty() } fn shift_remove(insert: Vec, remove: Vec) -> bool { let mut map = IndexMap::new(); for &key in &insert { map.insert(key, ()); } for &key in &remove { map.shift_remove(&key); } let elements = &set(&insert) - &set(&remove); // Check that order is preserved after removals let mut iter = map.keys(); for &key in insert.iter().unique() { if elements.contains(&key) { 
assert_eq!(Some(key), iter.next().cloned()); } } map.len() == elements.len() && map.iter().count() == elements.len() && elements.iter().all(|k| map.get(k).is_some()) } } use Op::*; #[derive(Copy, Clone, Debug)] enum Op { Add(K, V), Remove(K), AddEntry(K, V), RemoveEntry(K), } impl Arbitrary for Op where K: Arbitrary, V: Arbitrary, { fn arbitrary(g: &mut G) -> Self { match g.gen::() % 4 { 0 => Add(K::arbitrary(g), V::arbitrary(g)), 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)), 2 => Remove(K::arbitrary(g)), _ => RemoveEntry(K::arbitrary(g)), } } } fn do_ops(ops: &[Op], a: &mut IndexMap, b: &mut HashMap) where K: Hash + Eq + Clone, V: Clone, S: BuildHasher, { for op in ops { match *op { Add(ref k, ref v) => { a.insert(k.clone(), v.clone()); b.insert(k.clone(), v.clone()); } AddEntry(ref k, ref v) => { a.entry(k.clone()).or_insert_with(|| v.clone()); b.entry(k.clone()).or_insert_with(|| v.clone()); } Remove(ref k) => { a.swap_remove(k); b.remove(k); } RemoveEntry(ref k) => { if let OEntry::Occupied(ent) = a.entry(k.clone()) { ent.swap_remove_entry(); } if let HEntry::Occupied(ent) = b.entry(k.clone()) { ent.remove_entry(); } } } //println!("{:?}", a); } } fn assert_maps_equivalent(a: &IndexMap, b: &HashMap) -> bool where K: Hash + Eq + Debug, V: Eq + Debug, { assert_eq!(a.len(), b.len()); assert_eq!(a.iter().next().is_some(), b.iter().next().is_some()); for key in a.keys() { assert!(b.contains_key(key), "b does not contain {:?}", key); } for key in b.keys() { assert!(a.get(key).is_some(), "a does not contain {:?}", key); } for key in a.keys() { assert_eq!(a[key], b[key]); } true } quickcheck! 
{ fn operations_i8(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); assert_maps_equivalent(&map, &reference) } fn operations_string(ops: Vec>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); assert_maps_equivalent(&map, &reference) } fn keys_values(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); let mut visit = IndexMap::new(); for (k, v) in map.keys().zip(map.values()) { assert_eq!(&map[k], v); assert!(!visit.contains_key(k)); visit.insert(*k, *v); } assert_eq!(visit.len(), reference.len()); true } fn keys_values_mut(ops: Large>>) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops, &mut map, &mut reference); let mut visit = IndexMap::new(); let keys = Vec::from_iter(map.keys().cloned()); for (k, v) in keys.iter().zip(map.values_mut()) { assert_eq!(&reference[k], v); assert!(!visit.contains_key(k)); visit.insert(*k, *v); } assert_eq!(visit.len(), reference.len()); true } fn equality(ops1: Vec>, removes: Vec) -> bool { let mut map = IndexMap::new(); let mut reference = HashMap::new(); do_ops(&ops1, &mut map, &mut reference); let mut ops2 = ops1.clone(); for &r in &removes { if !ops2.is_empty() { let i = r % ops2.len(); ops2.remove(i); } } let mut map2 = OrderMapFnv::default(); let mut reference2 = HashMap::new(); do_ops(&ops2, &mut map2, &mut reference2); assert_eq!(map == map2, reference == reference2); true } fn retain_ordered(keys: Large>, remove: Large>) -> () { let mut map = indexmap(keys.iter()); let initial_map = map.clone(); // deduplicated in-order input let remove_map = indexmap(remove.iter()); let keys_s = set(keys.iter()); let remove_s = set(remove.iter()); let answer = &keys_s - &remove_s; map.retain(|k, _| !remove_map.contains_key(k)); // check the values assert_eq!(map.len(), 
answer.len()); for key in &answer { assert!(map.contains_key(key)); } // check the order itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k))); } fn sort_1(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); let mut answer = keyvals.0; answer.sort_by_key(|t| t.0); // reverse dedup: Because IndexMap::from_iter keeps the last value for // identical keys answer.reverse(); answer.dedup_by_key(|t| t.0); answer.reverse(); map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2)); // check it contains all the values it should for &(key, val) in &answer { assert_eq!(map[&key], val); } // check the order let mapv = Vec::from_iter(map); assert_eq!(answer, mapv); } fn sort_2(keyvals: Large>) -> () { let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec()); map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2)); assert_sorted_by_key(map, |t| t.1); } } fn assert_sorted_by_key(iterable: I, key: Key) where I: IntoIterator, I::Item: Ord + Clone + Debug, Key: Fn(&I::Item) -> X, X: Ord, { let input = Vec::from_iter(iterable); let mut sorted = input.clone(); sorted.sort_by_key(key); assert_eq!(input, sorted); } #[derive(Clone, Debug, Hash, PartialEq, Eq)] struct Alpha(String); impl Deref for Alpha { type Target = String; fn deref(&self) -> &String { &self.0 } } const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz"; impl Arbitrary for Alpha { fn arbitrary(g: &mut G) -> Self { let len = g.next_u32() % g.size() as u32; let len = min(len, 16); Alpha((0..len).map(|_| { ALPHABET[g.next_u32() as usize % ALPHABET.len()] as char }).collect()) } fn shrink(&self) -> Box> { Box::new((**self).shrink().map(Alpha)) } } /// quickcheck Arbitrary adaptor -- make a larger vec #[derive(Clone, Debug)] struct Large(T); impl Deref for Large { type Target = T; fn deref(&self) -> &T { &self.0 } } impl Arbitrary for Large> where T: Arbitrary { fn arbitrary(g: &mut G) -> Self { let len = g.next_u32() % (g.size() * 10) as u32; 
Large((0..len).map(|_| T::arbitrary(g)).collect()) } fn shrink(&self) -> Box> { Box::new((**self).shrink().map(Large)) } } indexmap-1.2.0/tests/serde.rs010064400017510001751000000031331337460514400143630ustar0000000000000000#![cfg(feature = "serde-1")] #[macro_use] extern crate indexmap; extern crate serde_test; extern crate fnv; use serde_test::{Token, assert_tokens}; #[test] fn test_serde() { let map = indexmap! { 1 => 2, 3 => 4 }; assert_tokens(&map, &[Token::Map { len: Some(2) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::MapEnd]); } #[test] fn test_serde_set() { let set = indexset! { 1, 2, 3, 4 }; assert_tokens(&set, &[Token::Seq { len: Some(4) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::SeqEnd]); } #[test] fn test_serde_fnv_hasher() { let mut map: ::indexmap::IndexMap = Default::default(); map.insert(1, 2); map.insert(3, 4); assert_tokens(&map, &[Token::Map { len: Some(2) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::MapEnd]); } #[test] fn test_serde_map_fnv_hasher() { let mut set: ::indexmap::IndexSet = Default::default(); set.extend(1..5); assert_tokens(&set, &[Token::Seq { len: Some(4) }, Token::I32(1), Token::I32(2), Token::I32(3), Token::I32(4), Token::SeqEnd]); } indexmap-1.2.0/tests/tests.rs010064400017510001751000000010371337460514400144240ustar0000000000000000 #[macro_use] extern crate indexmap; extern crate itertools; #[test] fn test_sort() { let m = indexmap! { 1 => 2, 7 => 1, 2 => 2, 3 => 3, }; itertools::assert_equal(m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)), vec![(7, 1), (1, 2), (2, 2), (3, 3)]); } #[test] fn test_sort_set() { let s = indexset! { 1, 7, 2, 3, }; itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]); } indexmap-1.2.0/.cargo_vcs_info.json0000644000000001120000000000000126720ustar00{ "git": { "sha1": "45dad09a97f7e9474dbfb8590defecf2e4d52a9e" } }