pax_global_header00006660000000000000000000000064143037134510014513gustar00rootroot0000000000000052 comment=8c0b94e793a66495e0b1f34a5eb26bd7dc672db0 abseil-20220623.1/000077500000000000000000000000001430371345100133535ustar00rootroot00000000000000abseil-20220623.1/.clang-format000066400000000000000000000000631430371345100157250ustar00rootroot00000000000000--- Language: Cpp BasedOnStyle: Google ... abseil-20220623.1/.github/000077500000000000000000000000001430371345100147135ustar00rootroot00000000000000abseil-20220623.1/.github/ISSUE_TEMPLATE/000077500000000000000000000000001430371345100170765ustar00rootroot00000000000000abseil-20220623.1/.github/ISSUE_TEMPLATE/00-bug_report.md000066400000000000000000000022321430371345100220040ustar00rootroot00000000000000--- name: Bug report about: Create a report to help us improve title: '' labels: 'bug' assignees: '' --- **Describe the bug** Include a clear and concise description of what the problem is, including what you expected to happen, and what actually happened. **Steps to reproduce the bug** It's important that we are able to reproduce the problem that you are experiencing. Please provide all code and relevant steps to reproduce the problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the problem are also helpful. **What version of Abseil are you using?** **What operating system and version are you using** If you are using a Linux distribution please include the name and version of the distribution as well. **What compiler and version are you using?** Please include the output of `gcc -v` or `clang -v`, or the equivalent for your compiler. **What build system are you using?** Please include the output of `bazel --version` or `cmake --version`, or the equivalent for your build system. **Additional context** Add any other context about the problem here. 
abseil-20220623.1/.github/ISSUE_TEMPLATE/90-question.md000066400000000000000000000001571430371345100215200ustar00rootroot00000000000000--- name: Question about: Have a question? Ask us anything! :-) title: '' labels: 'question' assignees: '' --- abseil-20220623.1/.github/ISSUE_TEMPLATE/config.yml000066400000000000000000000000331430371345100210620ustar00rootroot00000000000000blank_issues_enabled: true abseil-20220623.1/.gitignore000066400000000000000000000004711430371345100153450ustar00rootroot00000000000000# Ignore all bazel-* symlinks. /bazel-* # Ignore Bazel verbose explanations --verbose_explanations # Ignore CMake usual build directory build # Ignore Vim files *.swp # Ignore QtCreator Project file CMakeLists.txt.user # Ignore VS Code files .vscode/* # Ignore generated python artifacts *.pyc copts/__pycache__/ abseil-20220623.1/ABSEIL_ISSUE_TEMPLATE.md000066400000000000000000000021071430371345100170770ustar00rootroot00000000000000Please submit a new Abseil Issue using the template below: ## [Short title of proposed API change(s)] -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- ## Background [Provide the background information that is required in order to evaluate the proposed API changes. No controversial claims should be made here. If there are design constraints that need to be considered, they should be presented here **along with justification for those constraints**. Linking to other docs is good, but please keep the **pertinent information as self contained** as possible in this section.] ## Proposed API Change (s) [Please clearly describe the API change(s) being proposed. If multiple changes, please keep them clearly distinguished. When possible, **use example code snippets to illustrate before-after API usages**. List pros-n-cons. Highlight the main questions that you want to be answered. 
Given the Abseil project compatibility requirements, describe why the API change is safe.] abseil-20220623.1/AUTHORS000066400000000000000000000004471430371345100144300ustar00rootroot00000000000000# This is the list of Abseil authors for copyright purposes. # # This does not necessarily list everyone who has contributed code, since in # some cases, their employer may be the copyright holder. To see the full list # of contributors, see the revision history in source control. Google Inc. abseil-20220623.1/BUILD.bazel000066400000000000000000000014151430371345100152320ustar00rootroot00000000000000# # Copyright 2020 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # package(default_visibility = ["//visibility:public"]) licenses(["notice"]) # Apache 2.0 # Expose license for external usage through bazel. 
exports_files([ "AUTHORS", "LICENSE", ]) abseil-20220623.1/CMake/000077500000000000000000000000001430371345100143335ustar00rootroot00000000000000abseil-20220623.1/CMake/AbseilDll.cmake000066400000000000000000000375361430371345100172060ustar00rootroot00000000000000include(CMakeParseArguments) include(GNUInstallDirs) set(ABSL_INTERNAL_DLL_FILES "algorithm/algorithm.h" "algorithm/container.h" "base/attributes.h" "base/call_once.h" "base/casts.h" "base/config.h" "base/const_init.h" "base/dynamic_annotations.h" "base/internal/atomic_hook.h" "base/internal/cycleclock.cc" "base/internal/cycleclock.h" "base/internal/direct_mmap.h" "base/internal/dynamic_annotations.h" "base/internal/endian.h" "base/internal/errno_saver.h" "base/internal/fast_type_id.h" "base/internal/hide_ptr.h" "base/internal/identity.h" "base/internal/invoke.h" "base/internal/inline_variable.h" "base/internal/low_level_alloc.cc" "base/internal/low_level_alloc.h" "base/internal/low_level_scheduling.h" "base/internal/per_thread_tls.h" "base/internal/prefetch.h" "base/internal/pretty_function.h" "base/internal/raw_logging.cc" "base/internal/raw_logging.h" "base/internal/scheduling_mode.h" "base/internal/scoped_set_env.cc" "base/internal/scoped_set_env.h" "base/internal/strerror.h" "base/internal/strerror.cc" "base/internal/spinlock.cc" "base/internal/spinlock.h" "base/internal/spinlock_wait.cc" "base/internal/spinlock_wait.h" "base/internal/sysinfo.cc" "base/internal/sysinfo.h" "base/internal/thread_annotations.h" "base/internal/thread_identity.cc" "base/internal/thread_identity.h" "base/internal/throw_delegate.cc" "base/internal/throw_delegate.h" "base/internal/tsan_mutex_interface.h" "base/internal/unaligned_access.h" "base/internal/unscaledcycleclock.cc" "base/internal/unscaledcycleclock.h" "base/log_severity.cc" "base/log_severity.h" "base/macros.h" "base/optimization.h" "base/options.h" "base/policy_checks.h" "base/port.h" "base/thread_annotations.h" "cleanup/cleanup.h" "cleanup/internal/cleanup.h" 
"container/btree_map.h" "container/btree_set.h" "container/fixed_array.h" "container/flat_hash_map.h" "container/flat_hash_set.h" "container/inlined_vector.h" "container/internal/btree.h" "container/internal/btree_container.h" "container/internal/common.h" "container/internal/compressed_tuple.h" "container/internal/container_memory.h" "container/internal/counting_allocator.h" "container/internal/hash_function_defaults.h" "container/internal/hash_policy_traits.h" "container/internal/hashtable_debug.h" "container/internal/hashtable_debug_hooks.h" "container/internal/hashtablez_sampler.cc" "container/internal/hashtablez_sampler.h" "container/internal/hashtablez_sampler_force_weak_definition.cc" "container/internal/inlined_vector.h" "container/internal/layout.h" "container/internal/node_slot_policy.h" "container/internal/raw_hash_map.h" "container/internal/raw_hash_set.cc" "container/internal/raw_hash_set.h" "container/internal/tracked.h" "container/node_hash_map.h" "container/node_hash_set.h" "debugging/failure_signal_handler.cc" "debugging/failure_signal_handler.h" "debugging/leak_check.h" "debugging/stacktrace.cc" "debugging/stacktrace.h" "debugging/symbolize.cc" "debugging/symbolize.h" "debugging/internal/address_is_readable.cc" "debugging/internal/address_is_readable.h" "debugging/internal/demangle.cc" "debugging/internal/demangle.h" "debugging/internal/elf_mem_image.cc" "debugging/internal/elf_mem_image.h" "debugging/internal/examine_stack.cc" "debugging/internal/examine_stack.h" "debugging/internal/stack_consumption.cc" "debugging/internal/stack_consumption.h" "debugging/internal/stacktrace_config.h" "debugging/internal/symbolize.h" "debugging/internal/vdso_support.cc" "debugging/internal/vdso_support.h" "functional/any_invocable.h" "functional/internal/front_binder.h" "functional/bind_front.h" "functional/function_ref.h" "functional/internal/any_invocable.h" "functional/internal/function_ref.h" "hash/hash.h" "hash/internal/city.h" "hash/internal/city.cc" 
"hash/internal/hash.h" "hash/internal/hash.cc" "hash/internal/spy_hash_state.h" "hash/internal/low_level_hash.h" "hash/internal/low_level_hash.cc" "memory/memory.h" "meta/type_traits.h" "numeric/bits.h" "numeric/int128.cc" "numeric/int128.h" "numeric/internal/bits.h" "numeric/internal/representation.h" "profiling/internal/exponential_biased.cc" "profiling/internal/exponential_biased.h" "profiling/internal/periodic_sampler.cc" "profiling/internal/periodic_sampler.h" "profiling/internal/sample_recorder.h" "random/bernoulli_distribution.h" "random/beta_distribution.h" "random/bit_gen_ref.h" "random/discrete_distribution.cc" "random/discrete_distribution.h" "random/distributions.h" "random/exponential_distribution.h" "random/gaussian_distribution.cc" "random/gaussian_distribution.h" "random/internal/distribution_caller.h" "random/internal/fastmath.h" "random/internal/fast_uniform_bits.h" "random/internal/generate_real.h" "random/internal/iostream_state_saver.h" "random/internal/mock_helpers.h" "random/internal/nonsecure_base.h" "random/internal/pcg_engine.h" "random/internal/platform.h" "random/internal/pool_urbg.cc" "random/internal/pool_urbg.h" "random/internal/randen.cc" "random/internal/randen.h" "random/internal/randen_detect.cc" "random/internal/randen_detect.h" "random/internal/randen_engine.h" "random/internal/randen_hwaes.cc" "random/internal/randen_hwaes.h" "random/internal/randen_round_keys.cc" "random/internal/randen_slow.cc" "random/internal/randen_slow.h" "random/internal/randen_traits.h" "random/internal/salted_seed_seq.h" "random/internal/seed_material.cc" "random/internal/seed_material.h" "random/internal/sequence_urbg.h" "random/internal/traits.h" "random/internal/uniform_helper.h" "random/internal/wide_multiply.h" "random/log_uniform_int_distribution.h" "random/poisson_distribution.h" "random/random.h" "random/seed_gen_exception.cc" "random/seed_gen_exception.h" "random/seed_sequences.cc" "random/seed_sequences.h" "random/uniform_int_distribution.h" 
"random/uniform_real_distribution.h" "random/zipf_distribution.h" "status/internal/status_internal.h" "status/internal/statusor_internal.h" "status/status.h" "status/status.cc" "status/statusor.h" "status/statusor.cc" "status/status_payload_printer.h" "status/status_payload_printer.cc" "strings/ascii.cc" "strings/ascii.h" "strings/charconv.cc" "strings/charconv.h" "strings/cord.cc" "strings/cord.h" "strings/cord_analysis.cc" "strings/cord_analysis.h" "strings/cord_buffer.cc" "strings/cord_buffer.h" "strings/escaping.cc" "strings/escaping.h" "strings/internal/charconv_bigint.cc" "strings/internal/charconv_bigint.h" "strings/internal/charconv_parse.cc" "strings/internal/charconv_parse.h" "strings/internal/cord_data_edge.h" "strings/internal/cord_internal.cc" "strings/internal/cord_internal.h" "strings/internal/cord_rep_btree.cc" "strings/internal/cord_rep_btree.h" "strings/internal/cord_rep_btree_navigator.cc" "strings/internal/cord_rep_btree_navigator.h" "strings/internal/cord_rep_btree_reader.cc" "strings/internal/cord_rep_btree_reader.h" "strings/internal/cord_rep_crc.cc" "strings/internal/cord_rep_crc.h" "strings/internal/cord_rep_consume.h" "strings/internal/cord_rep_consume.cc" "strings/internal/cord_rep_flat.h" "strings/internal/cord_rep_ring.cc" "strings/internal/cord_rep_ring.h" "strings/internal/cord_rep_ring_reader.h" "strings/internal/cordz_functions.cc" "strings/internal/cordz_functions.h" "strings/internal/cordz_handle.cc" "strings/internal/cordz_handle.h" "strings/internal/cordz_info.cc" "strings/internal/cordz_info.h" "strings/internal/cordz_sample_token.cc" "strings/internal/cordz_sample_token.h" "strings/internal/cordz_statistics.h" "strings/internal/cordz_update_scope.h" "strings/internal/cordz_update_tracker.h" "strings/internal/stl_type_traits.h" "strings/internal/string_constant.h" "strings/match.cc" "strings/match.h" "strings/numbers.cc" "strings/numbers.h" "strings/str_format.h" "strings/str_cat.cc" "strings/str_cat.h" "strings/str_join.h" 
"strings/str_replace.cc" "strings/str_replace.h" "strings/str_split.cc" "strings/str_split.h" "strings/string_view.cc" "strings/string_view.h" "strings/strip.h" "strings/substitute.cc" "strings/substitute.h" "strings/internal/char_map.h" "strings/internal/escaping.h" "strings/internal/escaping.cc" "strings/internal/memutil.cc" "strings/internal/memutil.h" "strings/internal/ostringstream.cc" "strings/internal/ostringstream.h" "strings/internal/pow10_helper.cc" "strings/internal/pow10_helper.h" "strings/internal/resize_uninitialized.h" "strings/internal/str_format/arg.cc" "strings/internal/str_format/arg.h" "strings/internal/str_format/bind.cc" "strings/internal/str_format/bind.h" "strings/internal/str_format/checker.h" "strings/internal/str_format/extension.cc" "strings/internal/str_format/extension.h" "strings/internal/str_format/float_conversion.cc" "strings/internal/str_format/float_conversion.h" "strings/internal/str_format/output.cc" "strings/internal/str_format/output.h" "strings/internal/str_format/parser.cc" "strings/internal/str_format/parser.h" "strings/internal/str_join_internal.h" "strings/internal/str_split_internal.h" "strings/internal/utf8.cc" "strings/internal/utf8.h" "synchronization/barrier.cc" "synchronization/barrier.h" "synchronization/blocking_counter.cc" "synchronization/blocking_counter.h" "synchronization/mutex.cc" "synchronization/mutex.h" "synchronization/notification.cc" "synchronization/notification.h" "synchronization/internal/create_thread_identity.cc" "synchronization/internal/create_thread_identity.h" "synchronization/internal/futex.h" "synchronization/internal/graphcycles.cc" "synchronization/internal/graphcycles.h" "synchronization/internal/kernel_timeout.h" "synchronization/internal/per_thread_sem.cc" "synchronization/internal/per_thread_sem.h" "synchronization/internal/thread_pool.h" "synchronization/internal/waiter.cc" "synchronization/internal/waiter.h" "time/civil_time.cc" "time/civil_time.h" "time/clock.cc" "time/clock.h" 
"time/duration.cc" "time/format.cc" "time/time.cc" "time/time.h" "time/internal/cctz/include/cctz/civil_time.h" "time/internal/cctz/include/cctz/civil_time_detail.h" "time/internal/cctz/include/cctz/time_zone.h" "time/internal/cctz/include/cctz/zone_info_source.h" "time/internal/cctz/src/civil_time_detail.cc" "time/internal/cctz/src/time_zone_fixed.cc" "time/internal/cctz/src/time_zone_fixed.h" "time/internal/cctz/src/time_zone_format.cc" "time/internal/cctz/src/time_zone_if.cc" "time/internal/cctz/src/time_zone_if.h" "time/internal/cctz/src/time_zone_impl.cc" "time/internal/cctz/src/time_zone_impl.h" "time/internal/cctz/src/time_zone_info.cc" "time/internal/cctz/src/time_zone_info.h" "time/internal/cctz/src/time_zone_libc.cc" "time/internal/cctz/src/time_zone_libc.h" "time/internal/cctz/src/time_zone_lookup.cc" "time/internal/cctz/src/time_zone_posix.cc" "time/internal/cctz/src/time_zone_posix.h" "time/internal/cctz/src/tzfile.h" "time/internal/cctz/src/zone_info_source.cc" "types/any.h" "types/bad_any_cast.cc" "types/bad_any_cast.h" "types/bad_optional_access.cc" "types/bad_optional_access.h" "types/bad_variant_access.cc" "types/bad_variant_access.h" "types/compare.h" "types/internal/conformance_aliases.h" "types/internal/conformance_archetype.h" "types/internal/conformance_profile.h" "types/internal/parentheses.h" "types/internal/transform_args.h" "types/internal/variant.h" "types/optional.h" "types/internal/optional.h" "types/span.h" "types/internal/span.h" "types/variant.h" "utility/utility.h" "debugging/leak_check.cc" ) set(ABSL_INTERNAL_DLL_TARGETS "stacktrace" "symbolize" "examine_stack" "failure_signal_handler" "debugging_internal" "demangle_internal" "leak_check" "stack_consumption" "debugging" "hash" "spy_hash_state" "city" "memory" "strings" "strings_internal" "cord" "str_format" "str_format_internal" "pow10_helper" "int128" "numeric" "utility" "any" "bad_any_cast" "bad_any_cast_impl" "span" "optional" "bad_optional_access" "bad_variant_access" 
"variant" "compare" "algorithm" "algorithm_container" "graphcycles_internal" "kernel_timeout_internal" "synchronization" "thread_pool" "any_invocable" "bind_front" "function_ref" "atomic_hook" "log_severity" "raw_logging_internal" "spinlock_wait" "config" "dynamic_annotations" "core_headers" "malloc_internal" "base_internal" "base" "throw_delegate" "pretty_function" "endian" "bits" "exponential_biased" "periodic_sampler" "scoped_set_env" "type_traits" "meta" "random_random" "random_bit_gen_ref" "random_distributions" "random_seed_gen_exception" "random_seed_sequences" "random_internal_traits" "random_internal_distribution_caller" "random_internal_distributions" "random_internal_fast_uniform_bits" "random_internal_seed_material" "random_internal_pool_urbg" "random_internal_explicit_seed_seq" "random_internal_sequence_urbg" "random_internal_salted_seed_seq" "random_internal_iostream_state_saver" "random_internal_generate_real" "random_internal_wide_multiply" "random_internal_fastmath" "random_internal_nonsecure_base" "random_internal_pcg_engine" "random_internal_randen_engine" "random_internal_platform" "random_internal_randen" "random_internal_randen_slow" "random_internal_randen_hwaes" "random_internal_randen_hwaes_impl" "random_internal_uniform_helper" "status" "time" "civil_time" "time_zone" "container" "btree" "compressed_tuple" "fixed_array" "inlined_vector_internal" "inlined_vector" "counting_allocator" "flat_hash_map" "flat_hash_set" "node_hash_map" "node_hash_set" "container_memory" "hash_function_defaults" "hash_policy_traits" "hashtablez_sampler" "hashtable_debug" "hashtable_debug_hooks" "node_slot_policy" "raw_hash_map" "container_common" "raw_hash_set" "layout" "tracked" "sample_recorder" ) function(absl_internal_dll_contains) cmake_parse_arguments(ABSL_INTERNAL_DLL "" "OUTPUT;TARGET" "" ${ARGN} ) STRING(REGEX REPLACE "^absl::" "" _target ${ABSL_INTERNAL_DLL_TARGET}) list(FIND ABSL_INTERNAL_DLL_TARGETS "${_target}" _index) if (${_index} GREATER -1) 
set(${ABSL_INTERNAL_DLL_OUTPUT} 1 PARENT_SCOPE) else() set(${ABSL_INTERNAL_DLL_OUTPUT} 0 PARENT_SCOPE) endif() endfunction() function(absl_internal_dll_targets) cmake_parse_arguments(ABSL_INTERNAL_DLL "" "OUTPUT" "DEPS" ${ARGN} ) set(_deps "") foreach(dep IN LISTS ABSL_INTERNAL_DLL_DEPS) absl_internal_dll_contains(TARGET ${dep} OUTPUT _contains) if (_contains) list(APPEND _deps abseil_dll) else() list(APPEND _deps ${dep}) endif() endforeach() # Because we may have added the DLL multiple times list(REMOVE_DUPLICATES _deps) set(${ABSL_INTERNAL_DLL_OUTPUT} "${_deps}" PARENT_SCOPE) endfunction() function(absl_make_dll) add_library( abseil_dll SHARED "${ABSL_INTERNAL_DLL_FILES}" ) target_link_libraries( abseil_dll PRIVATE ${ABSL_DEFAULT_LINKOPTS} ) set_property(TARGET abseil_dll PROPERTY LINKER_LANGUAGE "CXX") target_include_directories( abseil_dll PUBLIC "$" $ ) target_compile_options( abseil_dll PRIVATE ${ABSL_DEFAULT_COPTS} ) target_compile_definitions( abseil_dll PRIVATE ABSL_BUILD_DLL NOMINMAX INTERFACE ${ABSL_CC_LIB_DEFINES} ) install(TARGETS abseil_dll EXPORT ${PROJECT_NAME}Targets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} ) endfunction() abseil-20220623.1/CMake/AbseilHelpers.cmake000066400000000000000000000347241430371345100200710ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# include(CMakeParseArguments) include(AbseilConfigureCopts) include(AbseilDll) # The IDE folder for Abseil that will be used if Abseil is included in a CMake # project that sets # set_property(GLOBAL PROPERTY USE_FOLDERS ON) # For example, Visual Studio supports folders. if(NOT DEFINED ABSL_IDE_FOLDER) set(ABSL_IDE_FOLDER Abseil) endif() # absl_cc_library() # # CMake function to imitate Bazel's cc_library rule. # # Parameters: # NAME: name of target (see Note) # HDRS: List of public header files for the library # SRCS: List of source files for the library # DEPS: List of other libraries to be linked in to the binary targets # COPTS: List of private compile options # DEFINES: List of public defines # LINKOPTS: List of link options # PUBLIC: Add this so that this library will be exported under absl:: # Also in IDE, target will appear in Abseil folder while non PUBLIC will be in Abseil/internal. # TESTONLY: When added, this target will only be built if both # BUILD_TESTING=ON and ABSL_BUILD_TESTING=ON. # # Note: # By default, absl_cc_library will always create a library named absl_${NAME}, # and alias target absl::${NAME}. The absl:: form should always be used. # This is to reduce namespace pollution. # # absl_cc_library( # NAME # awesome # HDRS # "a.h" # SRCS # "a.cc" # ) # absl_cc_library( # NAME # fantastic_lib # SRCS # "b.cc" # DEPS # absl::awesome # not "awesome" ! # PUBLIC # ) # # absl_cc_library( # NAME # main_lib # ... 
# DEPS # absl::fantastic_lib # ) # # TODO: Implement "ALWAYSLINK" function(absl_cc_library) cmake_parse_arguments(ABSL_CC_LIB "DISABLE_INSTALL;PUBLIC;TESTONLY" "NAME" "HDRS;SRCS;COPTS;DEFINES;LINKOPTS;DEPS" ${ARGN} ) if(NOT ABSL_CC_LIB_PUBLIC AND ABSL_CC_LIB_TESTONLY AND NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) return() endif() if(ABSL_ENABLE_INSTALL) set(_NAME "${ABSL_CC_LIB_NAME}") else() set(_NAME "absl_${ABSL_CC_LIB_NAME}") endif() # Check if this is a header-only library # Note that as of February 2019, many popular OS's (for example, Ubuntu # 16.04 LTS) only come with cmake 3.5 by default. For this reason, we can't # use list(FILTER...) set(ABSL_CC_SRCS "${ABSL_CC_LIB_SRCS}") foreach(src_file IN LISTS ABSL_CC_SRCS) if(${src_file} MATCHES ".*\\.(h|inc)") list(REMOVE_ITEM ABSL_CC_SRCS "${src_file}") endif() endforeach() if(ABSL_CC_SRCS STREQUAL "") set(ABSL_CC_LIB_IS_INTERFACE 1) else() set(ABSL_CC_LIB_IS_INTERFACE 0) endif() # Determine this build target's relationship to the DLL. It's one of four things: # 1. "dll" -- This target is part of the DLL # 2. "dll_dep" -- This target is not part of the DLL, but depends on the DLL. # Note that we assume any target not in the DLL depends on the # DLL. This is not a technical necessity but a convenience # which happens to be true, because nearly every target is # part of the DLL. # 3. "shared" -- This is a shared library, perhaps on a non-windows platform # where DLL doesn't make sense. # 4. "static" -- This target does not depend on the DLL and should be built # statically. 
if (${ABSL_BUILD_DLL}) if(ABSL_ENABLE_INSTALL) absl_internal_dll_contains(TARGET ${_NAME} OUTPUT _in_dll) else() absl_internal_dll_contains(TARGET ${ABSL_CC_LIB_NAME} OUTPUT _in_dll) endif() if (${_in_dll}) # This target should be replaced by the DLL set(_build_type "dll") set(ABSL_CC_LIB_IS_INTERFACE 1) else() # Building a DLL, but this target is not part of the DLL set(_build_type "dll_dep") endif() elseif(BUILD_SHARED_LIBS) set(_build_type "shared") else() set(_build_type "static") endif() # Generate a pkg-config file for every library: if((_build_type STREQUAL "static" OR _build_type STREQUAL "shared") AND ABSL_ENABLE_INSTALL) if(NOT ABSL_CC_LIB_TESTONLY) if(absl_VERSION) set(PC_VERSION "${absl_VERSION}") else() set(PC_VERSION "head") endif() foreach(dep ${ABSL_CC_LIB_DEPS}) if(${dep} MATCHES "^absl::(.*)") # Join deps with commas. if(PC_DEPS) set(PC_DEPS "${PC_DEPS},") endif() set(PC_DEPS "${PC_DEPS} absl_${CMAKE_MATCH_1} = ${PC_VERSION}") endif() endforeach() foreach(cflag ${ABSL_CC_LIB_COPTS}) if(${cflag} MATCHES "^(-Wno|/wd)") # These flags are needed to suppress warnings that might fire in our headers. set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") elseif(${cflag} MATCHES "^(-W|/w[1234eo])") # Don't impose our warnings on others. 
else() set(PC_CFLAGS "${PC_CFLAGS} ${cflag}") endif() endforeach() string(REPLACE ";" " " PC_LINKOPTS "${ABSL_CC_LIB_LINKOPTS}") FILE(GENERATE OUTPUT "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" CONTENT "\ prefix=${CMAKE_INSTALL_PREFIX}\n\ exec_prefix=\${prefix}\n\ libdir=${CMAKE_INSTALL_FULL_LIBDIR}\n\ includedir=${CMAKE_INSTALL_FULL_INCLUDEDIR}\n\ \n\ Name: absl_${_NAME}\n\ Description: Abseil ${_NAME} library\n\ URL: https://abseil.io/\n\ Version: ${PC_VERSION}\n\ Requires:${PC_DEPS}\n\ Libs: -L\${libdir} ${PC_LINKOPTS} $<$>:-labsl_${_NAME}>\n\ Cflags: -I\${includedir}${PC_CFLAGS}\n") INSTALL(FILES "${CMAKE_BINARY_DIR}/lib/pkgconfig/absl_${_NAME}.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") endif() endif() if(NOT ABSL_CC_LIB_IS_INTERFACE) if(_build_type STREQUAL "dll_dep") # This target depends on the DLL. When adding dependencies to this target, # any depended-on-target which is contained inside the DLL is replaced # with a dependency on the DLL. add_library(${_NAME} STATIC "") target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) absl_internal_dll_targets( DEPS ${ABSL_CC_LIB_DEPS} OUTPUT _dll_deps ) target_link_libraries(${_NAME} PUBLIC ${_dll_deps} PRIVATE ${ABSL_CC_LIB_LINKOPTS} ${ABSL_DEFAULT_LINKOPTS} ) if (ABSL_CC_LIB_TESTONLY) set(_gtest_link_define "GTEST_LINKED_AS_SHARED_LIBRARY=1") else() set(_gtest_link_define) endif() target_compile_definitions(${_NAME} PUBLIC ABSL_CONSUME_DLL "${_gtest_link_define}" ) elseif(_build_type STREQUAL "static" OR _build_type STREQUAL "shared") add_library(${_NAME} "") target_sources(${_NAME} PRIVATE ${ABSL_CC_LIB_SRCS} ${ABSL_CC_LIB_HDRS}) target_link_libraries(${_NAME} PUBLIC ${ABSL_CC_LIB_DEPS} PRIVATE ${ABSL_CC_LIB_LINKOPTS} ${ABSL_DEFAULT_LINKOPTS} ) else() message(FATAL_ERROR "Invalid build type: ${_build_type}") endif() # Linker language can be inferred from sources, but in the case of DLLs we # don't have any .cc files so it would be ambiguous. 
We could set it # explicitly only in the case of DLLs but, because "CXX" is always the # correct linker language for static or for shared libraries, we set it # unconditionally. set_property(TARGET ${_NAME} PROPERTY LINKER_LANGUAGE "CXX") target_include_directories(${_NAME} PUBLIC "$" $ ) target_compile_options(${_NAME} PRIVATE ${ABSL_CC_LIB_COPTS}) target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_LIB_DEFINES}) # Add all Abseil targets to a a folder in the IDE for organization. if(ABSL_CC_LIB_PUBLIC) set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}) elseif(ABSL_CC_LIB_TESTONLY) set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) else() set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/internal) endif() if(ABSL_PROPAGATE_CXX_STD) # Abseil libraries require C++11 as the current minimum standard. # Top-level application CMake projects should ensure a consistent C++ # standard for all compiled sources by setting CMAKE_CXX_STANDARD. target_compile_features(${_NAME} PUBLIC cxx_std_11) else() # Note: This is legacy (before CMake 3.8) behavior. Setting the # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is # initialized by CMAKE_CXX_STANDARD) should have no real effect, since # that is the default value anyway. # # CXX_STANDARD_REQUIRED does guard against the top-level CMake project # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents # "decaying" to an older standard if the requested one isn't available). set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) endif() # When being installed, we lose the absl_ prefix. We want to put it back # to have properly named lib files. This is a no-op when we are not being # installed. 
if(ABSL_ENABLE_INSTALL) set_target_properties(${_NAME} PROPERTIES OUTPUT_NAME "absl_${_NAME}" SOVERSION "2206.0.0" ) endif() else() # Generating header-only library add_library(${_NAME} INTERFACE) target_include_directories(${_NAME} INTERFACE "$" $ ) if (_build_type STREQUAL "dll") set(ABSL_CC_LIB_DEPS abseil_dll) endif() target_link_libraries(${_NAME} INTERFACE ${ABSL_CC_LIB_DEPS} ${ABSL_CC_LIB_LINKOPTS} ${ABSL_DEFAULT_LINKOPTS} ) target_compile_definitions(${_NAME} INTERFACE ${ABSL_CC_LIB_DEFINES}) if(ABSL_PROPAGATE_CXX_STD) # Abseil libraries require C++11 as the current minimum standard. # Top-level application CMake projects should ensure a consistent C++ # standard for all compiled sources by setting CMAKE_CXX_STANDARD. target_compile_features(${_NAME} INTERFACE cxx_std_11) # (INTERFACE libraries can't have the CXX_STANDARD property set, so there # is no legacy behavior else case). endif() endif() # TODO currently we don't install googletest alongside abseil sources, so # installed abseil can't be tested. if(NOT ABSL_CC_LIB_TESTONLY AND ABSL_ENABLE_INSTALL) install(TARGETS ${_NAME} EXPORT ${PROJECT_NAME}Targets RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} ) endif() add_library(absl::${ABSL_CC_LIB_NAME} ALIAS ${_NAME}) endfunction() # absl_cc_test() # # CMake function to imitate Bazel's cc_test rule. # # Parameters: # NAME: name of target (see Usage below) # SRCS: List of source files for the binary # DEPS: List of other libraries to be linked in to the binary targets # COPTS: List of private compile options # DEFINES: List of public defines # LINKOPTS: List of link options # # Note: # By default, absl_cc_test will always create a binary named absl_${NAME}. # This will also add it to ctest list as absl_${NAME}. 
# # Usage: # absl_cc_library( # NAME # awesome # HDRS # "a.h" # SRCS # "a.cc" # PUBLIC # ) # # absl_cc_test( # NAME # awesome_test # SRCS # "awesome_test.cc" # DEPS # absl::awesome # GTest::gmock # GTest::gtest_main # ) function(absl_cc_test) if(NOT (BUILD_TESTING AND ABSL_BUILD_TESTING)) return() endif() cmake_parse_arguments(ABSL_CC_TEST "" "NAME" "SRCS;COPTS;DEFINES;LINKOPTS;DEPS" ${ARGN} ) set(_NAME "absl_${ABSL_CC_TEST_NAME}") add_executable(${_NAME} "") target_sources(${_NAME} PRIVATE ${ABSL_CC_TEST_SRCS}) target_include_directories(${_NAME} PUBLIC ${ABSL_COMMON_INCLUDE_DIRS} PRIVATE ${GMOCK_INCLUDE_DIRS} ${GTEST_INCLUDE_DIRS} ) if (${ABSL_BUILD_DLL}) target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_TEST_DEFINES} ABSL_CONSUME_DLL GTEST_LINKED_AS_SHARED_LIBRARY=1 ) # Replace dependencies on targets inside the DLL with abseil_dll itself. absl_internal_dll_targets( DEPS ${ABSL_CC_TEST_DEPS} OUTPUT ABSL_CC_TEST_DEPS ) else() target_compile_definitions(${_NAME} PUBLIC ${ABSL_CC_TEST_DEFINES} ) endif() target_compile_options(${_NAME} PRIVATE ${ABSL_CC_TEST_COPTS} ) target_link_libraries(${_NAME} PUBLIC ${ABSL_CC_TEST_DEPS} PRIVATE ${ABSL_CC_TEST_LINKOPTS} ) # Add all Abseil targets to a folder in the IDE for organization. set_property(TARGET ${_NAME} PROPERTY FOLDER ${ABSL_IDE_FOLDER}/test) if(ABSL_PROPAGATE_CXX_STD) # Abseil libraries require C++11 as the current minimum standard. # Top-level application CMake projects should ensure a consistent C++ # standard for all compiled sources by setting CMAKE_CXX_STANDARD. target_compile_features(${_NAME} PUBLIC cxx_std_11) else() # Note: This is legacy (before CMake 3.8) behavior. Setting the # target-level CXX_STANDARD property to ABSL_CXX_STANDARD (which is # initialized by CMAKE_CXX_STANDARD) should have no real effect, since # that is the default value anyway. 
# # CXX_STANDARD_REQUIRED does guard against the top-level CMake project # not having enabled CMAKE_CXX_STANDARD_REQUIRED (which prevents # "decaying" to an older standard if the requested one isn't available). set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD ${ABSL_CXX_STANDARD}) set_property(TARGET ${_NAME} PROPERTY CXX_STANDARD_REQUIRED ON) endif() add_test(NAME ${_NAME} COMMAND ${_NAME}) endfunction() function(check_target my_target) if(NOT TARGET ${my_target}) message(FATAL_ERROR " ABSL: compiling absl requires a ${my_target} CMake target in your project, see CMake/README.md for more details") endif(NOT TARGET ${my_target}) endfunction() abseil-20220623.1/CMake/Googletest/000077500000000000000000000000001430371345100164475ustar00rootroot00000000000000abseil-20220623.1/CMake/Googletest/CMakeLists.txt.in000066400000000000000000000005731430371345100216210ustar00rootroot00000000000000cmake_minimum_required(VERSION 2.8.2) project(googletest-external NONE) include(ExternalProject) ExternalProject_Add(googletest URL "${absl_gtest_download_url}" # May be empty SOURCE_DIR "${absl_gtest_src_dir}" BINARY_DIR "${absl_gtest_build_dir}" CONFIGURE_COMMAND "" BUILD_COMMAND "" INSTALL_COMMAND "" TEST_COMMAND "" ) abseil-20220623.1/CMake/Googletest/DownloadGTest.cmake000066400000000000000000000031601430371345100221670ustar00rootroot00000000000000# Integrates googletest at configure time. Based on the instructions at # https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project # Set up the external googletest project, downloading the latest from Github # master if requested. 
configure_file( ${CMAKE_CURRENT_LIST_DIR}/CMakeLists.txt.in ${CMAKE_BINARY_DIR}/googletest-external/CMakeLists.txt ) set(ABSL_SAVE_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS}) set(ABSL_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) if (BUILD_SHARED_LIBS) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_CREATE_SHARED_LIBRARY=1") endif() # Configure and build the googletest source. execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" . RESULT_VARIABLE result WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-external ) if(result) message(FATAL_ERROR "CMake step for googletest failed: ${result}") endif() execute_process(COMMAND ${CMAKE_COMMAND} --build . RESULT_VARIABLE result WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/googletest-external) if(result) message(FATAL_ERROR "Build step for googletest failed: ${result}") endif() set(CMAKE_CXX_FLAGS ${ABSL_SAVE_CMAKE_CXX_FLAGS}) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${ABSL_SAVE_CMAKE_RUNTIME_OUTPUT_DIRECTORY}) # Prevent overriding the parent project's compiler/linker settings on Windows set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) # Add googletest directly to our build. This defines the gtest and gtest_main # targets. add_subdirectory(${absl_gtest_src_dir} ${absl_gtest_build_dir} EXCLUDE_FROM_ALL) abseil-20220623.1/CMake/README.md000066400000000000000000000157311430371345100156210ustar00rootroot00000000000000# Abseil CMake Build Instructions Abseil comes with a CMake build script ([CMakeLists.txt](../CMakeLists.txt)) that can be used on a wide range of platforms ("C" stands for cross-platform.). If you don't have CMake installed already, you can download it for free from . CMake works by generating native makefiles or build projects that can be used in the compiler environment of your choice. For API/ABI compatibility reasons, we strongly recommend building Abseil in a subdirectory of your project or as an embedded dependency. 
## Incorporating Abseil Into a CMake Project The recommendations below are similar to those for using CMake within the googletest framework () ### Step-by-Step Instructions 1. If you want to build the Abseil tests, integrate the Abseil dependency [Google Test](https://github.com/google/googletest) into your CMake project. To disable Abseil tests, you have to pass either `-DBUILD_TESTING=OFF` or `-DABSL_BUILD_TESTING=OFF` when configuring your project with CMake. 2. Download Abseil and copy it into a subdirectory in your CMake project or add Abseil as a [git submodule](https://git-scm.com/docs/git-submodule) in your CMake project. 3. You can then use the CMake command [`add_subdirectory()`](https://cmake.org/cmake/help/latest/command/add_subdirectory.html) to include Abseil directly in your CMake project. 4. Add the **absl::** target you wish to use to the [`target_link_libraries()`](https://cmake.org/cmake/help/latest/command/target_link_libraries.html) section of your executable or of your library.
Here is a short CMakeLists.txt example of an application project using Abseil. ```cmake cmake_minimum_required(VERSION 3.8.2) project(my_app_project) # Pick the C++ standard to compile with. # Abseil currently supports C++11, C++14, and C++17. set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED ON) add_subdirectory(abseil-cpp) add_executable(my_exe source.cpp) target_link_libraries(my_exe absl::base absl::synchronization absl::strings) ``` Note that if you are developing a library designed for use by other clients, you should instead leave `CMAKE_CXX_STANDARD` unset (or only set if being built as the current top-level CMake project) and configure the minimum required C++ standard at the target level. If you require a later minimum C++ standard than Abseil does, it's a good idea to also enforce that `CMAKE_CXX_STANDARD` (which will control Abseil library targets) is set to at least that minimum. For example: ```cmake cmake_minimum_required(VERSION 3.8.2) project(my_lib_project) # Leave C++ standard up to the root application, so set it only if this is the # current top-level CMake project. if(CMAKE_SOURCE_DIR STREQUAL my_lib_project_SOURCE_DIR) set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) endif() add_subdirectory(abseil-cpp) add_library(my_lib source.cpp) target_link_libraries(my_lib absl::base absl::synchronization absl::strings) # Enforce that my_lib requires C++17. Important to document for clients that they # must set CMAKE_CXX_STANDARD to 17 or higher for proper Abseil ABI compatibility # (since otherwise, Abseil library targets could be compiled with a lower C++ # standard than my_lib). target_compile_features(my_lib PUBLIC cxx_std_17) if(CMAKE_CXX_STANDARD LESS 17) message(FATAL_ERROR "my_lib_project requires CMAKE_CXX_STANDARD >= 17 (got: ${CMAKE_CXX_STANDARD})") endif() ``` Then the top-level application project that uses your library is responsible for setting a consistent `CMAKE_CXX_STANDARD` that is sufficiently high. 
### Running Abseil Tests with CMake Use the `-DABSL_BUILD_TESTING=ON` flag to run Abseil tests. Note that BUILD_TESTING must also be on (the default). You will need to provide Abseil with a Googletest dependency. There are two options for how to do this: * Use `-DABSL_USE_GOOGLETEST_HEAD`. This will automatically download the latest Googletest source into the build directory at configure time. Googletest will then be compiled directly alongside Abseil's tests. * Manually integrate Googletest with your build. See https://github.com/google/googletest/blob/master/googletest/README.md#using-cmake for more information on using Googletest in a CMake project. For example, to run just the Abseil tests, you could use this script: ``` cd path/to/abseil-cpp mkdir build cd build cmake -DABSL_BUILD_TESTING=ON -DABSL_USE_GOOGLETEST_HEAD=ON .. make -j ctest ``` Currently, we only run our tests with CMake in a Linux environment, but we are working on the rest of our supported platforms. See https://github.com/abseil/abseil-cpp/projects/1 and https://github.com/abseil/abseil-cpp/issues/109 for more information. ### Available Abseil CMake Public Targets Here's a non-exhaustive list of Abseil CMake public targets: ```cmake absl::algorithm absl::base absl::debugging absl::flat_hash_map absl::flags absl::memory absl::meta absl::numeric absl::random_random absl::strings absl::synchronization absl::time absl::utility ``` ## Traditional CMake Set-Up For larger projects, it may make sense to use the traditional CMake set-up where you build and install projects separately. First, you'd need to build and install Google Test: ``` cmake -S /source/googletest -B /build/googletest -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/installation/dir -DBUILD_GMOCK=ON cmake --build /build/googletest --target install ``` Then you need to configure and build Abseil. Make sure you enable `ABSL_USE_EXTERNAL_GOOGLETEST` and `ABSL_FIND_GOOGLETEST`. 
You also need to enable `ABSL_ENABLE_INSTALL` so that you can install Abseil itself. ``` cmake -S /source/abseil-cpp -B /build/abseil-cpp -DCMAKE_PREFIX_PATH=/installation/dir -DCMAKE_INSTALL_PREFIX=/installation/dir -DABSL_ENABLE_INSTALL=ON -DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON cmake --build /temporary/build/abseil-cpp ``` (`CMAKE_PREFIX_PATH` is where you already have Google Test installed; `CMAKE_INSTALL_PREFIX` is where you want to have Abseil installed; they can be different.) Run the tests: ``` ctest --test-dir /temporary/build/abseil-cpp ``` And finally install: ``` cmake --build /temporary/build/abseil-cpp --target install ``` # CMake Option Synposis ## Enable Standard CMake Installation `-DABSL_ENABLE_INSTALL=ON` ## Google Test Options `-DABSL_BUILD_TESTING=ON` must be set to enable testing - Have Abseil download and build Google Test for you: `-DABSL_USE_EXTERNAL_GOOGLETEST=OFF` (default) - Download and build latest Google Test: `-DABSL_USE_GOOGLETEST_HEAD=ON` - Download specific Google Test version (ZIP archive): `-DABSL_GOOGLETEST_DOWNLOAD_URL=https://.../version.zip` - Use Google Test from specific local directory: `-DABSL_LOCAL_GOOGLETEST_DIR=/path/to/googletest` - Use Google Test included elsewhere in your project: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON` - Use standard CMake `find_package(CTest)` to find installed Google Test: `-DABSL_USE_EXTERNAL_GOOGLETEST=ON -DABSL_FIND_GOOGLETEST=ON` abseil-20220623.1/CMake/abslConfig.cmake.in000066400000000000000000000002601430371345100200070ustar00rootroot00000000000000# absl CMake configuration file. 
include(CMakeFindDependencyMacro) find_dependency(Threads) @PACKAGE_INIT@ include ("${CMAKE_CURRENT_LIST_DIR}/@PROJECT_NAME@Targets.cmake") abseil-20220623.1/CMake/install_test_project/000077500000000000000000000000001430371345100205665ustar00rootroot00000000000000abseil-20220623.1/CMake/install_test_project/CMakeLists.txt000066400000000000000000000015011430371345100233230ustar00rootroot00000000000000# # Copyright 2019 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # A simple CMakeLists.txt for testing cmake installation cmake_minimum_required(VERSION 3.5) project(absl_cmake_testing CXX) add_executable(simple simple.cc) find_package(absl REQUIRED) target_link_libraries(simple absl::strings absl::config) abseil-20220623.1/CMake/install_test_project/simple.cc000066400000000000000000000021311430371345100223630ustar00rootroot00000000000000// // Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include "absl/base/config.h" #include "absl/strings/substitute.h" #if !defined(ABSL_LTS_RELEASE_VERSION) || ABSL_LTS_RELEASE_VERSION != 99998877 #error ABSL_LTS_RELEASE_VERSION is not set correctly. #endif #if !defined(ABSL_LTS_RELEASE_PATCH_LEVEL) || ABSL_LTS_RELEASE_PATCH_LEVEL != 0 #error ABSL_LTS_RELEASE_PATCH_LEVEL is not set correctly. #endif int main(int argc, char** argv) { for (int i = 0; i < argc; ++i) { std::cout << absl::Substitute("Arg $0: $1\n", i, argv[i]); } } abseil-20220623.1/CMake/install_test_project/test.sh000077500000000000000000000056371430371345100221170ustar00rootroot00000000000000#!/bin/bash # # Copyright 2019 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Unit and integration tests for Abseil LTS CMake installation # Fail on any error. Treat unset variables an error. Print commands as executed. 
set -euox pipefail absl_dir=/abseil-cpp absl_build_dir=/buildfs googletest_builddir=/googletest_builddir project_dir="${absl_dir}"/CMake/install_test_project project_build_dir=/buildfs/project-build build_shared_libs="OFF" if [ "${LINK_TYPE:-}" = "DYNAMIC" ]; then build_shared_libs="ON" fi # Build and install GoogleTest mkdir "${googletest_builddir}" pushd "${googletest_builddir}" curl -L "${ABSL_GOOGLETEST_DOWNLOAD_URL}" --output "${ABSL_GOOGLETEST_COMMIT}".zip unzip "${ABSL_GOOGLETEST_COMMIT}".zip pushd "googletest-${ABSL_GOOGLETEST_COMMIT}" mkdir build pushd build cmake -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS="${build_shared_libs}" .. make -j $(nproc) make install ldconfig popd popd popd # Run the LTS transformations ./create_lts.py 99998877 # Build and install Abseil pushd "${absl_build_dir}" cmake "${absl_dir}" \ -DABSL_USE_EXTERNAL_GOOGLETEST=ON \ -DABSL_FIND_GOOGLETEST=ON \ -DCMAKE_BUILD_TYPE=Release \ -DABSL_BUILD_TESTING=ON \ -DBUILD_SHARED_LIBS="${build_shared_libs}" make -j $(nproc) ctest -j $(nproc) --output-on-failure make install ldconfig popd # Test the project against the installed Abseil mkdir -p "${project_build_dir}" pushd "${project_build_dir}" cmake "${project_dir}" cmake --build . --target simple output="$(${project_build_dir}/simple "printme" 2>&1)" if [[ "${output}" != *"Arg 1: printme"* ]]; then echo "Faulty output on simple project:" echo "${output}" exit 1 fi popd if ! 
grep absl::strings "/usr/local/lib/cmake/absl/abslTargets.cmake"; then cat "/usr/local/lib/cmake/absl/abslTargets.cmake" echo "CMake targets named incorrectly" exit 1 fi pushd "${HOME}" cat > hello-abseil.cc << EOF #include #include "absl/strings/str_format.h" int main(int argc, char **argv) { absl::PrintF("Hello Abseil!\n"); return EXIT_SUCCESS; } EOF if [ "${LINK_TYPE:-}" != "DYNAMIC" ]; then pc_args=($(pkg-config --cflags --libs --static absl_str_format)) g++ -static -o hello-abseil hello-abseil.cc "${pc_args[@]}" else pc_args=($(pkg-config --cflags --libs absl_str_format)) g++ -o hello-abseil hello-abseil.cc "${pc_args[@]}" fi hello="$(./hello-abseil)" [[ "${hello}" == "Hello Abseil!" ]] popd echo "Install test complete!" exit 0 abseil-20220623.1/CMakeLists.txt000066400000000000000000000164151430371345100161220ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Most widely used distributions have cmake 3.5 or greater available as of March # 2019. A notable exception is RHEL-7 (CentOS7). You can install a current # version of CMake by first installing Extra Packages for Enterprise Linux # (https://fedoraproject.org/wiki/EPEL#Extra_Packages_for_Enterprise_Linux_.28EPEL.29) # and then issuing `yum install cmake3` on the command line. cmake_minimum_required(VERSION 3.5) # Compiler id for Apple Clang is now AppleClang. 
if (POLICY CMP0025) cmake_policy(SET CMP0025 NEW) endif (POLICY CMP0025) # if command can use IN_LIST if (POLICY CMP0057) cmake_policy(SET CMP0057 NEW) endif (POLICY CMP0057) # Project version variables are the empty string if version is unspecified if (POLICY CMP0048) cmake_policy(SET CMP0048 NEW) endif (POLICY CMP0048) # option() honor variables if (POLICY CMP0077) cmake_policy(SET CMP0077 NEW) endif (POLICY CMP0077) # Allow the user to specify the MSVC runtime if (POLICY CMP0091) cmake_policy(SET CMP0091 NEW) endif (POLICY CMP0091) project(absl LANGUAGES CXX VERSION 20220623) include(CTest) # Output directory is correct by default for most build setups. However, when # building Abseil as a DLL, it is important to have the DLL in the same # directory as the executable using it. Thus, we put all executables in a single # /bin directory. set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) # when absl is included as subproject (i.e. using add_subdirectory(abseil-cpp)) # in the source tree of a project that uses it, install rules are disabled. if(NOT CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) option(ABSL_ENABLE_INSTALL "Enable install rule" OFF) else() option(ABSL_ENABLE_INSTALL "Enable install rule" ON) endif() option(ABSL_PROPAGATE_CXX_STD "Use CMake C++ standard meta features (e.g. cxx_std_11) that propagate to targets that link to Abseil" OFF) # TODO: Default to ON for CMake 3.8 and greater. if((${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.8) AND (NOT ABSL_PROPAGATE_CXX_STD)) message(WARNING "A future Abseil release will default ABSL_PROPAGATE_CXX_STD to ON for CMake 3.8 and up. 
We recommend enabling this option to ensure your project still builds correctly.") endif() list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}/CMake ${CMAKE_CURRENT_LIST_DIR}/absl/copts ) include(CMakePackageConfigHelpers) include(GNUInstallDirs) include(AbseilDll) include(AbseilHelpers) ## ## Using absl targets ## ## all public absl targets are ## exported with the absl:: prefix ## ## e.g absl::base absl::synchronization absl::strings .... ## ## DO NOT rely on the internal targets outside of the prefix # include current path list(APPEND ABSL_COMMON_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang") set(ABSL_USING_CLANG ON) else() set(ABSL_USING_CLANG OFF) endif() # find dependencies ## pthread find_package(Threads REQUIRED) include(CMakeDependentOption) option(ABSL_BUILD_TESTING "If ON, Abseil will build all of Abseil's own tests." OFF) option(ABSL_USE_EXTERNAL_GOOGLETEST "If ON, Abseil will assume that the targets for GoogleTest are already provided by the including project. This makes sense when Abseil is used with add_subdirectory." OFF) cmake_dependent_option(ABSL_FIND_GOOGLETEST "If ON, Abseil will use find_package(GTest) rather than assuming that GoogleTest is already provided by the including project." ON "ABSL_USE_EXTERNAL_GOOGLETEST" OFF) option(ABSL_USE_GOOGLETEST_HEAD "If ON, abseil will download HEAD from GoogleTest at config time." OFF) set(ABSL_GOOGLETEST_DOWNLOAD_URL "" CACHE STRING "If set, download GoogleTest from this URL") set(ABSL_LOCAL_GOOGLETEST_DIR "/usr/src/googletest" CACHE PATH "If ABSL_USE_GOOGLETEST_HEAD is OFF and ABSL_GOOGLETEST_URL is not set, specifies the directory of a local GoogleTest checkout." ) if(BUILD_TESTING AND ABSL_BUILD_TESTING) ## check targets if (ABSL_USE_EXTERNAL_GOOGLETEST) if (ABSL_FIND_GOOGLETEST) find_package(GTest REQUIRED) elseif(NOT TARGET GTest::gtest) if(TARGET gtest) # When Google Test is included directly rather than through find_package, the aliases are missing. 
add_library(GTest::gtest ALIAS gtest) add_library(GTest::gtest_main ALIAS gtest_main) add_library(GTest::gmock ALIAS gmock) add_library(GTest::gmock_main ALIAS gmock_main) else() message(FATAL_ERROR "ABSL_USE_EXTERNAL_GOOGLETEST is ON and ABSL_FIND_GOOGLETEST is OFF, which means that the top-level project must build the Google Test project. However, the target gtest was not found.") endif() endif() else() set(absl_gtest_build_dir ${CMAKE_BINARY_DIR}/googletest-build) if(ABSL_USE_GOOGLETEST_HEAD AND ABSL_GOOGLETEST_DOWNLOAD_URL) message(FATAL_ERROR "Do not set both ABSL_USE_GOOGLETEST_HEAD and ABSL_GOOGLETEST_DOWNLOAD_URL") endif() if(ABSL_USE_GOOGLETEST_HEAD) set(absl_gtest_download_url "https://github.com/google/googletest/archive/main.zip") elseif(ABSL_GOOGLETEST_DOWNLOAD_URL) set(absl_gtest_download_url ${ABSL_GOOGLETEST_DOWNLOAD_URL}) endif() if(absl_gtest_download_url) set(absl_gtest_src_dir ${CMAKE_BINARY_DIR}/googletest-src) else() set(absl_gtest_src_dir ${ABSL_LOCAL_GOOGLETEST_DIR}) endif() include(CMake/Googletest/DownloadGTest.cmake) endif() check_target(GTest::gtest) check_target(GTest::gtest_main) check_target(GTest::gmock) check_target(GTest::gmock_main) endif() add_subdirectory(absl) if(ABSL_ENABLE_INSTALL) # install as a subdirectory only install(EXPORT ${PROJECT_NAME}Targets NAMESPACE absl:: DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) configure_package_config_file( CMake/abslConfig.cmake.in "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) # Abseil only has a version in LTS releases. This mechanism is accomplished # Abseil's internal Copybara (https://github.com/google/copybara) workflows and # isn't visible in the CMake buildsystem itself. 
if(absl_VERSION) write_basic_package_version_file( "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" COMPATIBILITY ExactVersion ) install(FILES "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake" DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}" ) endif() # absl_VERSION install(DIRECTORY absl DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.inc" PATTERN "*.h" PATTERN "copts" EXCLUDE PATTERN "testdata" EXCLUDE ) endif() # ABSL_ENABLE_INSTALL abseil-20220623.1/CONTRIBUTING.md000066400000000000000000000153171430371345100156130ustar00rootroot00000000000000# How to Contribute to Abseil We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. NOTE: If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) ## Contributor License Agreement Contributions to this project must be accompanied by a Contributor License Agreement. You (or your employer) retain the copyright to your contribution, this simply gives us permission to use and redistribute your contributions as part of the project. Head over to to see your current agreements on file or to sign a new one. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. ## Contribution Guidelines Potential contributors sometimes ask us if the Abseil project is the appropriate home for their utility library code or for specific functions implementing missing portions of the standard. Often, the answer to this question is "no". We’d like to articulate our thinking on this issue so that our choices can be understood by everyone and so that contributors can have a better intuition about whether Abseil might be interested in adopting a new library. 
### Priorities Although our mission is to augment the C++ standard library, our goal is not to provide a full forward-compatible implementation of the latest standard. For us to consider a library for inclusion in Abseil, it is not enough that a library is useful. We generally choose to release a library when it meets at least one of the following criteria: * **Widespread usage** - Using our internal codebase to help gauge usage, most of the libraries we've released have tens of thousands of users. * **Anticipated widespread usage** - Pre-adoption of some standard-compliant APIs may not have broad adoption initially but can be expected to pick up usage when it replaces legacy APIs. `absl::from_chars`, for example, replaces existing code that converts strings to numbers and will therefore likely see usage growth. * **High impact** - APIs that provide a key solution to a specific problem, such as `absl::FixedArray`, have higher impact than usage numbers may signal and are released because of their importance. * **Direct support for a library that falls under one of the above** - When we want access to a smaller library as an implementation detail for a higher-priority library we plan to release, we may release it, as we did with portions of `absl/meta/type_traits.h`. One consequence of this is that the presence of a library in Abseil does not necessarily mean that other similar libraries would be a high priority. ### API Freeze Consequences Via the [Abseil Compatibility Guidelines](https://abseil.io/about/compatibility), we have promised a large degree of API stability. In particular, we will not make backward-incompatible changes to released APIs without also shipping a tool or process that can upgrade our users' code. We are not yet at the point of easily releasing such tools. Therefore, at this time, shipping a library establishes an API contract which is borderline unchangeable. (We can add new functionality, but we cannot easily change existing behavior.) 
This constraint forces us to very carefully review all APIs that we ship. ## Coding Style To keep the source consistent, readable, diffable and easy to merge, we use a fairly rigid coding style, as defined by the [google-styleguide](https://github.com/google/styleguide) project. All patches will be expected to conform to the style outlined [here](https://google.github.io/styleguide/cppguide.html). ## Guidelines for Pull Requests * If you are a Googler, it is preferable to first create an internal CL and have it reviewed and submitted. The code propagation process will deliver the change to GitHub. * Create **small PRs** that are narrowly focused on **addressing a single concern**. We often receive PRs that are trying to fix several things at a time, but if only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. * For speculative changes, consider opening an [Abseil issue](https://github.com/abseil/abseil-cpp/issues) and discussing it first. If you are suggesting a behavioral or API change, consider starting with an [Abseil proposal template](ABSEIL_ISSUE_TEMPLATE.md). * Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a GitHub issue if it exists. * Don't fix code style and formatting unless you are already changing that line to address an issue. Formatting of modified lines may be done using `git clang-format`. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. * Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. * Maintain **clean commit history** and use **meaningful commit messages**. 
PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). * Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). * **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** (see below) * Exceptions to the rules can be made if there's a compelling reason for doing so. That is - the rules are here to serve us, not the other way around, and the rules need to be serving their intended purpose to be valuable. * All submissions, including submissions by project members, require review. ## Running Tests If you have [Bazel](https://bazel.build/) installed, use `bazel test --test_tag_filters="-benchmark" ...` to run the unit tests. If you are running the Linux operating system and have [Docker](https://www.docker.com/) installed, you can also run the `linux_*.sh` scripts under the `ci/`(https://github.com/abseil/abseil-cpp/tree/master/ci) directory to test Abseil under a variety of conditions. ## Abseil Committers The current members of the Abseil engineering team are the only committers at present. ## Release Process Abseil lives at head, where latest-and-greatest code can be found. abseil-20220623.1/FAQ.md000066400000000000000000000215121430371345100143050ustar00rootroot00000000000000# Abseil FAQ ## Is Abseil the right home for my utility library? Most often the answer to the question is "no." As both the [About Abseil](https://abseil.io/about/) page and our [contributing guidelines](https://github.com/abseil/abseil-cpp/blob/master/CONTRIBUTING.md#contribution-guidelines) explain, Abseil contains a variety of core C++ library code that is widely used at [Google](https://www.google.com/). As such, Abseil's primary purpose is to be used as a dependency by Google's open source C++ projects. 
While we do hope that Abseil is also useful to the C++ community at large, this added constraint also means that we are unlikely to accept a contribution of utility code that isn't already widely used by Google. ## How to I set the C++ dialect used to build Abseil? The short answer is that whatever mechanism you choose, you need to make sure that you set this option consistently at the global level for your entire project. If, for example, you want to set the C++ dialect to C++17, with [Bazel](https://bazel/build/) as the build system and `gcc` or `clang` as the compiler, there several ways to do this: * Pass `--cxxopt=-std=c++17` on the command line (for example, `bazel build --cxxopt=-std=c++17 ...`) * Set the environment variable `BAZEL_CXXOPTS` (for example, `BAZEL_CXXOPTS=-std=c++17`) * Add `build --cxxopt=-std=c++17` to your [`.bazelrc` file](https://docs.bazel.build/versions/master/guide.html#bazelrc) If you are using CMake as the build system, you'll need to add a line like `set(CMAKE_CXX_STANDARD 17)` to your top level `CMakeLists.txt` file. If you are developing a library designed to be used by other clients, you should instead leave `CMAKE_CXX_STANDARD` unset and configure the minimum C++ standard required by each of your library targets via `target_compile_features`. See the [CMake build instructions](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md) for more information. For a longer answer to this question and to understand why some other approaches don't work, see the answer to ["What is ABI and why don't you recommend using a pre-compiled version of Abseil?"](#what-is-abi-and-why-dont-you-recommend-using-a-pre-compiled-version-of-abseil) ## What is ABI and why don't you recommend using a pre-compiled version of Abseil? For the purposes of this discussion, you can think of [ABI](https://en.wikipedia.org/wiki/Application_binary_interface) as the compiled representation of the interfaces in code. 
This is in contrast to [API](https://en.wikipedia.org/wiki/Application_programming_interface), which you can think of as the interfaces as defined by the code itself. [Abseil has a strong promise of API compatibility, but does not make any promise of ABI compatibility](https://abseil.io/about/compatibility). Let's take a look at what this means in practice. You might be tempted to do something like this in a [Bazel](https://bazel.build/) `BUILD` file: ``` # DON'T DO THIS!!! cc_library( name = "my_library", srcs = ["my_library.cc"], copts = ["-std=c++17"], # May create a mixed-mode compile! deps = ["@com_google_absl//absl/strings"], ) ``` Applying `-std=c++17` to an individual target in your `BUILD` file is going to compile that specific target in C++17 mode, but it isn't going to ensure the Abseil library is built in C++17 mode, since the Abseil library itself is a different build target. If your code includes an Abseil header, then your program may contain conflicting definitions of the same class/function/variable/enum, etc. As a rule, all compile options that affect the ABI of a program need to be applied to the entire build on a global basis. C++ has something called the [One Definition Rule](https://en.wikipedia.org/wiki/One_Definition_Rule) (ODR). C++ doesn't allow multiple definitions of the same class/function/variable/enum, etc. ODR violations sometimes result in linker errors, but linkers do not always catch violations. Uncaught ODR violations can result in strange runtime behaviors or crashes that can be hard to debug. If you build the Abseil library and your code using different compile options that affect ABI, there is a good chance you will run afoul of the One Definition Rule. Examples of GCC compile options that affect ABI include (but aren't limited to) language dialect (e.g. `-std=`), optimization level (e.g. `-O2`), code generation flags (e.g. `-fexceptions`), and preprocessor defines (e.g. `-DNDEBUG`). 
If you use a pre-compiled version of Abseil, (for example, from your Linux distribution package manager or from something like [vcpkg](https://github.com/microsoft/vcpkg)) you have to be very careful to ensure ABI compatibility across the components of your program. The only way you can be sure your program is going to be correct regarding ABI is to ensure you've used the exact same compile options as were used to build the pre-compiled library. This does not mean that Abseil cannot work as part of a Linux distribution since a knowledgeable binary packager will have ensured that all packages have been built with consistent compile options. This is one of the reasons we warn against - though do not outright reject - using Abseil as a pre-compiled library. Another possible way that you might afoul of ABI issues is if you accidentally include two versions of Abseil in your program. Multiple versions of Abseil can end up within the same binary if your program uses the Abseil library and another library also transitively depends on Abseil (resulting in what is sometimes called the diamond dependency problem). In cases such as this you must structure your build so that all libraries use the same version of Abseil. [Abseil's strong promise of API compatibility between releases](https://abseil.io/about/compatibility) means the latest "HEAD" release of Abseil is almost certainly the right choice if you are doing as we recommend and building all of your code from source. For these reasons we recommend you avoid pre-compiled code and build the Abseil library yourself in a consistent manner with the rest of your code. ## What is "live at head" and how do I do it? From Abseil's point-of-view, "live at head" means that every Abseil source release (which happens on an almost daily basis) is either API compatible with the previous release, or comes with an automated tool that you can run over code to make it compatible. 
In practice, the need to use an automated tool is extremely rare. This means that upgrading from one source release to another should be a routine practice that can and should be performed often. We recommend you update to the [latest commit in the `master` branch of Abseil](https://github.com/abseil/abseil-cpp/commits/master) as often as possible. Not only will you pick up bug fixes more quickly, but if you have good automated testing, you will catch and be able to fix any [Hyrum's Law](https://www.hyrumslaw.com/) dependency problems on an incremental basis instead of being overwhelmed by them and having difficulty isolating them if you wait longer between updates. If you are using the [Bazel](https://bazel.build/) build system and its [external dependencies](https://docs.bazel.build/versions/master/external.html) feature, updating the [`http_archive`](https://docs.bazel.build/versions/master/repo/http.html#http_archive) rule in your [`WORKSPACE`](https://docs.bazel.build/versions/master/be/workspace.html) for `com_google_abseil` to point to the [latest commit in the `master` branch of Abseil](https://github.com/abseil/abseil-cpp/commits/master) is all you need to do. For example, on February 11, 2020, the latest commit to the master branch was `98eb410c93ad059f9bba1bf43f5bb916fc92a5ea`. To update to this commit, you would add the following snippet to your `WORKSPACE` file: ``` http_archive( name = "com_google_absl", urls = ["https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip"], # 2020-02-11T18:50:53Z strip_prefix = "abseil-cpp-98eb410c93ad059f9bba1bf43f5bb916fc92a5ea", sha256 = "aabf6c57e3834f8dc3873a927f37eaf69975d4b28117fc7427dfb1c661542a87", ) ``` To get the `sha256` of this URL, run `curl -sL --output - https://github.com/abseil/abseil-cpp/archive/98eb410c93ad059f9bba1bf43f5bb916fc92a5ea.zip | sha256sum -`. 
You can commit the updated `WORKSPACE` file to your source control every time you update, and if you have good automated testing, you might even consider automating this. One thing we don't recommend is using GitHub's `master.zip` files (for example [https://github.com/abseil/abseil-cpp/archive/master.zip](https://github.com/abseil/abseil-cpp/archive/master.zip)), which are always the latest commit in the `master` branch, to implement live at head. Since these `master.zip` URLs are not versioned, you will lose build reproducibility. In addition, some build systems, including Bazel, will simply cache this file, which means you won't actually be updating to the latest release until your cache is cleared or invalidated. abseil-20220623.1/LICENSE000066400000000000000000000261411430371345100143640ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. abseil-20220623.1/README.md000066400000000000000000000134751430371345100146440ustar00rootroot00000000000000# Abseil - C++ Common Libraries The repository contains the Abseil C++ library code. Abseil is an open-source collection of C++ code (compliant to C++11) designed to augment the C++ standard library. ## Table of Contents - [About Abseil](#about) - [Quickstart](#quickstart) - [Building Abseil](#build) - [Support](#support) - [Codemap](#codemap) - [Releases](#releases) - [License](#license) - [Links](#links) ## About Abseil Abseil is an open-source collection of C++ library code designed to augment the C++ standard library. The Abseil library code is collected from Google's own C++ code base, has been extensively tested and used in production, and is the same code we depend on in our daily coding lives. In some cases, Abseil provides pieces missing from the C++ standard; in others, Abseil provides alternatives to the standard for special needs we've found through usage in the Google code base. We denote those cases clearly within the library code we provide you. Abseil is not meant to be a competitor to the standard library; we've just found that many of these utilities serve a purpose within our code base, and we now want to provide those resources to the C++ community as a whole. 
## Quickstart If you want to just get started, make sure you at least run through the [Abseil Quickstart](https://abseil.io/docs/cpp/quickstart). The Quickstart contains information about setting up your development environment, downloading the Abseil code, running tests, and getting a simple binary working. ## Building Abseil [Bazel](https://bazel.build) and [CMake](https://cmake.org/) are the official build systems for Abseil. See the [quickstart](https://abseil.io/docs/cpp/quickstart) for more information on building Abseil using the Bazel build system. If you require CMake support, please check the [CMake build instructions](CMake/README.md) and [CMake Quickstart](https://abseil.io/docs/cpp/quickstart-cmake). ## Support Abseil is officially supported on many platforms. See the [Abseil platform support guide](https://abseil.io/docs/cpp/platforms/platforms) for details on supported operating systems, compilers, CPUs, etc. ## Codemap Abseil contains the following C++ library components: * [`base`](absl/base/) Abseil Fundamentals
The `base` library contains initialization code and other code which all other Abseil code depends on. Code within `base` may not depend on any other code (other than the C++ standard library). * [`algorithm`](absl/algorithm/)
The `algorithm` library contains additions to the C++ `` library and container-based versions of such algorithms. * [`cleanup`](absl/cleanup/)
The `cleanup` library contains the control-flow-construct-like type `absl::Cleanup` which is used for executing a callback on scope exit. * [`container`](absl/container/)
The `container` library contains additional STL-style containers, including Abseil's unordered "Swiss table" containers. * [`debugging`](absl/debugging/)
The `debugging` library contains code useful for enabling leak checks, and stacktrace and symbolization utilities. * [`hash`](absl/hash/)
The `hash` library contains the hashing framework and default hash functor implementations for hashable types in Abseil. * [`memory`](absl/memory/)
The `memory` library contains C++11-compatible versions of `std::make_unique()` and related memory management facilities. * [`meta`](absl/meta/)
The `meta` library contains C++11-compatible versions of type checks available within C++14 and C++17 versions of the C++ `` library. * [`numeric`](absl/numeric/)
The `numeric` library contains C++11-compatible 128-bit integers. * [`profiling`](absl/profiling/)
The `profiling` library contains utility code for profiling C++ entities. It is currently a private dependency of other Abseil libraries. * [`status`](absl/status/)
The `status` contains abstractions for error handling, specifically `absl::Status` and `absl::StatusOr`. * [`strings`](absl/strings/)
The `strings` library contains a variety of strings routines and utilities, including a C++11-compatible version of the C++17 `std::string_view` type. * [`synchronization`](absl/synchronization/)
The `synchronization` library contains concurrency primitives (Abseil's `absl::Mutex` class, an alternative to `std::mutex`) and a variety of synchronization abstractions. * [`time`](absl/time/)
The `time` library contains abstractions for computing with absolute points in time, durations of time, and formatting and parsing time within time zones. * [`types`](absl/types/)
The `types` library contains non-container utility types, like a C++11-compatible version of the C++17 `std::optional` type. * [`utility`](absl/utility/)
The `utility` library contains utility and helper code. ## Releases Abseil recommends users "live-at-head" (update to the latest commit from the master branch as often as possible). However, we realize this philosophy doesn't work for every project, so we also provide [Long Term Support Releases](https://github.com/abseil/abseil-cpp/releases) to which we backport fixes for severe bugs. See our [release management](https://abseil.io/about/releases) document for more details. ## License The Abseil C++ library is licensed under the terms of the Apache license. See [LICENSE](LICENSE) for more information. ## Links For more information about Abseil: * Consult our [Abseil Introduction](https://abseil.io/about/intro) * Read [Why Adopt Abseil](https://abseil.io/about/philosophy) to understand our design philosophy. * Peruse our [Abseil Compatibility Guarantees](https://abseil.io/about/compatibility) to understand both what we promise to you, and what we expect of you in return. abseil-20220623.1/UPGRADES.md000066400000000000000000000012021430371345100151020ustar00rootroot00000000000000# C++ Upgrade Tools Abseil may occassionally release API-breaking changes. As noted in our [Compatibility Guidelines][compatibility-guide], we will aim to provide a tool to do the work of effecting such API-breaking changes, when absolutely necessary. These tools will be listed on the [C++ Upgrade Tools][upgrade-tools] guide on https://abseil.io. For more information, the [C++ Automated Upgrade Guide][api-upgrades-guide] outlines this process. [compatibility-guide]: https://abseil.io/about/compatibility [api-upgrades-guide]: https://abseil.io/docs/cpp/tools/api-upgrades [upgrade-tools]: https://abseil.io/docs/cpp/tools/upgrades/ abseil-20220623.1/WORKSPACE000066400000000000000000000047741430371345100146500ustar00rootroot00000000000000# # Copyright 2019 The Abseil Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # workspace(name = "com_google_absl") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") # GoogleTest/GoogleMock framework. Used by most unit-tests. http_archive( name = "com_google_googletest", sha256 = "ce7366fe57eb49928311189cb0e40e0a8bf3d3682fca89af30d884c25e983786", strip_prefix = "googletest-release-1.12.0", # Keep this URL in sync with ABSL_GOOGLETEST_COMMIT in ci/cmake_common.sh. urls = ["https://github.com/google/googletest/archive/refs/tags/release-1.12.0.zip"], ) # RE2 (the regular expression library used by GoogleTest) # Note this must use a commit from the `abseil` branch of the RE2 project. # https://github.com/google/re2/tree/abseil http_archive( name = "com_googlesource_code_re2", sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887", strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12", urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09 ) # Google benchmark. http_archive( name = "com_github_google_benchmark", # 2021-09-20T09:19:51Z sha256 = "62e2f2e6d8a744d67e4bbc212fcfd06647080de4253c97ad5c6749e09faf2cb0", strip_prefix = "benchmark-0baacde3618ca617da95375e0af13ce1baadea47", urls = ["https://github.com/google/benchmark/archive/0baacde3618ca617da95375e0af13ce1baadea47.zip"], ) # Bazel Skylib. 
http_archive( name = "bazel_skylib", urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"], sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", ) # Bazel platform rules. http_archive( name = "platforms", sha256 = "a879ea428c6d56ab0ec18224f976515948822451473a80d06c2e50af0bbe5121", strip_prefix = "platforms-da5541f26b7de1dc8e04c075c99df5351742a4a2", urls = ["https://github.com/bazelbuild/platforms/archive/da5541f26b7de1dc8e04c075c99df5351742a4a2.zip"], # 2022-05-27 ) abseil-20220623.1/absl/000077500000000000000000000000001430371345100142745ustar00rootroot00000000000000abseil-20220623.1/absl/BUILD.bazel000066400000000000000000000046311430371345100161560ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load("@bazel_skylib//lib:selects.bzl", "selects") package(default_visibility = ["//visibility:public"]) licenses(["notice"]) config_setting( name = "clang_compiler", flag_values = { "@bazel_tools//tools/cpp:compiler": "clang", }, visibility = [":__subpackages__"], ) config_setting( name = "msvc_compiler", flag_values = { "@bazel_tools//tools/cpp:compiler": "msvc-cl", }, visibility = [":__subpackages__"], ) config_setting( name = "clang-cl_compiler", flag_values = { "@bazel_tools//tools/cpp:compiler": "clang-cl", }, visibility = [":__subpackages__"], ) config_setting( name = "osx", constraint_values = [ "@platforms//os:osx", ], ) config_setting( name = "ios", constraint_values = [ "@platforms//os:ios", ], ) config_setting( name = "ppc", values = { "cpu": "ppc", }, visibility = [":__subpackages__"], ) config_setting( name = "cpu_wasm", values = { "cpu": "wasm", }, visibility = [":__subpackages__"], ) config_setting( name = "cpu_wasm32", values = { "cpu": "wasm32", }, visibility = [":__subpackages__"], ) config_setting( name = "platforms_wasm32", constraint_values = [ "@platforms//cpu:wasm32", ], visibility = [":__subpackages__"], ) config_setting( name = "platforms_wasm64", constraint_values = [ "@platforms//cpu:wasm64", ], visibility = [":__subpackages__"], ) selects.config_setting_group( name = "wasm", match_any = [ ":cpu_wasm", ":cpu_wasm32", ":platforms_wasm32", ":platforms_wasm64", ], visibility = [":__subpackages__"], ) config_setting( name = "fuchsia", values = { "cpu": "fuchsia", }, visibility = [":__subpackages__"], ) abseil-20220623.1/absl/CMakeLists.txt000066400000000000000000000021561430371345100170400ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # add_subdirectory(base) add_subdirectory(algorithm) add_subdirectory(cleanup) add_subdirectory(container) add_subdirectory(debugging) add_subdirectory(flags) add_subdirectory(functional) add_subdirectory(hash) add_subdirectory(memory) add_subdirectory(meta) add_subdirectory(numeric) add_subdirectory(profiling) add_subdirectory(random) add_subdirectory(status) add_subdirectory(strings) add_subdirectory(synchronization) add_subdirectory(time) add_subdirectory(types) add_subdirectory(utility) if (${ABSL_BUILD_DLL}) absl_make_dll() endif() abseil-20220623.1/absl/abseil.podspec.gen.py000077500000000000000000000163211430371345100203170ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- """This script generates abseil.podspec from all BUILD.bazel files. This is expected to run on abseil git repository with Bazel 1.0 on Linux. It recursively analyzes BUILD.bazel files using query command of Bazel to dump its build rules in XML format. From these rules, it constructs podspec structure. """ import argparse import collections import os import re import subprocess import xml.etree.ElementTree # Template of root podspec. SPEC_TEMPLATE = """ # This file has been automatically generated from a script. # Please make modifications to `abseil.podspec.gen.py` instead. 
Pod::Spec.new do |s| s.name = 'abseil' s.version = '${version}' s.summary = 'Abseil Common Libraries (C++) from Google' s.homepage = 'https://abseil.io' s.license = 'Apache License, Version 2.0' s.authors = { 'Abseil Team' => 'abseil-io@googlegroups.com' } s.source = { :git => 'https://github.com/abseil/abseil-cpp.git', :tag => '${tag}', } s.module_name = 'absl' s.header_mappings_dir = 'absl' s.header_dir = 'absl' s.libraries = 'c++' s.compiler_flags = '-Wno-everything' s.pod_target_xcconfig = { 'USER_HEADER_SEARCH_PATHS' => '$(inherited) "$(PODS_TARGET_SRCROOT)"', 'USE_HEADERMAP' => 'NO', 'ALWAYS_SEARCH_USER_PATHS' => 'NO', } s.ios.deployment_target = '9.0' s.osx.deployment_target = '10.10' s.tvos.deployment_target = '9.0' s.watchos.deployment_target = '2.0' """ # Rule object representing the rule of Bazel BUILD. Rule = collections.namedtuple( "Rule", "type name package srcs hdrs textual_hdrs deps visibility testonly") def get_elem_value(elem, name): """Returns the value of XML element with the given name.""" for child in elem: if child.attrib.get("name") != name: continue if child.tag == "string": return child.attrib.get("value") if child.tag == "boolean": return child.attrib.get("value") == "true" if child.tag == "list": return [nested_child.attrib.get("value") for nested_child in child] raise "Cannot recognize tag: " + child.tag return None def normalize_paths(paths): """Returns the list of normalized path.""" # e.g. 
["//absl/strings:dir/header.h"] -> ["absl/strings/dir/header.h"] return [path.lstrip("/").replace(":", "/") for path in paths] def parse_rule(elem, package): """Returns a rule from bazel XML rule.""" return Rule( type=elem.attrib["class"], name=get_elem_value(elem, "name"), package=package, srcs=normalize_paths(get_elem_value(elem, "srcs") or []), hdrs=normalize_paths(get_elem_value(elem, "hdrs") or []), textual_hdrs=normalize_paths(get_elem_value(elem, "textual_hdrs") or []), deps=get_elem_value(elem, "deps") or [], visibility=get_elem_value(elem, "visibility") or [], testonly=get_elem_value(elem, "testonly") or False) def read_build(package): """Runs bazel query on given package file and returns all cc rules.""" result = subprocess.check_output( ["bazel", "query", package + ":all", "--output", "xml"]) root = xml.etree.ElementTree.fromstring(result) return [ parse_rule(elem, package) for elem in root if elem.tag == "rule" and elem.attrib["class"].startswith("cc_") ] def collect_rules(root_path): """Collects and returns all rules from root path recursively.""" rules = [] for cur, _, _ in os.walk(root_path): build_path = os.path.join(cur, "BUILD.bazel") if os.path.exists(build_path): rules.extend(read_build("//" + cur)) return rules def relevant_rule(rule): """Returns true if a given rule is relevant when generating a podspec.""" return ( # cc_library only (ignore cc_test, cc_binary) rule.type == "cc_library" and # ignore empty rule (rule.hdrs + rule.textual_hdrs + rule.srcs) and # ignore test-only rule not rule.testonly) def get_spec_var(depth): """Returns the name of variable for spec with given depth.""" return "s" if depth == 0 else "s{}".format(depth) def get_spec_name(label): """Converts the label of bazel rule to the name of podspec.""" assert label.startswith("//absl/"), "{} doesn't start with //absl/".format( label) # e.g. 
//absl/apple/banana -> abseil/apple/banana return "abseil/" + label[7:] def write_podspec(f, rules, args): """Writes a podspec from given rules and args.""" rule_dir = build_rule_directory(rules)["abseil"] # Write root part with given arguments spec = re.sub(r"\$\{(\w+)\}", lambda x: args[x.group(1)], SPEC_TEMPLATE).lstrip() f.write(spec) # Write all target rules write_podspec_map(f, rule_dir, 0) f.write("end\n") def build_rule_directory(rules): """Builds a tree-style rule directory from given rules.""" rule_dir = {} for rule in rules: cur = rule_dir for frag in get_spec_name(rule.package).split("/"): cur = cur.setdefault(frag, {}) cur[rule.name] = rule return rule_dir def write_podspec_map(f, cur_map, depth): """Writes podspec from rule map recursively.""" for key, value in sorted(cur_map.items()): indent = " " * (depth + 1) f.write("{indent}{var0}.subspec '{key}' do |{var1}|\n".format( indent=indent, key=key, var0=get_spec_var(depth), var1=get_spec_var(depth + 1))) if isinstance(value, dict): write_podspec_map(f, value, depth + 1) else: write_podspec_rule(f, value, depth + 1) f.write("{indent}end\n".format(indent=indent)) def write_podspec_rule(f, rule, depth): """Writes podspec from given rule.""" indent = " " * (depth + 1) spec_var = get_spec_var(depth) # Puts all files in hdrs, textual_hdrs, and srcs into source_files. # Since CocoaPods treats header_files a bit differently from bazel, # this won't generate a header_files field so that all source_files # are considered as header files. srcs = sorted(set(rule.hdrs + rule.textual_hdrs + rule.srcs)) write_indented_list( f, "{indent}{var}.source_files = ".format(indent=indent, var=spec_var), srcs) # Writes dependencies of this rule. 
for dep in sorted(rule.deps): name = get_spec_name(dep.replace(":", "/")) f.write("{indent}{var}.dependency '{dep}'\n".format( indent=indent, var=spec_var, dep=name)) def write_indented_list(f, leading, values): """Writes leading values in an indented style.""" f.write(leading) f.write((",\n" + " " * len(leading)).join("'{}'".format(v) for v in values)) f.write("\n") def generate(args): """Generates a podspec file from all BUILD files under absl directory.""" rules = filter(relevant_rule, collect_rules("absl")) with open(args.output, "wt") as f: write_podspec(f, rules, vars(args)) def main(): parser = argparse.ArgumentParser( description="Generates abseil.podspec from BUILD.bazel") parser.add_argument( "-v", "--version", help="The version of podspec", required=True) parser.add_argument( "-t", "--tag", default=None, help="The name of git tag (default: version)") parser.add_argument( "-o", "--output", default="abseil.podspec", help="The name of output file (default: abseil.podspec)") args = parser.parse_args() if args.tag is None: args.tag = args.version generate(args) if __name__ == "__main__": main() abseil-20220623.1/absl/algorithm/000077500000000000000000000000001430371345100162625ustar00rootroot00000000000000abseil-20220623.1/absl/algorithm/BUILD.bazel000066400000000000000000000042031430371345100201370ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", "ABSL_TEST_COPTS", ) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) cc_library( name = "algorithm", hdrs = ["algorithm.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", ], ) cc_test( name = "algorithm_test", size = "small", srcs = ["algorithm_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":algorithm", "//absl/base:config", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "algorithm_benchmark", srcs = ["equal_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], deps = [ ":algorithm", "//absl/base:core_headers", "@com_github_google_benchmark//:benchmark_main", ], ) cc_library( name = "container", hdrs = [ "container.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":algorithm", "//absl/base:core_headers", "//absl/meta:type_traits", ], ) cc_test( name = "container_test", srcs = ["container_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":container", "//absl/base", "//absl/base:core_headers", "//absl/memory", "//absl/types:span", "@com_google_googletest//:gtest_main", ], ) abseil-20220623.1/absl/algorithm/CMakeLists.txt000066400000000000000000000024371430371345100210300ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. # absl_cc_library( NAME algorithm HDRS "algorithm.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config PUBLIC ) absl_cc_test( NAME algorithm_test SRCS "algorithm_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::algorithm absl::config GTest::gmock_main ) absl_cc_library( NAME algorithm_container HDRS "container.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::algorithm absl::core_headers absl::meta PUBLIC ) absl_cc_test( NAME container_test SRCS "container_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::algorithm_container absl::base absl::core_headers absl::memory absl::span GTest::gmock_main ) abseil-20220623.1/absl/algorithm/algorithm.h000066400000000000000000000142251430371345100204250ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: algorithm.h // ----------------------------------------------------------------------------- // // This header file contains Google extensions to the standard C++ // header. #ifndef ABSL_ALGORITHM_ALGORITHM_H_ #define ABSL_ALGORITHM_ALGORITHM_H_ #include #include #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace algorithm_internal { // Performs comparisons with operator==, similar to C++14's `std::equal_to<>`. 
struct EqualTo { template bool operator()(const T& a, const U& b) const { return a == b; } }; template bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred pred, std::input_iterator_tag, std::input_iterator_tag) { while (true) { if (first1 == last1) return first2 == last2; if (first2 == last2) return false; if (!pred(*first1, *first2)) return false; ++first1; ++first2; } } template bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred, std::random_access_iterator_tag, std::random_access_iterator_tag) { return (last1 - first1 == last2 - first2) && std::equal(first1, last1, first2, std::forward(pred)); } // When we are using our own internal predicate that just applies operator==, we // forward to the non-predicate form of std::equal. This enables an optimization // in libstdc++ that can result in std::memcmp being used for integer types. template bool EqualImpl(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, algorithm_internal::EqualTo /* unused */, std::random_access_iterator_tag, std::random_access_iterator_tag) { return (last1 - first1 == last2 - first2) && std::equal(first1, last1, first2); } template It RotateImpl(It first, It middle, It last, std::true_type) { return std::rotate(first, middle, last); } template It RotateImpl(It first, It middle, It last, std::false_type) { std::rotate(first, middle, last); return std::next(first, std::distance(middle, last)); } } // namespace algorithm_internal // equal() // // Compares the equality of two ranges specified by pairs of iterators, using // the given predicate, returning true iff for each corresponding iterator i1 // and i2 in the first and second range respectively, pred(*i1, *i2) == true // // This comparison takes at most min(`last1` - `first1`, `last2` - `first2`) // invocations of the predicate. 
Additionally, if InputIter1 and InputIter2 are // both random-access iterators, and `last1` - `first1` != `last2` - `first2`, // then the predicate is never invoked and the function returns false. // // This is a C++11-compatible implementation of C++14 `std::equal`. See // https://en.cppreference.com/w/cpp/algorithm/equal for more information. template bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2, Pred&& pred) { return algorithm_internal::EqualImpl( first1, last1, first2, last2, std::forward(pred), typename std::iterator_traits::iterator_category{}, typename std::iterator_traits::iterator_category{}); } // Overload of equal() that performs comparison of two ranges specified by pairs // of iterators using operator==. template bool equal(InputIter1 first1, InputIter1 last1, InputIter2 first2, InputIter2 last2) { return absl::equal(first1, last1, first2, last2, algorithm_internal::EqualTo{}); } // linear_search() // // Performs a linear search for `value` using the iterator `first` up to // but not including `last`, returning true if [`first`, `last`) contains an // element equal to `value`. // // A linear search is of O(n) complexity which is guaranteed to make at most // n = (`last` - `first`) comparisons. A linear search over short containers // may be faster than a binary search, even when the container is sorted. template bool linear_search(InputIterator first, InputIterator last, const EqualityComparable& value) { return std::find(first, last, value) != last; } // rotate() // // Performs a left rotation on a range of elements (`first`, `last`) such that // `middle` is now the first element. `rotate()` returns an iterator pointing to // the first element before rotation. This function is exactly the same as // `std::rotate`, but fixes a bug in gcc // <= 4.9 where `std::rotate` returns `void` instead of an iterator. 
// // The complexity of this algorithm is the same as that of `std::rotate`, but if // `ForwardIterator` is not a random-access iterator, then `absl::rotate` // performs an additional pass over the range to construct the return value. template ForwardIterator rotate(ForwardIterator first, ForwardIterator middle, ForwardIterator last) { return algorithm_internal::RotateImpl( first, middle, last, std::is_same()); } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_ALGORITHM_ALGORITHM_H_ abseil-20220623.1/absl/algorithm/algorithm_test.cc000066400000000000000000000140151430371345100216170ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/algorithm/algorithm.h" #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" namespace { TEST(EqualTest, DefaultComparisonRandomAccess) { std::vector v1{1, 2, 3}; std::vector v2 = v1; std::vector v3 = {1, 2}; std::vector v4 = {1, 2, 4}; EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); } TEST(EqualTest, DefaultComparison) { std::list lst1{1, 2, 3}; std::list lst2 = lst1; std::list lst3{1, 2}; std::list lst4{1, 2, 4}; EXPECT_TRUE(absl::equal(lst1.begin(), lst1.end(), lst2.begin(), lst2.end())); EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst3.begin(), lst3.end())); EXPECT_FALSE(absl::equal(lst1.begin(), lst1.end(), lst4.begin(), lst4.end())); } TEST(EqualTest, EmptyRange) { std::vector v1{1, 2, 3}; std::vector empty1; std::vector empty2; // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105705 #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wnonnull" #endif EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), empty1.begin(), empty1.end())); #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic pop #endif EXPECT_FALSE(absl::equal(empty1.begin(), empty1.end(), v1.begin(), v1.end())); EXPECT_TRUE( absl::equal(empty1.begin(), empty1.end(), empty2.begin(), empty2.end())); } TEST(EqualTest, MixedIterTypes) { std::vector v1{1, 2, 3}; std::list lst1{v1.begin(), v1.end()}; std::list lst2{1, 2, 4}; std::list lst3{1, 2}; EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), lst1.begin(), lst1.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst2.begin(), lst2.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), lst3.begin(), lst3.end())); } TEST(EqualTest, MixedValueTypes) { std::vector v1{1, 2, 3}; std::vector v2{1, 2, 3}; std::vector v3{1, 2}; std::vector v4{1, 2, 4}; 
EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); } TEST(EqualTest, WeirdIterators) { std::vector v1{true, false}; std::vector v2 = v1; std::vector v3{true}; std::vector v4{true, true, true}; EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end())); } TEST(EqualTest, CustomComparison) { int n[] = {1, 2, 3, 4}; std::vector v1{&n[0], &n[1], &n[2]}; std::vector v2 = v1; std::vector v3{&n[0], &n[1], &n[3]}; std::vector v4{&n[0], &n[1]}; auto eq = [](int* a, int* b) { return *a == *b; }; EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), eq)); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), eq)); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v4.begin(), v4.end(), eq)); } TEST(EqualTest, MoveOnlyPredicate) { std::vector v1{1, 2, 3}; std::vector v2{4, 5, 6}; // move-only equality predicate struct Eq { Eq() = default; Eq(Eq &&) = default; Eq(const Eq &) = delete; Eq &operator=(const Eq &) = delete; bool operator()(const int a, const int b) const { return a == b; } }; EXPECT_TRUE(absl::equal(v1.begin(), v1.end(), v1.begin(), v1.end(), Eq())); EXPECT_FALSE(absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), Eq())); } struct CountingTrivialPred { int* count; bool operator()(int, int) const { ++*count; return true; } }; TEST(EqualTest, RandomAccessComplexity) { std::vector v1{1, 1, 3}; std::vector v2 = v1; std::vector v3{1, 2}; do { int count = 0; absl::equal(v1.begin(), v1.end(), v2.begin(), v2.end(), CountingTrivialPred{&count}); EXPECT_LE(count, 3); } while (std::next_permutation(v2.begin(), v2.end())); int count = 0; absl::equal(v1.begin(), v1.end(), v3.begin(), v3.end(), CountingTrivialPred{&count}); EXPECT_EQ(count, 
0); } class LinearSearchTest : public testing::Test { protected: LinearSearchTest() : container_{1, 2, 3} {} static bool Is3(int n) { return n == 3; } static bool Is4(int n) { return n == 4; } std::vector container_; }; TEST_F(LinearSearchTest, linear_search) { EXPECT_TRUE(absl::linear_search(container_.begin(), container_.end(), 3)); EXPECT_FALSE(absl::linear_search(container_.begin(), container_.end(), 4)); } TEST_F(LinearSearchTest, linear_searchConst) { const std::vector *const const_container = &container_; EXPECT_TRUE( absl::linear_search(const_container->begin(), const_container->end(), 3)); EXPECT_FALSE( absl::linear_search(const_container->begin(), const_container->end(), 4)); } TEST(RotateTest, Rotate) { std::vector v{0, 1, 2, 3, 4}; EXPECT_EQ(*absl::rotate(v.begin(), v.begin() + 2, v.end()), 0); EXPECT_THAT(v, testing::ElementsAreArray({2, 3, 4, 0, 1})); std::list l{0, 1, 2, 3, 4}; EXPECT_EQ(*absl::rotate(l.begin(), std::next(l.begin(), 3), l.end()), 0); EXPECT_THAT(l, testing::ElementsAreArray({3, 4, 0, 1, 2})); } } // namespace abseil-20220623.1/absl/algorithm/container.h000066400000000000000000002307361430371345100204300ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // ----------------------------------------------------------------------------- // File: container.h // ----------------------------------------------------------------------------- // // This header file provides Container-based versions of algorithmic functions // within the C++ standard library. The following standard library sets of // functions are covered within this file: // // * Algorithmic functions // * Algorithmic functions // * functions // // The standard library functions operate on iterator ranges; the functions // within this API operate on containers, though many return iterator ranges. // // All functions within this API are named with a `c_` prefix. Calls such as // `absl::c_xx(container, ...) are equivalent to std:: functions such as // `std::xx(std::begin(cont), std::end(cont), ...)`. Functions that act on // iterators but not conceptually on iterator ranges (e.g. `std::iter_swap`) // have no equivalent here. // // For template parameter and variable naming, `C` indicates the container type // to which the function is applied, `Pred` indicates the predicate object type // to be used by the function and `T` indicates the applicable element type. #ifndef ABSL_ALGORITHM_CONTAINER_H_ #define ABSL_ALGORITHM_CONTAINER_H_ #include #include #include #include #include #include #include #include #include #include "absl/algorithm/algorithm.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_algorithm_internal { // NOTE: it is important to defer to ADL lookup for building with C++ modules, // especially for headers like which are not visible from this file // but specialize std::begin and std::end. using std::begin; using std::end; // The type of the iterator given by begin(c) (possibly std::begin(c)). // ContainerIter> gives vector::const_iterator, // while ContainerIter> gives vector::iterator. 
template using ContainerIter = decltype(begin(std::declval())); // An MSVC bug involving template parameter substitution requires us to use // decltype() here instead of just std::pair. template using ContainerIterPairType = decltype(std::make_pair(ContainerIter(), ContainerIter())); template using ContainerDifferenceType = decltype(std::distance(std::declval>(), std::declval>())); template using ContainerPointerType = typename std::iterator_traits>::pointer; // container_algorithm_internal::c_begin and // container_algorithm_internal::c_end are abbreviations for proper ADL // lookup of std::begin and std::end, i.e. // using std::begin; // using std::end; // std::foo(begin(c), end(c)); // becomes // std::foo(container_algorithm_internal::begin(c), // container_algorithm_internal::end(c)); // These are meant for internal use only. template ContainerIter c_begin(C& c) { return begin(c); } template ContainerIter c_end(C& c) { return end(c); } template struct IsUnorderedContainer : std::false_type {}; template struct IsUnorderedContainer< std::unordered_map> : std::true_type {}; template struct IsUnorderedContainer> : std::true_type {}; // container_algorithm_internal::c_size. It is meant for internal use only. template auto c_size(C& c) -> decltype(c.size()) { return c.size(); } template constexpr std::size_t c_size(T (&)[N]) { return N; } } // namespace container_algorithm_internal // PUBLIC API //------------------------------------------------------------------------------ // Abseil algorithm.h functions //------------------------------------------------------------------------------ // c_linear_search() // // Container-based version of absl::linear_search() for performing a linear // search within a container. 
template bool c_linear_search(const C& c, EqualityComparable&& value) { return linear_search(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); } //------------------------------------------------------------------------------ // algorithms //------------------------------------------------------------------------------ // c_distance() // // Container-based version of the `std::distance()` function to // return the number of elements within a container. template container_algorithm_internal::ContainerDifferenceType c_distance( const C& c) { return std::distance(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } //------------------------------------------------------------------------------ // Non-modifying sequence operations //------------------------------------------------------------------------------ // c_all_of() // // Container-based version of the `std::all_of()` function to // test if all elements within a container satisfy a condition. template bool c_all_of(const C& c, Pred&& pred) { return std::all_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_any_of() // // Container-based version of the `std::any_of()` function to // test if any element in a container fulfills a condition. template bool c_any_of(const C& c, Pred&& pred) { return std::any_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_none_of() // // Container-based version of the `std::none_of()` function to // test if no elements in a container fulfill a condition. template bool c_none_of(const C& c, Pred&& pred) { return std::none_of(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_for_each() // // Container-based version of the `std::for_each()` function to // apply a function to a container's elements. 
template decay_t c_for_each(C&& c, Function&& f) { return std::for_each(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(f)); } // c_find() // // Container-based version of the `std::find()` function to find // the first element containing the passed value within a container value. template container_algorithm_internal::ContainerIter c_find(C& c, T&& value) { return std::find(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); } // c_find_if() // // Container-based version of the `std::find_if()` function to find // the first element in a container matching the given condition. template container_algorithm_internal::ContainerIter c_find_if(C& c, Pred&& pred) { return std::find_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_find_if_not() // // Container-based version of the `std::find_if_not()` function to // find the first element in a container not matching the given condition. template container_algorithm_internal::ContainerIter c_find_if_not(C& c, Pred&& pred) { return std::find_if_not(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_find_end() // // Container-based version of the `std::find_end()` function to // find the last subsequence within a container. template container_algorithm_internal::ContainerIter c_find_end( Sequence1& sequence, Sequence2& subsequence) { return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); } // Overload of c_find_end() for using a predicate evaluation other than `==` as // the function's test condition. 
template container_algorithm_internal::ContainerIter c_find_end( Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { return std::find_end(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); } // c_find_first_of() // // Container-based version of the `std::find_first_of()` function to // find the first element within the container that is also within the options // container. template container_algorithm_internal::ContainerIter c_find_first_of(C1& container, C2& options) { return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options)); } // Overload of c_find_first_of() for using a predicate evaluation other than // `==` as the function's test condition. template container_algorithm_internal::ContainerIter c_find_first_of( C1& container, C2& options, BinaryPredicate&& pred) { return std::find_first_of(container_algorithm_internal::c_begin(container), container_algorithm_internal::c_end(container), container_algorithm_internal::c_begin(options), container_algorithm_internal::c_end(options), std::forward(pred)); } // c_adjacent_find() // // Container-based version of the `std::adjacent_find()` function to // find equal adjacent elements within a container. template container_algorithm_internal::ContainerIter c_adjacent_find( Sequence& sequence) { return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_adjacent_find() for using a predicate evaluation other than // `==` as the function's test condition. 
template container_algorithm_internal::ContainerIter c_adjacent_find( Sequence& sequence, BinaryPredicate&& pred) { return std::adjacent_find(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(pred)); } // c_count() // // Container-based version of the `std::count()` function to count // values that match within a container. template container_algorithm_internal::ContainerDifferenceType c_count( const C& c, T&& value) { return std::count(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); } // c_count_if() // // Container-based version of the `std::count_if()` function to // count values matching a condition within a container. template container_algorithm_internal::ContainerDifferenceType c_count_if( const C& c, Pred&& pred) { return std::count_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_mismatch() // // Container-based version of the `std::mismatch()` function to // return the first element where two ordered containers differ. Applies `==` to // the first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). template container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, C2& c2) { auto first1 = container_algorithm_internal::c_begin(c1); auto last1 = container_algorithm_internal::c_end(c1); auto first2 = container_algorithm_internal::c_begin(c2); auto last2 = container_algorithm_internal::c_end(c2); for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { // Negates equality because Cpp17EqualityComparable doesn't require clients // to overload both `operator==` and `operator!=`. if (!(*first1 == *first2)) { break; } } return std::make_pair(first1, first2); } // Overload of c_mismatch() for using a predicate evaluation other than `==` as // the function's test condition. 
Applies `pred`to the first N elements of `c1` // and `c2`, where N = min(size(c1), size(c2)). template container_algorithm_internal::ContainerIterPairType c_mismatch(C1& c1, C2& c2, BinaryPredicate pred) { auto first1 = container_algorithm_internal::c_begin(c1); auto last1 = container_algorithm_internal::c_end(c1); auto first2 = container_algorithm_internal::c_begin(c2); auto last2 = container_algorithm_internal::c_end(c2); for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { if (!pred(*first1, *first2)) { break; } } return std::make_pair(first1, first2); } // c_equal() // // Container-based version of the `std::equal()` function to // test whether two containers are equal. // // NOTE: the semantics of c_equal() are slightly different than those of // equal(): while the latter iterates over the second container only up to the // size of the first container, c_equal() also checks whether the container // sizes are equal. This better matches expectations about c_equal() based on // its signature. // // Example: // vector v1 = <1, 2, 3>; // vector v2 = <1, 2, 3, 4>; // equal(std::begin(v1), std::end(v1), std::begin(v2)) returns true // c_equal(v1, v2) returns false template bool c_equal(const C1& c1, const C2& c2) { return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2))); } // Overload of c_equal() for using a predicate evaluation other than `==` as // the function's test condition. 
template bool c_equal(const C1& c1, const C2& c2, BinaryPredicate&& pred) { return ((container_algorithm_internal::c_size(c1) == container_algorithm_internal::c_size(c2)) && std::equal(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), std::forward(pred))); } // c_is_permutation() // // Container-based version of the `std::is_permutation()` function // to test whether a container is a permutation of another. template bool c_is_permutation(const C1& c1, const C2& c2) { using std::begin; using std::end; return c1.size() == c2.size() && std::is_permutation(begin(c1), end(c1), begin(c2)); } // Overload of c_is_permutation() for using a predicate evaluation other than // `==` as the function's test condition. template bool c_is_permutation(const C1& c1, const C2& c2, BinaryPredicate&& pred) { using std::begin; using std::end; return c1.size() == c2.size() && std::is_permutation(begin(c1), end(c1), begin(c2), std::forward(pred)); } // c_search() // // Container-based version of the `std::search()` function to search // a container for a subsequence. template container_algorithm_internal::ContainerIter c_search( Sequence1& sequence, Sequence2& subsequence) { return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence)); } // Overload of c_search() for using a predicate evaluation other than // `==` as the function's test condition. 
template container_algorithm_internal::ContainerIter c_search( Sequence1& sequence, Sequence2& subsequence, BinaryPredicate&& pred) { return std::search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(subsequence), container_algorithm_internal::c_end(subsequence), std::forward(pred)); } // c_search_n() // // Container-based version of the `std::search_n()` function to // search a container for the first sequence of N elements. template container_algorithm_internal::ContainerIter c_search_n( Sequence& sequence, Size count, T&& value) { return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value)); } // Overload of c_search_n() for using a predicate evaluation other than // `==` as the function's test condition. template container_algorithm_internal::ContainerIter c_search_n( Sequence& sequence, Size count, T&& value, BinaryPredicate&& pred) { return std::search_n(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), count, std::forward(value), std::forward(pred)); } //------------------------------------------------------------------------------ // Modifying sequence operations //------------------------------------------------------------------------------ // c_copy() // // Container-based version of the `std::copy()` function to copy a // container's elements into an iterator. template OutputIterator c_copy(const InputSequence& input, OutputIterator output) { return std::copy(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output); } // c_copy_n() // // Container-based version of the `std::copy_n()` function to copy a // container's first N elements into an iterator. 
template OutputIterator c_copy_n(const C& input, Size n, OutputIterator output) { return std::copy_n(container_algorithm_internal::c_begin(input), n, output); } // c_copy_if() // // Container-based version of the `std::copy_if()` function to copy // a container's elements satisfying some condition into an iterator. template OutputIterator c_copy_if(const InputSequence& input, OutputIterator output, Pred&& pred) { return std::copy_if(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(pred)); } // c_copy_backward() // // Container-based version of the `std::copy_backward()` function to // copy a container's elements in reverse order into an iterator. template BidirectionalIterator c_copy_backward(const C& src, BidirectionalIterator dest) { return std::copy_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } // c_move() // // Container-based version of the `std::move()` function to move // a container's elements into an iterator. template OutputIterator c_move(C&& src, OutputIterator dest) { return std::move(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } // c_move_backward() // // Container-based version of the `std::move_backward()` function to // move a container's elements into an iterator in reverse order. template BidirectionalIterator c_move_backward(C&& src, BidirectionalIterator dest) { return std::move_backward(container_algorithm_internal::c_begin(src), container_algorithm_internal::c_end(src), dest); } // c_swap_ranges() // // Container-based version of the `std::swap_ranges()` function to // swap a container's elements with another container's elements. Swaps the // first N elements of `c1` and `c2`, where N = min(size(c1), size(c2)). 
template container_algorithm_internal::ContainerIter c_swap_ranges(C1& c1, C2& c2) { auto first1 = container_algorithm_internal::c_begin(c1); auto last1 = container_algorithm_internal::c_end(c1); auto first2 = container_algorithm_internal::c_begin(c2); auto last2 = container_algorithm_internal::c_end(c2); using std::swap; for (; first1 != last1 && first2 != last2; ++first1, (void)++first2) { swap(*first1, *first2); } return first2; } // c_transform() // // Container-based version of the `std::transform()` function to // transform a container's elements using the unary operation, storing the // result in an iterator pointing to the last transformed element in the output // range. template OutputIterator c_transform(const InputSequence& input, OutputIterator output, UnaryOp&& unary_op) { return std::transform(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output, std::forward(unary_op)); } // Overload of c_transform() for performing a transformation using a binary // predicate. Applies `binary_op` to the first N elements of `c1` and `c2`, // where N = min(size(c1), size(c2)). template OutputIterator c_transform(const InputSequence1& input1, const InputSequence2& input2, OutputIterator output, BinaryOp&& binary_op) { auto first1 = container_algorithm_internal::c_begin(input1); auto last1 = container_algorithm_internal::c_end(input1); auto first2 = container_algorithm_internal::c_begin(input2); auto last2 = container_algorithm_internal::c_end(input2); for (; first1 != last1 && first2 != last2; ++first1, (void)++first2, ++output) { *output = binary_op(*first1, *first2); } return output; } // c_replace() // // Container-based version of the `std::replace()` function to // replace a container's elements of some value with a new value. The container // is modified in place. 
template void c_replace(Sequence& sequence, const T& old_value, const T& new_value) { std::replace(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), old_value, new_value); } // c_replace_if() // // Container-based version of the `std::replace_if()` function to // replace a container's elements of some value with a new value based on some // condition. The container is modified in place. template void c_replace_if(C& c, Pred&& pred, T&& new_value) { std::replace_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred), std::forward(new_value)); } // c_replace_copy() // // Container-based version of the `std::replace_copy()` function to // replace a container's elements of some value with a new value and return the // results within an iterator. template OutputIterator c_replace_copy(const C& c, OutputIterator result, T&& old_value, T&& new_value) { return std::replace_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(old_value), std::forward(new_value)); } // c_replace_copy_if() // // Container-based version of the `std::replace_copy_if()` function // to replace a container's elements of some value with a new value based on // some condition, and return the results within an iterator. template OutputIterator c_replace_copy_if(const C& c, OutputIterator result, Pred&& pred, T&& new_value) { return std::replace_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred), std::forward(new_value)); } // c_fill() // // Container-based version of the `std::fill()` function to fill a // container with some value. 
template void c_fill(C& c, T&& value) { std::fill(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(value)); } // c_fill_n() // // Container-based version of the `std::fill_n()` function to fill // the first N elements in a container with some value. template void c_fill_n(C& c, Size n, T&& value) { std::fill_n(container_algorithm_internal::c_begin(c), n, std::forward(value)); } // c_generate() // // Container-based version of the `std::generate()` function to // assign a container's elements to the values provided by the given generator. template void c_generate(C& c, Generator&& gen) { std::generate(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); } // c_generate_n() // // Container-based version of the `std::generate_n()` function to // assign a container's first N elements to the values provided by the given // generator. template container_algorithm_internal::ContainerIter c_generate_n(C& c, Size n, Generator&& gen) { return std::generate_n(container_algorithm_internal::c_begin(c), n, std::forward(gen)); } // Note: `c_xx()` container versions for `remove()`, `remove_if()`, // and `unique()` are omitted, because it's not clear whether or not such // functions should call erase on their supplied sequences afterwards. Either // behavior would be surprising for a different set of users. // c_remove_copy() // // Container-based version of the `std::remove_copy()` function to // copy a container's elements while removing any elements matching the given // `value`. template OutputIterator c_remove_copy(const C& c, OutputIterator result, T&& value) { return std::remove_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(value)); } // c_remove_copy_if() // // Container-based version of the `std::remove_copy_if()` function // to copy a container's elements while removing any elements matching the given // condition. 
template OutputIterator c_remove_copy_if(const C& c, OutputIterator result, Pred&& pred) { return std::remove_copy_if(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); } // c_unique_copy() // // Container-based version of the `std::unique_copy()` function to // copy a container's elements while removing any elements containing duplicate // values. template OutputIterator c_unique_copy(const C& c, OutputIterator result) { return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result); } // Overload of c_unique_copy() for using a predicate evaluation other than // `==` for comparing uniqueness of the element values. template OutputIterator c_unique_copy(const C& c, OutputIterator result, BinaryPredicate&& pred) { return std::unique_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), result, std::forward(pred)); } // c_reverse() // // Container-based version of the `std::reverse()` function to // reverse a container's elements. template void c_reverse(Sequence& sequence) { std::reverse(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // c_reverse_copy() // // Container-based version of the `std::reverse()` function to // reverse a container's elements and write them to an iterator range. template OutputIterator c_reverse_copy(const C& sequence, OutputIterator result) { return std::reverse_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), result); } // c_rotate() // // Container-based version of the `std::rotate()` function to // shift a container's elements leftward such that the `middle` element becomes // the first element in the container. 
template > Iterator c_rotate(C& sequence, Iterator middle) { return absl::rotate(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); } // c_rotate_copy() // // Container-based version of the `std::rotate_copy()` function to // shift a container's elements leftward such that the `middle` element becomes // the first element in a new iterator range. template OutputIterator c_rotate_copy( const C& sequence, container_algorithm_internal::ContainerIter middle, OutputIterator result) { return std::rotate_copy(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), result); } // c_shuffle() // // Container-based version of the `std::shuffle()` function to // randomly shuffle elements within the container using a `gen()` uniform random // number generator. template void c_shuffle(RandomAccessContainer& c, UniformRandomBitGenerator&& gen) { std::shuffle(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(gen)); } //------------------------------------------------------------------------------ // Partition functions //------------------------------------------------------------------------------ // c_is_partitioned() // // Container-based version of the `std::is_partitioned()` function // to test whether all elements in the container for which `pred` returns `true` // precede those for which `pred` is `false`. template bool c_is_partitioned(const C& c, Pred&& pred) { return std::is_partitioned(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_partition() // // Container-based version of the `std::partition()` function // to rearrange all elements in a container in such a way that all elements for // which `pred` returns `true` precede all those for which it returns `false`, // returning an iterator to the first element of the second group. 
template container_algorithm_internal::ContainerIter c_partition(C& c, Pred&& pred) { return std::partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_stable_partition() // // Container-based version of the `std::stable_partition()` function // to rearrange all elements in a container in such a way that all elements for // which `pred` returns `true` precede all those for which it returns `false`, // preserving the relative ordering between the two groups. The function returns // an iterator to the first element of the second group. template container_algorithm_internal::ContainerIter c_stable_partition(C& c, Pred&& pred) { return std::stable_partition(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } // c_partition_copy() // // Container-based version of the `std::partition_copy()` function // to partition a container's elements and return them into two iterators: one // for which `pred` returns `true`, and one for which `pred` returns `false.` template std::pair c_partition_copy( const C& c, OutputIterator1 out_true, OutputIterator2 out_false, Pred&& pred) { return std::partition_copy(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), out_true, out_false, std::forward(pred)); } // c_partition_point() // // Container-based version of the `std::partition_point()` function // to return the first element of an already partitioned container for which // the given `pred` is not `true`. 
template container_algorithm_internal::ContainerIter c_partition_point(C& c, Pred&& pred) { return std::partition_point(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(pred)); } //------------------------------------------------------------------------------ // Sorting functions //------------------------------------------------------------------------------ // c_sort() // // Container-based version of the `std::sort()` function // to sort elements in ascending order of their values. template void c_sort(C& c) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_sort() for performing a `comp` comparison other than the // default `operator<`. template void c_sort(C& c, LessThan&& comp) { std::sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } // c_stable_sort() // // Container-based version of the `std::stable_sort()` function // to sort elements in ascending order of their values, preserving the order // of equivalents. template void c_stable_sort(C& c) { std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_stable_sort() for performing a `comp` comparison other than the // default `operator<`. template void c_stable_sort(C& c, LessThan&& comp) { std::stable_sort(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } // c_is_sorted() // // Container-based version of the `std::is_sorted()` function // to evaluate whether the given container is sorted in ascending order. template bool c_is_sorted(const C& c) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // c_is_sorted() overload for performing a `comp` comparison other than the // default `operator<`. 
template bool c_is_sorted(const C& c, LessThan&& comp) { return std::is_sorted(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } // c_partial_sort() // // Container-based version of the `std::partial_sort()` function // to rearrange elements within a container such that elements before `middle` // are sorted in ascending order. template void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle) { std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence)); } // Overload of c_partial_sort() for performing a `comp` comparison other than // the default `operator<`. template void c_partial_sort( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter middle, LessThan&& comp) { std::partial_sort(container_algorithm_internal::c_begin(sequence), middle, container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_partial_sort_copy() // // Container-based version of the `std::partial_sort_copy()` // function to sort the elements in the given range `result` within the larger // `sequence` in ascending order (and using `result` as the output parameter). // At most min(result.last - result.first, sequence.last - sequence.first) // elements from the sequence will be stored in the result. template container_algorithm_internal::ContainerIter c_partial_sort_copy(const C& sequence, RandomAccessContainer& result) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result)); } // Overload of c_partial_sort_copy() for performing a `comp` comparison other // than the default `operator<`. 
template container_algorithm_internal::ContainerIter c_partial_sort_copy(const C& sequence, RandomAccessContainer& result, LessThan&& comp) { return std::partial_sort_copy(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), container_algorithm_internal::c_begin(result), container_algorithm_internal::c_end(result), std::forward(comp)); } // c_is_sorted_until() // // Container-based version of the `std::is_sorted_until()` function // to return the first element within a container that is not sorted in // ascending order as an iterator. template container_algorithm_internal::ContainerIter c_is_sorted_until(C& c) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_is_sorted_until() for performing a `comp` comparison other than // the default `operator<`. template container_algorithm_internal::ContainerIter c_is_sorted_until( C& c, LessThan&& comp) { return std::is_sorted_until(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } // c_nth_element() // // Container-based version of the `std::nth_element()` function // to rearrange the elements within a container such that the `nth` element // would be in that position in an ordered sequence; other elements may be in // any order, except that all preceding `nth` will be less than that element, // and all following `nth` will be greater than that element. template void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth) { std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence)); } // Overload of c_nth_element() for performing a `comp` comparison other than // the default `operator<`. 
template void c_nth_element( RandomAccessContainer& sequence, container_algorithm_internal::ContainerIter nth, LessThan&& comp) { std::nth_element(container_algorithm_internal::c_begin(sequence), nth, container_algorithm_internal::c_end(sequence), std::forward(comp)); } //------------------------------------------------------------------------------ // Binary Search //------------------------------------------------------------------------------ // c_lower_bound() // // Container-based version of the `std::lower_bound()` function // to return an iterator pointing to the first element in a sorted container // which does not compare less than `value`. template container_algorithm_internal::ContainerIter c_lower_bound( Sequence& sequence, T&& value) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); } // Overload of c_lower_bound() for performing a `comp` comparison other than // the default `operator<`. template container_algorithm_internal::ContainerIter c_lower_bound( Sequence& sequence, T&& value, LessThan&& comp) { return std::lower_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); } // c_upper_bound() // // Container-based version of the `std::upper_bound()` function // to return an iterator pointing to the first element in a sorted container // which is greater than `value`. template container_algorithm_internal::ContainerIter c_upper_bound( Sequence& sequence, T&& value) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); } // Overload of c_upper_bound() for performing a `comp` comparison other than // the default `operator<`. 
template container_algorithm_internal::ContainerIter c_upper_bound( Sequence& sequence, T&& value, LessThan&& comp) { return std::upper_bound(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); } // c_equal_range() // // Container-based version of the `std::equal_range()` function // to return an iterator pair pointing to the first and last elements in a // sorted container which compare equal to `value`. template container_algorithm_internal::ContainerIterPairType c_equal_range(Sequence& sequence, T&& value) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); } // Overload of c_equal_range() for performing a `comp` comparison other than // the default `operator<`. template container_algorithm_internal::ContainerIterPairType c_equal_range(Sequence& sequence, T&& value, LessThan&& comp) { return std::equal_range(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); } // c_binary_search() // // Container-based version of the `std::binary_search()` function // to test if any element in the sorted container contains a value equivalent to // 'value'. template bool c_binary_search(Sequence&& sequence, T&& value) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); } // Overload of c_binary_search() for performing a `comp` comparison other than // the default `operator<`. 
template bool c_binary_search(Sequence&& sequence, T&& value, LessThan&& comp) { return std::binary_search(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value), std::forward(comp)); } //------------------------------------------------------------------------------ // Merge functions //------------------------------------------------------------------------------ // c_merge() // // Container-based version of the `std::merge()` function // to merge two sorted containers into a single sorted iterator. template OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result); } // Overload of c_merge() for performing a `comp` comparison other than // the default `operator<`. template OutputIterator c_merge(const C1& c1, const C2& c2, OutputIterator result, LessThan&& comp) { return std::merge(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), result, std::forward(comp)); } // c_inplace_merge() // // Container-based version of the `std::inplace_merge()` function // to merge a supplied iterator `middle` into a container. template void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle) { std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c)); } // Overload of c_inplace_merge() for performing a merge using a `comp` other // than `operator<`. 
template void c_inplace_merge(C& c, container_algorithm_internal::ContainerIter middle, LessThan&& comp) { std::inplace_merge(container_algorithm_internal::c_begin(c), middle, container_algorithm_internal::c_end(c), std::forward(comp)); } // c_includes() // // Container-based version of the `std::includes()` function // to test whether a sorted container `c1` entirely contains another sorted // container `c2`. template bool c_includes(const C1& c1, const C2& c2) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2)); } // Overload of c_includes() for performing a merge using a `comp` other than // `operator<`. template bool c_includes(const C1& c1, const C2& c2, LessThan&& comp) { return std::includes(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), std::forward(comp)); } // c_set_union() // // Container-based version of the `std::set_union()` function // to return an iterator containing the union of two containers; duplicate // values are not copied into the output. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); } // Overload of c_set_union() for performing a merge using a `comp` other than // `operator<`. 
template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_union(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return std::set_union(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); } // c_set_intersection() // // Container-based version of the `std::set_intersection()` function // to return an iterator containing the intersection of two sorted containers. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output) { // In debug builds, ensure that both containers are sorted with respect to the // default comparator. std::set_intersection requires the containers be sorted // using operator<. assert(absl::c_is_sorted(c1)); assert(absl::c_is_sorted(c2)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); } // Overload of c_set_intersection() for performing a merge using a `comp` other // than `operator<`. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_intersection(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { // In debug builds, ensure that both containers are sorted with respect to the // default comparator. std::set_intersection requires the containers be sorted // using the same comparator. 
assert(absl::c_is_sorted(c1, comp)); assert(absl::c_is_sorted(c2, comp)); return std::set_intersection(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); } // c_set_difference() // // Container-based version of the `std::set_difference()` function // to return an iterator containing elements present in the first container but // not in the second. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); } // Overload of c_set_difference() for performing a merge using a `comp` other // than `operator<`. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return std::set_difference(container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); } // c_set_symmetric_difference() // // Container-based version of the `std::set_symmetric_difference()` // function to return an iterator containing elements present in either one // container or the other, but not both. 
template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output) { return std::set_symmetric_difference( container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output); } // Overload of c_set_symmetric_difference() for performing a merge using a // `comp` other than `operator<`. template ::value, void>::type, typename = typename std::enable_if< !container_algorithm_internal::IsUnorderedContainer::value, void>::type> OutputIterator c_set_symmetric_difference(const C1& c1, const C2& c2, OutputIterator output, LessThan&& comp) { return std::set_symmetric_difference( container_algorithm_internal::c_begin(c1), container_algorithm_internal::c_end(c1), container_algorithm_internal::c_begin(c2), container_algorithm_internal::c_end(c2), output, std::forward(comp)); } //------------------------------------------------------------------------------ // Heap functions //------------------------------------------------------------------------------ // c_push_heap() // // Container-based version of the `std::push_heap()` function // to push a value onto a container heap. template void c_push_heap(RandomAccessContainer& sequence) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_push_heap() for performing a push operation on a heap using a // `comp` other than `operator<`. template void c_push_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::push_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_pop_heap() // // Container-based version of the `std::pop_heap()` function // to pop a value from a heap container. 
template void c_pop_heap(RandomAccessContainer& sequence) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_pop_heap() for performing a pop operation on a heap using a // `comp` other than `operator<`. template void c_pop_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::pop_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_make_heap() // // Container-based version of the `std::make_heap()` function // to make a container a heap. template void c_make_heap(RandomAccessContainer& sequence) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_make_heap() for performing heap comparisons using a // `comp` other than `operator<` template void c_make_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::make_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_sort_heap() // // Container-based version of the `std::sort_heap()` function // to sort a heap into ascending order (after which it is no longer a heap). template void c_sort_heap(RandomAccessContainer& sequence) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_sort_heap() for performing heap comparisons using a // `comp` other than `operator<` template void c_sort_heap(RandomAccessContainer& sequence, LessThan&& comp) { std::sort_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_is_heap() // // Container-based version of the `std::is_heap()` function // to check whether the given container is a heap. 
template bool c_is_heap(const RandomAccessContainer& sequence) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_is_heap() for performing heap comparisons using a // `comp` other than `operator<` template bool c_is_heap(const RandomAccessContainer& sequence, LessThan&& comp) { return std::is_heap(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_is_heap_until() // // Container-based version of the `std::is_heap_until()` function // to find the first element in a given container which is not in heap order. template container_algorithm_internal::ContainerIter c_is_heap_until(RandomAccessContainer& sequence) { return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_is_heap_until() for performing heap comparisons using a // `comp` other than `operator<` template container_algorithm_internal::ContainerIter c_is_heap_until(RandomAccessContainer& sequence, LessThan&& comp) { return std::is_heap_until(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } //------------------------------------------------------------------------------ // Min/max //------------------------------------------------------------------------------ // c_min_element() // // Container-based version of the `std::min_element()` function // to return an iterator pointing to the element with the smallest value, using // `operator<` to make the comparisons. template container_algorithm_internal::ContainerIter c_min_element( Sequence& sequence) { return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_min_element() for performing a `comp` comparison other than // `operator<`. 
template container_algorithm_internal::ContainerIter c_min_element( Sequence& sequence, LessThan&& comp) { return std::min_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_max_element() // // Container-based version of the `std::max_element()` function // to return an iterator pointing to the element with the largest value, using // `operator<` to make the comparisons. template container_algorithm_internal::ContainerIter c_max_element( Sequence& sequence) { return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence)); } // Overload of c_max_element() for performing a `comp` comparison other than // `operator<`. template container_algorithm_internal::ContainerIter c_max_element( Sequence& sequence, LessThan&& comp) { return std::max_element(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(comp)); } // c_minmax_element() // // Container-based version of the `std::minmax_element()` function // to return a pair of iterators pointing to the elements containing the // smallest and largest values, respectively, using `operator<` to make the // comparisons. template container_algorithm_internal::ContainerIterPairType c_minmax_element(C& c) { return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_minmax_element() for performing `comp` comparisons other than // `operator<`. 
template container_algorithm_internal::ContainerIterPairType c_minmax_element(C& c, LessThan&& comp) { return std::minmax_element(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } //------------------------------------------------------------------------------ // Lexicographical Comparisons //------------------------------------------------------------------------------ // c_lexicographical_compare() // // Container-based version of the `std::lexicographical_compare()` // function to lexicographically compare (e.g. sort words alphabetically) two // container sequences. The comparison is performed using `operator<`. Note // that capital letters ("A-Z") have ASCII values less than lowercase letters // ("a-z"). template bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), container_algorithm_internal::c_begin(sequence2), container_algorithm_internal::c_end(sequence2)); } // Overload of c_lexicographical_compare() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. template bool c_lexicographical_compare(Sequence1&& sequence1, Sequence2&& sequence2, LessThan&& comp) { return std::lexicographical_compare( container_algorithm_internal::c_begin(sequence1), container_algorithm_internal::c_end(sequence1), container_algorithm_internal::c_begin(sequence2), container_algorithm_internal::c_end(sequence2), std::forward(comp)); } // c_next_permutation() // // Container-based version of the `std::next_permutation()` function // to rearrange a container's elements into the next lexicographically greater // permutation. 
template bool c_next_permutation(C& c) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_next_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. template bool c_next_permutation(C& c, LessThan&& comp) { return std::next_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } // c_prev_permutation() // // Container-based version of the `std::prev_permutation()` function // to rearrange a container's elements into the next lexicographically lesser // permutation. template bool c_prev_permutation(C& c) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c)); } // Overload of c_prev_permutation() for performing a lexicographical // comparison using a `comp` operator instead of `operator<`. template bool c_prev_permutation(C& c, LessThan&& comp) { return std::prev_permutation(container_algorithm_internal::c_begin(c), container_algorithm_internal::c_end(c), std::forward(comp)); } //------------------------------------------------------------------------------ // algorithms //------------------------------------------------------------------------------ // c_iota() // // Container-based version of the `std::iota()` function // to compute successive values of `value`, as if incremented with `++value` // after each element is written. and write them to the container. template void c_iota(Sequence& sequence, T&& value) { std::iota(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(value)); } // c_accumulate() // // Container-based version of the `std::accumulate()` function // to accumulate the element values of a container to `init` and return that // accumulation by value. // // Note: Due to a language technicality this function has return type // absl::decay_t. 
As a user of this function you can casually read // this as "returns T by value" and assume it does the right thing. template decay_t c_accumulate(const Sequence& sequence, T&& init) { return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init)); } // Overload of c_accumulate() for using a binary operations other than // addition for computing the accumulation. template decay_t c_accumulate(const Sequence& sequence, T&& init, BinaryOp&& binary_op) { return std::accumulate(container_algorithm_internal::c_begin(sequence), container_algorithm_internal::c_end(sequence), std::forward(init), std::forward(binary_op)); } // c_inner_product() // // Container-based version of the `std::inner_product()` function // to compute the cumulative inner product of container element pairs. // // Note: Due to a language technicality this function has return type // absl::decay_t. As a user of this function you can casually read // this as "returns T by value" and assume it does the right thing. template decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum) { return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum)); } // Overload of c_inner_product() for using binary operations other than // `operator+` (for computing the accumulation) and `operator*` (for computing // the product between the two container's element pair). 
template decay_t c_inner_product(const Sequence1& factors1, const Sequence2& factors2, T&& sum, BinaryOp1&& op1, BinaryOp2&& op2) { return std::inner_product(container_algorithm_internal::c_begin(factors1), container_algorithm_internal::c_end(factors1), container_algorithm_internal::c_begin(factors2), std::forward(sum), std::forward(op1), std::forward(op2)); } // c_adjacent_difference() // // Container-based version of the `std::adjacent_difference()` // function to compute the difference between each element and the one preceding // it and write it to an iterator. template OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first) { return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); } // Overload of c_adjacent_difference() for using a binary operation other than // subtraction to compute the adjacent difference. template OutputIt c_adjacent_difference(const InputSequence& input, OutputIt output_first, BinaryOp&& op) { return std::adjacent_difference(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); } // c_partial_sum() // // Container-based version of the `std::partial_sum()` function // to compute the partial sum of the elements in a sequence and write them // to an iterator. The partial sum is the sum of all element values so far in // the sequence. template OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first) { return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first); } // Overload of c_partial_sum() for using a binary operation other than addition // to compute the "partial sum". 
template OutputIt c_partial_sum(const InputSequence& input, OutputIt output_first, BinaryOp&& op) { return std::partial_sum(container_algorithm_internal::c_begin(input), container_algorithm_internal::c_end(input), output_first, std::forward(op)); } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_ALGORITHM_CONTAINER_H_ abseil-20220623.1/absl/algorithm/container_test.cc000066400000000000000000001076761430371345100216330ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/algorithm/container.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/casts.h" #include "absl/base/macros.h" #include "absl/memory/memory.h" #include "absl/types/span.h" namespace { using ::testing::Each; using ::testing::ElementsAre; using ::testing::Gt; using ::testing::IsNull; using ::testing::Lt; using ::testing::Pointee; using ::testing::Truly; using ::testing::UnorderedElementsAre; // Most of these tests just check that the code compiles, not that it // does the right thing. That's fine since the functions just forward // to the STL implementation. 
class NonMutatingTest : public testing::Test { protected: std::unordered_set container_ = {1, 2, 3}; std::list sequence_ = {1, 2, 3}; std::vector vector_ = {1, 2, 3}; int array_[3] = {1, 2, 3}; }; struct AccumulateCalls { void operator()(int value) { calls.push_back(value); } std::vector calls; }; bool Predicate(int value) { return value < 3; } bool BinPredicate(int v1, int v2) { return v1 < v2; } bool Equals(int v1, int v2) { return v1 == v2; } bool IsOdd(int x) { return x % 2 != 0; } TEST_F(NonMutatingTest, Distance) { EXPECT_EQ(container_.size(), absl::c_distance(container_)); EXPECT_EQ(sequence_.size(), absl::c_distance(sequence_)); EXPECT_EQ(vector_.size(), absl::c_distance(vector_)); EXPECT_EQ(ABSL_ARRAYSIZE(array_), absl::c_distance(array_)); // Works with a temporary argument. EXPECT_EQ(vector_.size(), absl::c_distance(std::vector(vector_))); } TEST_F(NonMutatingTest, Distance_OverloadedBeginEnd) { // Works with classes which have custom ADL-selected overloads of std::begin // and std::end. std::initializer_list a = {1, 2, 3}; std::valarray b = {1, 2, 3}; EXPECT_EQ(3, absl::c_distance(a)); EXPECT_EQ(3, absl::c_distance(b)); // It is assumed that other c_* functions use the same mechanism for // ADL-selecting begin/end overloads. } TEST_F(NonMutatingTest, ForEach) { AccumulateCalls c = absl::c_for_each(container_, AccumulateCalls()); // Don't rely on the unordered_set's order. std::sort(c.calls.begin(), c.calls.end()); EXPECT_EQ(vector_, c.calls); // Works with temporary container, too. 
AccumulateCalls c2 = absl::c_for_each(std::unordered_set(container_), AccumulateCalls()); std::sort(c2.calls.begin(), c2.calls.end()); EXPECT_EQ(vector_, c2.calls); } TEST_F(NonMutatingTest, FindReturnsCorrectType) { auto it = absl::c_find(container_, 3); EXPECT_EQ(3, *it); absl::c_find(absl::implicit_cast&>(sequence_), 3); } TEST_F(NonMutatingTest, FindIf) { absl::c_find_if(container_, Predicate); } TEST_F(NonMutatingTest, FindIfNot) { absl::c_find_if_not(container_, Predicate); } TEST_F(NonMutatingTest, FindEnd) { absl::c_find_end(sequence_, vector_); absl::c_find_end(vector_, sequence_); } TEST_F(NonMutatingTest, FindEndWithPredicate) { absl::c_find_end(sequence_, vector_, BinPredicate); absl::c_find_end(vector_, sequence_, BinPredicate); } TEST_F(NonMutatingTest, FindFirstOf) { absl::c_find_first_of(container_, sequence_); absl::c_find_first_of(sequence_, container_); } TEST_F(NonMutatingTest, FindFirstOfWithPredicate) { absl::c_find_first_of(container_, sequence_, BinPredicate); absl::c_find_first_of(sequence_, container_, BinPredicate); } TEST_F(NonMutatingTest, AdjacentFind) { absl::c_adjacent_find(sequence_); } TEST_F(NonMutatingTest, AdjacentFindWithPredicate) { absl::c_adjacent_find(sequence_, BinPredicate); } TEST_F(NonMutatingTest, Count) { EXPECT_EQ(1, absl::c_count(container_, 3)); } TEST_F(NonMutatingTest, CountIf) { EXPECT_EQ(2, absl::c_count_if(container_, Predicate)); const std::unordered_set& const_container = container_; EXPECT_EQ(2, absl::c_count_if(const_container, Predicate)); } TEST_F(NonMutatingTest, Mismatch) { // Testing necessary as absl::c_mismatch executes logic. 
{ auto result = absl::c_mismatch(vector_, sequence_); EXPECT_EQ(result.first, vector_.end()); EXPECT_EQ(result.second, sequence_.end()); } { auto result = absl::c_mismatch(sequence_, vector_); EXPECT_EQ(result.first, sequence_.end()); EXPECT_EQ(result.second, vector_.end()); } sequence_.back() = 5; { auto result = absl::c_mismatch(vector_, sequence_); EXPECT_EQ(result.first, std::prev(vector_.end())); EXPECT_EQ(result.second, std::prev(sequence_.end())); } { auto result = absl::c_mismatch(sequence_, vector_); EXPECT_EQ(result.first, std::prev(sequence_.end())); EXPECT_EQ(result.second, std::prev(vector_.end())); } sequence_.pop_back(); { auto result = absl::c_mismatch(vector_, sequence_); EXPECT_EQ(result.first, std::prev(vector_.end())); EXPECT_EQ(result.second, sequence_.end()); } { auto result = absl::c_mismatch(sequence_, vector_); EXPECT_EQ(result.first, sequence_.end()); EXPECT_EQ(result.second, std::prev(vector_.end())); } { struct NoNotEquals { constexpr bool operator==(NoNotEquals) const { return true; } constexpr bool operator!=(NoNotEquals) const = delete; }; std::vector first; std::list second; // Check this still compiles. absl::c_mismatch(first, second); } } TEST_F(NonMutatingTest, MismatchWithPredicate) { // Testing necessary as absl::c_mismatch executes logic. 
{ auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); EXPECT_EQ(result.first, vector_.begin()); EXPECT_EQ(result.second, sequence_.begin()); } { auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); EXPECT_EQ(result.first, sequence_.begin()); EXPECT_EQ(result.second, vector_.begin()); } sequence_.front() = 0; { auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); EXPECT_EQ(result.first, vector_.begin()); EXPECT_EQ(result.second, sequence_.begin()); } { auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); EXPECT_EQ(result.first, std::next(sequence_.begin())); EXPECT_EQ(result.second, std::next(vector_.begin())); } sequence_.clear(); { auto result = absl::c_mismatch(vector_, sequence_, BinPredicate); EXPECT_EQ(result.first, vector_.begin()); EXPECT_EQ(result.second, sequence_.end()); } { auto result = absl::c_mismatch(sequence_, vector_, BinPredicate); EXPECT_EQ(result.first, sequence_.end()); EXPECT_EQ(result.second, vector_.begin()); } } TEST_F(NonMutatingTest, Equal) { EXPECT_TRUE(absl::c_equal(vector_, sequence_)); EXPECT_TRUE(absl::c_equal(sequence_, vector_)); EXPECT_TRUE(absl::c_equal(sequence_, array_)); EXPECT_TRUE(absl::c_equal(array_, vector_)); // Test that behavior appropriately differs from that of equal(). std::vector vector_plus = {1, 2, 3}; vector_plus.push_back(4); EXPECT_FALSE(absl::c_equal(vector_plus, sequence_)); EXPECT_FALSE(absl::c_equal(sequence_, vector_plus)); EXPECT_FALSE(absl::c_equal(array_, vector_plus)); } TEST_F(NonMutatingTest, EqualWithPredicate) { EXPECT_TRUE(absl::c_equal(vector_, sequence_, Equals)); EXPECT_TRUE(absl::c_equal(sequence_, vector_, Equals)); EXPECT_TRUE(absl::c_equal(array_, sequence_, Equals)); EXPECT_TRUE(absl::c_equal(vector_, array_, Equals)); // Test that behavior appropriately differs from that of equal(). 
std::vector vector_plus = {1, 2, 3}; vector_plus.push_back(4); EXPECT_FALSE(absl::c_equal(vector_plus, sequence_, Equals)); EXPECT_FALSE(absl::c_equal(sequence_, vector_plus, Equals)); EXPECT_FALSE(absl::c_equal(vector_plus, array_, Equals)); } TEST_F(NonMutatingTest, IsPermutation) { auto vector_permut_ = vector_; std::next_permutation(vector_permut_.begin(), vector_permut_.end()); EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_)); EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_)); // Test that behavior appropriately differs from that of is_permutation(). std::vector vector_plus = {1, 2, 3}; vector_plus.push_back(4); EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_)); EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus)); } TEST_F(NonMutatingTest, IsPermutationWithPredicate) { auto vector_permut_ = vector_; std::next_permutation(vector_permut_.begin(), vector_permut_.end()); EXPECT_TRUE(absl::c_is_permutation(vector_permut_, sequence_, Equals)); EXPECT_TRUE(absl::c_is_permutation(sequence_, vector_permut_, Equals)); // Test that behavior appropriately differs from that of is_permutation(). 
std::vector vector_plus = {1, 2, 3}; vector_plus.push_back(4); EXPECT_FALSE(absl::c_is_permutation(vector_plus, sequence_, Equals)); EXPECT_FALSE(absl::c_is_permutation(sequence_, vector_plus, Equals)); } TEST_F(NonMutatingTest, Search) { absl::c_search(sequence_, vector_); absl::c_search(vector_, sequence_); absl::c_search(array_, sequence_); } TEST_F(NonMutatingTest, SearchWithPredicate) { absl::c_search(sequence_, vector_, BinPredicate); absl::c_search(vector_, sequence_, BinPredicate); } TEST_F(NonMutatingTest, SearchN) { absl::c_search_n(sequence_, 3, 1); } TEST_F(NonMutatingTest, SearchNWithPredicate) { absl::c_search_n(sequence_, 3, 1, BinPredicate); } TEST_F(NonMutatingTest, LowerBound) { std::list::iterator i = absl::c_lower_bound(sequence_, 3); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(2, std::distance(sequence_.begin(), i)); EXPECT_EQ(3, *i); } TEST_F(NonMutatingTest, LowerBoundWithPredicate) { std::vector v(vector_); std::sort(v.begin(), v.end(), std::greater()); std::vector::iterator i = absl::c_lower_bound(v, 3, std::greater()); EXPECT_TRUE(i == v.begin()); EXPECT_EQ(3, *i); } TEST_F(NonMutatingTest, UpperBound) { std::list::iterator i = absl::c_upper_bound(sequence_, 1); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(1, std::distance(sequence_.begin(), i)); EXPECT_EQ(2, *i); } TEST_F(NonMutatingTest, UpperBoundWithPredicate) { std::vector v(vector_); std::sort(v.begin(), v.end(), std::greater()); std::vector::iterator i = absl::c_upper_bound(v, 1, std::greater()); EXPECT_EQ(3, i - v.begin()); EXPECT_TRUE(i == v.end()); } TEST_F(NonMutatingTest, EqualRange) { std::pair::iterator, std::list::iterator> p = absl::c_equal_range(sequence_, 2); EXPECT_EQ(1, std::distance(sequence_.begin(), p.first)); EXPECT_EQ(2, std::distance(sequence_.begin(), p.second)); } TEST_F(NonMutatingTest, EqualRangeArray) { auto p = absl::c_equal_range(array_, 2); EXPECT_EQ(1, std::distance(std::begin(array_), p.first)); EXPECT_EQ(2, std::distance(std::begin(array_), p.second)); 
} TEST_F(NonMutatingTest, EqualRangeWithPredicate) { std::vector v(vector_); std::sort(v.begin(), v.end(), std::greater()); std::pair::iterator, std::vector::iterator> p = absl::c_equal_range(v, 2, std::greater()); EXPECT_EQ(1, std::distance(v.begin(), p.first)); EXPECT_EQ(2, std::distance(v.begin(), p.second)); } TEST_F(NonMutatingTest, BinarySearch) { EXPECT_TRUE(absl::c_binary_search(vector_, 2)); EXPECT_TRUE(absl::c_binary_search(std::vector(vector_), 2)); } TEST_F(NonMutatingTest, BinarySearchWithPredicate) { std::vector v(vector_); std::sort(v.begin(), v.end(), std::greater()); EXPECT_TRUE(absl::c_binary_search(v, 2, std::greater())); EXPECT_TRUE( absl::c_binary_search(std::vector(v), 2, std::greater())); } TEST_F(NonMutatingTest, MinElement) { std::list::iterator i = absl::c_min_element(sequence_); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(*i, 1); } TEST_F(NonMutatingTest, MinElementWithPredicate) { std::list::iterator i = absl::c_min_element(sequence_, std::greater()); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(*i, 3); } TEST_F(NonMutatingTest, MaxElement) { std::list::iterator i = absl::c_max_element(sequence_); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(*i, 3); } TEST_F(NonMutatingTest, MaxElementWithPredicate) { std::list::iterator i = absl::c_max_element(sequence_, std::greater()); ASSERT_TRUE(i != sequence_.end()); EXPECT_EQ(*i, 1); } TEST_F(NonMutatingTest, LexicographicalCompare) { EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_)); std::vector v; v.push_back(1); v.push_back(2); v.push_back(4); EXPECT_TRUE(absl::c_lexicographical_compare(sequence_, v)); EXPECT_TRUE(absl::c_lexicographical_compare(std::list(sequence_), v)); } TEST_F(NonMutatingTest, LexicographicalCopmareWithPredicate) { EXPECT_FALSE(absl::c_lexicographical_compare(sequence_, sequence_, std::greater())); std::vector v; v.push_back(1); v.push_back(2); v.push_back(4); EXPECT_TRUE( absl::c_lexicographical_compare(v, sequence_, std::greater())); 
EXPECT_TRUE(absl::c_lexicographical_compare( std::vector(v), std::list(sequence_), std::greater())); } TEST_F(NonMutatingTest, Includes) { std::set s(vector_.begin(), vector_.end()); s.insert(4); EXPECT_TRUE(absl::c_includes(s, vector_)); } TEST_F(NonMutatingTest, IncludesWithPredicate) { std::vector v = {3, 2, 1}; std::set> s(v.begin(), v.end()); s.insert(4); EXPECT_TRUE(absl::c_includes(s, v, std::greater())); } class NumericMutatingTest : public testing::Test { protected: std::list list_ = {1, 2, 3}; std::vector output_; }; TEST_F(NumericMutatingTest, Iota) { absl::c_iota(list_, 5); std::list expected{5, 6, 7}; EXPECT_EQ(list_, expected); } TEST_F(NonMutatingTest, Accumulate) { EXPECT_EQ(absl::c_accumulate(sequence_, 4), 1 + 2 + 3 + 4); } TEST_F(NonMutatingTest, AccumulateWithBinaryOp) { EXPECT_EQ(absl::c_accumulate(sequence_, 4, std::multiplies()), 1 * 2 * 3 * 4); } TEST_F(NonMutatingTest, AccumulateLvalueInit) { int lvalue = 4; EXPECT_EQ(absl::c_accumulate(sequence_, lvalue), 1 + 2 + 3 + 4); } TEST_F(NonMutatingTest, AccumulateWithBinaryOpLvalueInit) { int lvalue = 4; EXPECT_EQ(absl::c_accumulate(sequence_, lvalue, std::multiplies()), 1 * 2 * 3 * 4); } TEST_F(NonMutatingTest, InnerProduct) { EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 1000), 1000 + 1 * 1 + 2 * 2 + 3 * 3); } TEST_F(NonMutatingTest, InnerProductWithBinaryOps) { EXPECT_EQ(absl::c_inner_product(sequence_, vector_, 10, std::multiplies(), std::plus()), 10 * (1 + 1) * (2 + 2) * (3 + 3)); } TEST_F(NonMutatingTest, InnerProductLvalueInit) { int lvalue = 1000; EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue), 1000 + 1 * 1 + 2 * 2 + 3 * 3); } TEST_F(NonMutatingTest, InnerProductWithBinaryOpsLvalueInit) { int lvalue = 10; EXPECT_EQ(absl::c_inner_product(sequence_, vector_, lvalue, std::multiplies(), std::plus()), 10 * (1 + 1) * (2 + 2) * (3 + 3)); } TEST_F(NumericMutatingTest, AdjacentDifference) { auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_)); *last = 
1000; std::vector expected{1, 2 - 1, 3 - 2, 1000}; EXPECT_EQ(output_, expected); } TEST_F(NumericMutatingTest, AdjacentDifferenceWithBinaryOp) { auto last = absl::c_adjacent_difference(list_, std::back_inserter(output_), std::multiplies()); *last = 1000; std::vector expected{1, 2 * 1, 3 * 2, 1000}; EXPECT_EQ(output_, expected); } TEST_F(NumericMutatingTest, PartialSum) { auto last = absl::c_partial_sum(list_, std::back_inserter(output_)); *last = 1000; std::vector expected{1, 1 + 2, 1 + 2 + 3, 1000}; EXPECT_EQ(output_, expected); } TEST_F(NumericMutatingTest, PartialSumWithBinaryOp) { auto last = absl::c_partial_sum(list_, std::back_inserter(output_), std::multiplies()); *last = 1000; std::vector expected{1, 1 * 2, 1 * 2 * 3, 1000}; EXPECT_EQ(output_, expected); } TEST_F(NonMutatingTest, LinearSearch) { EXPECT_TRUE(absl::c_linear_search(container_, 3)); EXPECT_FALSE(absl::c_linear_search(container_, 4)); } TEST_F(NonMutatingTest, AllOf) { const std::vector& v = vector_; EXPECT_FALSE(absl::c_all_of(v, [](int x) { return x > 1; })); EXPECT_TRUE(absl::c_all_of(v, [](int x) { return x > 0; })); } TEST_F(NonMutatingTest, AnyOf) { const std::vector& v = vector_; EXPECT_TRUE(absl::c_any_of(v, [](int x) { return x > 2; })); EXPECT_FALSE(absl::c_any_of(v, [](int x) { return x > 5; })); } TEST_F(NonMutatingTest, NoneOf) { const std::vector& v = vector_; EXPECT_FALSE(absl::c_none_of(v, [](int x) { return x > 2; })); EXPECT_TRUE(absl::c_none_of(v, [](int x) { return x > 5; })); } TEST_F(NonMutatingTest, MinMaxElementLess) { std::pair::const_iterator, std::vector::const_iterator> p = absl::c_minmax_element(vector_, std::less()); EXPECT_TRUE(p.first == vector_.begin()); EXPECT_TRUE(p.second == vector_.begin() + 2); } TEST_F(NonMutatingTest, MinMaxElementGreater) { std::pair::const_iterator, std::vector::const_iterator> p = absl::c_minmax_element(vector_, std::greater()); EXPECT_TRUE(p.first == vector_.begin() + 2); EXPECT_TRUE(p.second == vector_.begin()); } 
TEST_F(NonMutatingTest, MinMaxElementNoPredicate) { std::pair::const_iterator, std::vector::const_iterator> p = absl::c_minmax_element(vector_); EXPECT_TRUE(p.first == vector_.begin()); EXPECT_TRUE(p.second == vector_.begin() + 2); } class SortingTest : public testing::Test { protected: std::list sorted_ = {1, 2, 3, 4}; std::list unsorted_ = {2, 4, 1, 3}; std::list reversed_ = {4, 3, 2, 1}; }; TEST_F(SortingTest, IsSorted) { EXPECT_TRUE(absl::c_is_sorted(sorted_)); EXPECT_FALSE(absl::c_is_sorted(unsorted_)); EXPECT_FALSE(absl::c_is_sorted(reversed_)); } TEST_F(SortingTest, IsSortedWithPredicate) { EXPECT_FALSE(absl::c_is_sorted(sorted_, std::greater())); EXPECT_FALSE(absl::c_is_sorted(unsorted_, std::greater())); EXPECT_TRUE(absl::c_is_sorted(reversed_, std::greater())); } TEST_F(SortingTest, IsSortedUntil) { EXPECT_EQ(1, *absl::c_is_sorted_until(unsorted_)); EXPECT_EQ(4, *absl::c_is_sorted_until(unsorted_, std::greater())); } TEST_F(SortingTest, NthElement) { std::vector unsorted = {2, 4, 1, 3}; absl::c_nth_element(unsorted, unsorted.begin() + 2); EXPECT_THAT(unsorted, ElementsAre(Lt(3), Lt(3), 3, Gt(3))); absl::c_nth_element(unsorted, unsorted.begin() + 2, std::greater()); EXPECT_THAT(unsorted, ElementsAre(Gt(2), Gt(2), 2, Lt(2))); } TEST(MutatingTest, IsPartitioned) { EXPECT_TRUE( absl::c_is_partitioned(std::vector{1, 3, 5, 2, 4, 6}, IsOdd)); EXPECT_FALSE( absl::c_is_partitioned(std::vector{1, 2, 3, 4, 5, 6}, IsOdd)); EXPECT_FALSE( absl::c_is_partitioned(std::vector{2, 4, 6, 1, 3, 5}, IsOdd)); } TEST(MutatingTest, Partition) { std::vector actual = {1, 2, 3, 4, 5}; absl::c_partition(actual, IsOdd); EXPECT_THAT(actual, Truly([](const std::vector& c) { return absl::c_is_partitioned(c, IsOdd); })); } TEST(MutatingTest, StablePartition) { std::vector actual = {1, 2, 3, 4, 5}; absl::c_stable_partition(actual, IsOdd); EXPECT_THAT(actual, ElementsAre(1, 3, 5, 2, 4)); } TEST(MutatingTest, PartitionCopy) { const std::vector initial = {1, 2, 3, 4, 5}; std::vector odds, 
evens; auto ends = absl::c_partition_copy(initial, back_inserter(odds), back_inserter(evens), IsOdd); *ends.first = 7; *ends.second = 6; EXPECT_THAT(odds, ElementsAre(1, 3, 5, 7)); EXPECT_THAT(evens, ElementsAre(2, 4, 6)); } TEST(MutatingTest, PartitionPoint) { const std::vector initial = {1, 3, 5, 2, 4}; auto middle = absl::c_partition_point(initial, IsOdd); EXPECT_EQ(2, *middle); } TEST(MutatingTest, CopyMiddle) { const std::vector initial = {4, -1, -2, -3, 5}; const std::list input = {1, 2, 3}; const std::vector expected = {4, 1, 2, 3, 5}; std::list test_list(initial.begin(), initial.end()); absl::c_copy(input, ++test_list.begin()); EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); std::vector test_vector = initial; absl::c_copy(input, test_vector.begin() + 1); EXPECT_EQ(expected, test_vector); } TEST(MutatingTest, CopyFrontInserter) { const std::list initial = {4, 5}; const std::list input = {1, 2, 3}; const std::list expected = {3, 2, 1, 4, 5}; std::list test_list = initial; absl::c_copy(input, std::front_inserter(test_list)); EXPECT_EQ(expected, test_list); } TEST(MutatingTest, CopyBackInserter) { const std::vector initial = {4, 5}; const std::list input = {1, 2, 3}; const std::vector expected = {4, 5, 1, 2, 3}; std::list test_list(initial.begin(), initial.end()); absl::c_copy(input, std::back_inserter(test_list)); EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); std::vector test_vector = initial; absl::c_copy(input, std::back_inserter(test_vector)); EXPECT_EQ(expected, test_vector); } TEST(MutatingTest, CopyN) { const std::vector initial = {1, 2, 3, 4, 5}; const std::vector expected = {1, 2}; std::vector actual; absl::c_copy_n(initial, 2, back_inserter(actual)); EXPECT_EQ(expected, actual); } TEST(MutatingTest, CopyIf) { const std::list input = {1, 2, 3}; std::vector output; absl::c_copy_if(input, std::back_inserter(output), [](int i) { return i != 2; }); EXPECT_THAT(output, ElementsAre(1, 3)); } TEST(MutatingTest, 
CopyBackward) { std::vector actual = {1, 2, 3, 4, 5}; std::vector expected = {1, 2, 1, 2, 3}; absl::c_copy_backward(absl::MakeSpan(actual.data(), 3), actual.end()); EXPECT_EQ(expected, actual); } TEST(MutatingTest, Move) { std::vector> src; src.emplace_back(absl::make_unique(1)); src.emplace_back(absl::make_unique(2)); src.emplace_back(absl::make_unique(3)); src.emplace_back(absl::make_unique(4)); src.emplace_back(absl::make_unique(5)); std::vector> dest = {}; absl::c_move(src, std::back_inserter(dest)); EXPECT_THAT(src, Each(IsNull())); EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(4), Pointee(5))); } TEST(MutatingTest, MoveBackward) { std::vector> actual; actual.emplace_back(absl::make_unique(1)); actual.emplace_back(absl::make_unique(2)); actual.emplace_back(absl::make_unique(3)); actual.emplace_back(absl::make_unique(4)); actual.emplace_back(absl::make_unique(5)); auto subrange = absl::MakeSpan(actual.data(), 3); absl::c_move_backward(subrange, actual.end()); EXPECT_THAT(actual, ElementsAre(IsNull(), IsNull(), Pointee(1), Pointee(2), Pointee(3))); } TEST(MutatingTest, MoveWithRvalue) { auto MakeRValueSrc = [] { std::vector> src; src.emplace_back(absl::make_unique(1)); src.emplace_back(absl::make_unique(2)); src.emplace_back(absl::make_unique(3)); return src; }; std::vector> dest = MakeRValueSrc(); absl::c_move(MakeRValueSrc(), std::back_inserter(dest)); EXPECT_THAT(dest, ElementsAre(Pointee(1), Pointee(2), Pointee(3), Pointee(1), Pointee(2), Pointee(3))); } TEST(MutatingTest, SwapRanges) { std::vector odds = {2, 4, 6}; std::vector evens = {1, 3, 5}; absl::c_swap_ranges(odds, evens); EXPECT_THAT(odds, ElementsAre(1, 3, 5)); EXPECT_THAT(evens, ElementsAre(2, 4, 6)); odds.pop_back(); absl::c_swap_ranges(odds, evens); EXPECT_THAT(odds, ElementsAre(2, 4)); EXPECT_THAT(evens, ElementsAre(1, 3, 6)); absl::c_swap_ranges(evens, odds); EXPECT_THAT(odds, ElementsAre(1, 3)); EXPECT_THAT(evens, ElementsAre(2, 4, 6)); } TEST_F(NonMutatingTest, 
Transform) { std::vector x{0, 2, 4}, y, z; auto end = absl::c_transform(x, back_inserter(y), std::negate()); EXPECT_EQ(std::vector({0, -2, -4}), y); *end = 7; EXPECT_EQ(std::vector({0, -2, -4, 7}), y); y = {1, 3, 0}; end = absl::c_transform(x, y, back_inserter(z), std::plus()); EXPECT_EQ(std::vector({1, 5, 4}), z); *end = 7; EXPECT_EQ(std::vector({1, 5, 4, 7}), z); z.clear(); y.pop_back(); end = absl::c_transform(x, y, std::back_inserter(z), std::plus()); EXPECT_EQ(std::vector({1, 5}), z); *end = 7; EXPECT_EQ(std::vector({1, 5, 7}), z); z.clear(); std::swap(x, y); end = absl::c_transform(x, y, std::back_inserter(z), std::plus()); EXPECT_EQ(std::vector({1, 5}), z); *end = 7; EXPECT_EQ(std::vector({1, 5, 7}), z); } TEST(MutatingTest, Replace) { const std::vector initial = {1, 2, 3, 1, 4, 5}; const std::vector expected = {4, 2, 3, 4, 4, 5}; std::vector test_vector = initial; absl::c_replace(test_vector, 1, 4); EXPECT_EQ(expected, test_vector); std::list test_list(initial.begin(), initial.end()); absl::c_replace(test_list, 1, 4); EXPECT_EQ(std::list(expected.begin(), expected.end()), test_list); } TEST(MutatingTest, ReplaceIf) { std::vector actual = {1, 2, 3, 4, 5}; const std::vector expected = {0, 2, 0, 4, 0}; absl::c_replace_if(actual, IsOdd, 0); EXPECT_EQ(expected, actual); } TEST(MutatingTest, ReplaceCopy) { const std::vector initial = {1, 2, 3, 1, 4, 5}; const std::vector expected = {4, 2, 3, 4, 4, 5}; std::vector actual; absl::c_replace_copy(initial, back_inserter(actual), 1, 4); EXPECT_EQ(expected, actual); } TEST(MutatingTest, Sort) { std::vector test_vector = {2, 3, 1, 4}; absl::c_sort(test_vector); EXPECT_THAT(test_vector, ElementsAre(1, 2, 3, 4)); } TEST(MutatingTest, SortWithPredicate) { std::vector test_vector = {2, 3, 1, 4}; absl::c_sort(test_vector, std::greater()); EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1)); } // For absl::c_stable_sort tests. 
Needs an operator< that does not cover all // fields so that the test can check the sort preserves order of equal elements. struct Element { int key; int value; friend bool operator<(const Element& e1, const Element& e2) { return e1.key < e2.key; } // Make gmock print useful diagnostics. friend std::ostream& operator<<(std::ostream& o, const Element& e) { return o << "{" << e.key << ", " << e.value << "}"; } }; MATCHER_P2(IsElement, key, value, "") { return arg.key == key && arg.value == value; } TEST(MutatingTest, StableSort) { std::vector test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}}; absl::c_stable_sort(test_vector); EXPECT_THAT(test_vector, ElementsAre(IsElement(1, 1), IsElement(1, 0), IsElement(2, 1), IsElement(2, 0), IsElement(2, 2))); } TEST(MutatingTest, StableSortWithPredicate) { std::vector test_vector = {{1, 1}, {2, 1}, {2, 0}, {1, 0}, {2, 2}}; absl::c_stable_sort(test_vector, [](const Element& e1, const Element& e2) { return e2 < e1; }); EXPECT_THAT(test_vector, ElementsAre(IsElement(2, 1), IsElement(2, 0), IsElement(2, 2), IsElement(1, 1), IsElement(1, 0))); } TEST(MutatingTest, ReplaceCopyIf) { const std::vector initial = {1, 2, 3, 4, 5}; const std::vector expected = {0, 2, 0, 4, 0}; std::vector actual; absl::c_replace_copy_if(initial, back_inserter(actual), IsOdd, 0); EXPECT_EQ(expected, actual); } TEST(MutatingTest, Fill) { std::vector actual(5); absl::c_fill(actual, 1); EXPECT_THAT(actual, ElementsAre(1, 1, 1, 1, 1)); } TEST(MutatingTest, FillN) { std::vector actual(5, 0); absl::c_fill_n(actual, 2, 1); EXPECT_THAT(actual, ElementsAre(1, 1, 0, 0, 0)); } TEST(MutatingTest, Generate) { std::vector actual(5); int x = 0; absl::c_generate(actual, [&x]() { return ++x; }); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); } TEST(MutatingTest, GenerateN) { std::vector actual(5, 0); int x = 0; absl::c_generate_n(actual, 3, [&x]() { return ++x; }); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 0, 0)); } TEST(MutatingTest, RemoveCopy) { std::vector actual; 
absl::c_remove_copy(std::vector{1, 2, 3}, back_inserter(actual), 2); EXPECT_THAT(actual, ElementsAre(1, 3)); } TEST(MutatingTest, RemoveCopyIf) { std::vector actual; absl::c_remove_copy_if(std::vector{1, 2, 3}, back_inserter(actual), IsOdd); EXPECT_THAT(actual, ElementsAre(2)); } TEST(MutatingTest, UniqueCopy) { std::vector actual; absl::c_unique_copy(std::vector{1, 2, 2, 2, 3, 3, 2}, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 2)); } TEST(MutatingTest, UniqueCopyWithPredicate) { std::vector actual; absl::c_unique_copy(std::vector{1, 2, 3, -1, -2, -3, 1}, back_inserter(actual), [](int x, int y) { return (x < 0) == (y < 0); }); EXPECT_THAT(actual, ElementsAre(1, -1, 1)); } TEST(MutatingTest, Reverse) { std::vector test_vector = {1, 2, 3, 4}; absl::c_reverse(test_vector); EXPECT_THAT(test_vector, ElementsAre(4, 3, 2, 1)); std::list test_list = {1, 2, 3, 4}; absl::c_reverse(test_list); EXPECT_THAT(test_list, ElementsAre(4, 3, 2, 1)); } TEST(MutatingTest, ReverseCopy) { std::vector actual; absl::c_reverse_copy(std::vector{1, 2, 3, 4}, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(4, 3, 2, 1)); } TEST(MutatingTest, Rotate) { std::vector actual = {1, 2, 3, 4}; auto it = absl::c_rotate(actual, actual.begin() + 2); EXPECT_THAT(actual, testing::ElementsAreArray({3, 4, 1, 2})); EXPECT_EQ(*it, 1); } TEST(MutatingTest, RotateCopy) { std::vector initial = {1, 2, 3, 4}; std::vector actual; auto end = absl::c_rotate_copy(initial, initial.begin() + 2, back_inserter(actual)); *end = 5; EXPECT_THAT(actual, ElementsAre(3, 4, 1, 2, 5)); } TEST(MutatingTest, Shuffle) { std::vector actual = {1, 2, 3, 4, 5}; absl::c_shuffle(actual, std::random_device()); EXPECT_THAT(actual, UnorderedElementsAre(1, 2, 3, 4, 5)); } TEST(MutatingTest, PartialSort) { std::vector sequence{5, 3, 42, 0}; absl::c_partial_sort(sequence, sequence.begin() + 2); EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(0, 3)); absl::c_partial_sort(sequence, sequence.begin() + 2, 
std::greater()); EXPECT_THAT(absl::MakeSpan(sequence.data(), 2), ElementsAre(42, 5)); } TEST(MutatingTest, PartialSortCopy) { const std::vector initial = {5, 3, 42, 0}; std::vector actual(2); absl::c_partial_sort_copy(initial, actual); EXPECT_THAT(actual, ElementsAre(0, 3)); absl::c_partial_sort_copy(initial, actual, std::greater()); EXPECT_THAT(actual, ElementsAre(42, 5)); } TEST(MutatingTest, Merge) { std::vector actual; absl::c_merge(std::vector{1, 3, 5}, std::vector{2, 4}, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); } TEST(MutatingTest, MergeWithComparator) { std::vector actual; absl::c_merge(std::vector{5, 3, 1}, std::vector{4, 2}, back_inserter(actual), std::greater()); EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1)); } TEST(MutatingTest, InplaceMerge) { std::vector actual = {1, 3, 5, 2, 4}; absl::c_inplace_merge(actual, actual.begin() + 3); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 4, 5)); } TEST(MutatingTest, InplaceMergeWithComparator) { std::vector actual = {5, 3, 1, 4, 2}; absl::c_inplace_merge(actual, actual.begin() + 3, std::greater()); EXPECT_THAT(actual, ElementsAre(5, 4, 3, 2, 1)); } class SetOperationsTest : public testing::Test { protected: std::vector a_ = {1, 2, 3}; std::vector b_ = {1, 3, 5}; std::vector a_reversed_ = {3, 2, 1}; std::vector b_reversed_ = {5, 3, 1}; }; TEST_F(SetOperationsTest, SetUnion) { std::vector actual; absl::c_set_union(a_, b_, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(1, 2, 3, 5)); } TEST_F(SetOperationsTest, SetUnionWithComparator) { std::vector actual; absl::c_set_union(a_reversed_, b_reversed_, back_inserter(actual), std::greater()); EXPECT_THAT(actual, ElementsAre(5, 3, 2, 1)); } TEST_F(SetOperationsTest, SetIntersection) { std::vector actual; absl::c_set_intersection(a_, b_, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(1, 3)); } TEST_F(SetOperationsTest, SetIntersectionWithComparator) { std::vector actual; absl::c_set_intersection(a_reversed_, b_reversed_, 
back_inserter(actual), std::greater()); EXPECT_THAT(actual, ElementsAre(3, 1)); } TEST_F(SetOperationsTest, SetDifference) { std::vector actual; absl::c_set_difference(a_, b_, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(2)); } TEST_F(SetOperationsTest, SetDifferenceWithComparator) { std::vector actual; absl::c_set_difference(a_reversed_, b_reversed_, back_inserter(actual), std::greater()); EXPECT_THAT(actual, ElementsAre(2)); } TEST_F(SetOperationsTest, SetSymmetricDifference) { std::vector actual; absl::c_set_symmetric_difference(a_, b_, back_inserter(actual)); EXPECT_THAT(actual, ElementsAre(2, 5)); } TEST_F(SetOperationsTest, SetSymmetricDifferenceWithComparator) { std::vector actual; absl::c_set_symmetric_difference(a_reversed_, b_reversed_, back_inserter(actual), std::greater()); EXPECT_THAT(actual, ElementsAre(5, 2)); } TEST(HeapOperationsTest, WithoutComparator) { std::vector heap = {1, 2, 3}; EXPECT_FALSE(absl::c_is_heap(heap)); absl::c_make_heap(heap); EXPECT_TRUE(absl::c_is_heap(heap)); heap.push_back(4); EXPECT_EQ(3, absl::c_is_heap_until(heap) - heap.begin()); absl::c_push_heap(heap); EXPECT_EQ(4, heap[0]); absl::c_pop_heap(heap); EXPECT_EQ(4, heap[3]); absl::c_make_heap(heap); absl::c_sort_heap(heap); EXPECT_THAT(heap, ElementsAre(1, 2, 3, 4)); EXPECT_FALSE(absl::c_is_heap(heap)); } TEST(HeapOperationsTest, WithComparator) { using greater = std::greater; std::vector heap = {3, 2, 1}; EXPECT_FALSE(absl::c_is_heap(heap, greater())); absl::c_make_heap(heap, greater()); EXPECT_TRUE(absl::c_is_heap(heap, greater())); heap.push_back(0); EXPECT_EQ(3, absl::c_is_heap_until(heap, greater()) - heap.begin()); absl::c_push_heap(heap, greater()); EXPECT_EQ(0, heap[0]); absl::c_pop_heap(heap, greater()); EXPECT_EQ(0, heap[3]); absl::c_make_heap(heap, greater()); absl::c_sort_heap(heap, greater()); EXPECT_THAT(heap, ElementsAre(3, 2, 1, 0)); EXPECT_FALSE(absl::c_is_heap(heap, greater())); } TEST(MutatingTest, PermutationOperations) { std::vector initial = 
{1, 2, 3, 4}; std::vector permuted = initial; absl::c_next_permutation(permuted); EXPECT_TRUE(absl::c_is_permutation(initial, permuted)); EXPECT_TRUE(absl::c_is_permutation(initial, permuted, std::equal_to())); std::vector permuted2 = initial; absl::c_prev_permutation(permuted2, std::greater()); EXPECT_EQ(permuted, permuted2); absl::c_prev_permutation(permuted); EXPECT_EQ(initial, permuted); } } // namespace abseil-20220623.1/absl/algorithm/equal_benchmark.cc000066400000000000000000000113551430371345100217170ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "benchmark/benchmark.h" #include "absl/algorithm/algorithm.h" namespace { // The range of sequence sizes to benchmark. constexpr int kMinBenchmarkSize = 1024; constexpr int kMaxBenchmarkSize = 8 * 1024 * 1024; // A user-defined type for use in equality benchmarks. Note that we expect // std::memcmp to win for this type: libstdc++'s std::equal only defers to // memcmp for integral types. This is because it is not straightforward to // guarantee that std::memcmp would produce a result "as-if" compared by // operator== for other types (example gotchas: NaN floats, structs with // padding). 
struct EightBits { explicit EightBits(int /* unused */) : data(0) {} bool operator==(const EightBits& rhs) const { return data == rhs.data; } uint8_t data; }; template void BM_absl_equal_benchmark(benchmark::State& state) { std::vector xs(state.range(0), T(0)); std::vector ys = xs; while (state.KeepRunning()) { const bool same = absl::equal(xs.begin(), xs.end(), ys.begin(), ys.end()); benchmark::DoNotOptimize(same); } } template void BM_std_equal_benchmark(benchmark::State& state) { std::vector xs(state.range(0), T(0)); std::vector ys = xs; while (state.KeepRunning()) { const bool same = std::equal(xs.begin(), xs.end(), ys.begin()); benchmark::DoNotOptimize(same); } } template void BM_memcmp_benchmark(benchmark::State& state) { std::vector xs(state.range(0), T(0)); std::vector ys = xs; while (state.KeepRunning()) { const bool same = std::memcmp(xs.data(), ys.data(), xs.size() * sizeof(T)) == 0; benchmark::DoNotOptimize(same); } } // The expectation is that the compiler should be able to elide the equality // comparison altogether for sufficiently simple types. 
template void BM_absl_equal_self_benchmark(benchmark::State& state) { std::vector xs(state.range(0), T(0)); while (state.KeepRunning()) { const bool same = absl::equal(xs.begin(), xs.end(), xs.begin(), xs.end()); benchmark::DoNotOptimize(same); } } BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint8_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint8_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint8_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint8_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint16_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint16_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint16_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint16_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint32_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint32_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint32_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint32_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, uint64_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_std_equal_benchmark, uint64_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_memcmp_benchmark, uint64_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, uint64_t) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_benchmark, EightBits) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); 
BENCHMARK_TEMPLATE(BM_std_equal_benchmark, EightBits) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_memcmp_benchmark, EightBits) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); BENCHMARK_TEMPLATE(BM_absl_equal_self_benchmark, EightBits) ->Range(kMinBenchmarkSize, kMaxBenchmarkSize); } // namespace abseil-20220623.1/absl/base/000077500000000000000000000000001430371345100152065ustar00rootroot00000000000000abseil-20220623.1/absl/base/BUILD.bazel000066400000000000000000000416071430371345100170740ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", "ABSL_TEST_COPTS", ) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) cc_library( name = "atomic_hook", hdrs = ["internal/atomic_hook.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ":core_headers", ], ) cc_library( name = "errno_saver", hdrs = ["internal/errno_saver.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [":config"], ) cc_library( name = "log_severity", srcs = ["log_severity.cc"], hdrs = ["log_severity.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", ":core_headers", ], ) cc_library( name = "raw_logging_internal", srcs = ["internal/raw_logging.cc"], hdrs = ["internal/raw_logging.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":atomic_hook", ":config", ":core_headers", ":errno_saver", ":log_severity", ], ) cc_library( name = "spinlock_wait", srcs = [ "internal/spinlock_akaros.inc", "internal/spinlock_linux.inc", "internal/spinlock_posix.inc", "internal/spinlock_wait.cc", "internal/spinlock_win32.inc", ], hdrs = ["internal/spinlock_wait.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl/base:__pkg__", ], deps = [ ":base_internal", ":core_headers", ":errno_saver", ], ) cc_library( name = "config", hdrs = [ "config.h", "options.h", "policy_checks.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, ) cc_library( name = "dynamic_annotations", srcs = [ "internal/dynamic_annotations.h", ], hdrs = [ "dynamic_annotations.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", ":core_headers", ], ) cc_library( name = "core_headers", srcs = [ "internal/thread_annotations.h", ], hdrs = [ "attributes.h", 
"const_init.h", "macros.h", "optimization.h", "port.h", "thread_annotations.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", ], ) cc_library( name = "malloc_internal", srcs = [ "internal/low_level_alloc.cc", ], hdrs = [ "internal/direct_mmap.h", "internal/low_level_alloc.h", ], copts = ABSL_DEFAULT_COPTS + select({ "//conditions:default": [], }), linkopts = select({ "//absl:msvc_compiler": [], "//absl:clang-cl_compiler": [], "//absl:wasm": [], "//conditions:default": ["-pthread"], }) + ABSL_DEFAULT_LINKOPTS, visibility = [ "//visibility:public", ], deps = [ ":base", ":base_internal", ":config", ":core_headers", ":dynamic_annotations", ":raw_logging_internal", ], ) cc_library( name = "base_internal", hdrs = [ "internal/hide_ptr.h", "internal/identity.h", "internal/inline_variable.h", "internal/invoke.h", "internal/scheduling_mode.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", "//absl/meta:type_traits", ], ) cc_library( name = "base", srcs = [ "internal/cycleclock.cc", "internal/spinlock.cc", "internal/sysinfo.cc", "internal/thread_identity.cc", "internal/unscaledcycleclock.cc", ], hdrs = [ "call_once.h", "casts.h", "internal/cycleclock.h", "internal/low_level_scheduling.h", "internal/per_thread_tls.h", "internal/spinlock.h", "internal/sysinfo.h", "internal/thread_identity.h", "internal/tsan_mutex_interface.h", "internal/unscaledcycleclock.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = select({ "//absl:msvc_compiler": [ "-DEFAULTLIB:advapi32.lib", ], "//absl:clang-cl_compiler": [ "-DEFAULTLIB:advapi32.lib", ], "//absl:wasm": [], "//conditions:default": ["-pthread"], }) + ABSL_DEFAULT_LINKOPTS, deps = [ ":atomic_hook", ":base_internal", ":config", ":core_headers", ":dynamic_annotations", ":log_severity", ":raw_logging_internal", ":spinlock_wait", "//absl/meta:type_traits", ], ) cc_library( name = "atomic_hook_test_helper", testonly = 1, srcs = 
["internal/atomic_hook_test_helper.cc"], hdrs = ["internal/atomic_hook_test_helper.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":atomic_hook", ":core_headers", ], ) cc_test( name = "atomic_hook_test", size = "small", srcs = ["internal/atomic_hook_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":atomic_hook", ":atomic_hook_test_helper", ":core_headers", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "bit_cast_test", size = "small", srcs = [ "bit_cast_test.cc", ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base", ":core_headers", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "throw_delegate", srcs = ["internal/throw_delegate.cc"], hdrs = ["internal/throw_delegate.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ":raw_logging_internal", ], ) cc_test( name = "throw_delegate_test", srcs = ["throw_delegate_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", ":throw_delegate", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "errno_saver_test", size = "small", srcs = ["internal/errno_saver_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":errno_saver", ":strerror", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "exception_testing", testonly = 1, hdrs = ["internal/exception_testing.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", "@com_google_googletest//:gtest", ], ) cc_library( name = "pretty_function", hdrs = ["internal/pretty_function.h"], linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//absl:__subpackages__"], ) cc_library( name = "exception_safety_testing", testonly = 1, srcs = ["internal/exception_safety_testing.cc"], hdrs = ["internal/exception_safety_testing.h"], copts = ABSL_TEST_COPTS, 
linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", ":pretty_function", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", "//absl/utility", "@com_google_googletest//:gtest", ], ) cc_test( name = "exception_safety_testing_test", srcs = ["exception_safety_testing_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":exception_safety_testing", "//absl/memory", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "inline_variable_test", size = "small", srcs = [ "inline_variable_test.cc", "inline_variable_test_a.cc", "inline_variable_test_b.cc", "internal/inline_variable_testing.h", ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base_internal", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "invoke_test", size = "small", srcs = ["invoke_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base_internal", "//absl/memory", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) # Common test library made available for use in non-absl code that overrides # AbslInternalSpinLockDelay and AbslInternalSpinLockWake. 
cc_library( name = "spinlock_test_common", testonly = 1, srcs = ["spinlock_test_common.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base", ":base_internal", ":config", ":core_headers", "//absl/synchronization", "@com_google_googletest//:gtest", ], alwayslink = 1, ) cc_test( name = "spinlock_test", size = "medium", srcs = ["spinlock_test_common.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_wasm", ], deps = [ ":base", ":base_internal", ":config", ":core_headers", "//absl/synchronization", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "spinlock_benchmark_common", testonly = 1, srcs = ["internal/spinlock_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl/base:__pkg__", ], deps = [ ":base", ":base_internal", ":raw_logging_internal", "//absl/synchronization", "@com_github_google_benchmark//:benchmark_main", ], alwayslink = 1, ) cc_binary( name = "spinlock_benchmark", testonly = 1, copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":spinlock_benchmark_common", ], ) cc_library( name = "endian", hdrs = [ "internal/endian.h", "internal/unaligned_access.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base", ":config", ":core_headers", ], ) cc_test( name = "endian_test", srcs = ["internal/endian_test.cc"], copts = ABSL_TEST_COPTS, deps = [ ":config", ":endian", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "config_test", srcs = ["config_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":config", "//absl/synchronization:thread_pool", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "call_once_test", srcs = ["call_once_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base", ":core_headers", "//absl/synchronization", 
"@com_google_googletest//:gtest_main", ], ) cc_test( name = "raw_logging_test", srcs = ["raw_logging_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":raw_logging_internal", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "sysinfo_test", size = "small", srcs = ["internal/sysinfo_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":base", "//absl/synchronization", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "low_level_alloc_test", size = "medium", srcs = ["internal/low_level_alloc_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_ios_x86_64", "no_test_wasm", ], deps = [ ":malloc_internal", "//absl/container:node_hash_map", ], ) cc_test( name = "thread_identity_test", size = "small", srcs = ["internal/thread_identity_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_wasm", ], deps = [ ":base", ":core_headers", "//absl/synchronization", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "thread_identity_benchmark", srcs = ["internal/thread_identity_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":base", "//absl/synchronization", "@com_github_google_benchmark//:benchmark_main", ], ) cc_library( name = "scoped_set_env", testonly = 1, srcs = ["internal/scoped_set_env.cc"], hdrs = ["internal/scoped_set_env.h"], linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ":raw_logging_internal", ], ) cc_test( name = "scoped_set_env_test", size = "small", srcs = ["internal/scoped_set_env_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":scoped_set_env", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "log_severity_test", size = "small", srcs = ["log_severity_test.cc"], copts = ABSL_TEST_COPTS, linkopts = 
ABSL_DEFAULT_LINKOPTS, deps = [ ":log_severity", "//absl/flags:flag_internal", "//absl/flags:marshalling", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "strerror", srcs = ["internal/strerror.cc"], hdrs = ["internal/strerror.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ":core_headers", ":errno_saver", ], ) cc_test( name = "strerror_test", size = "small", srcs = ["internal/strerror_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":strerror", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_binary( name = "strerror_benchmark", testonly = 1, srcs = ["internal/strerror_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":strerror", "@com_github_google_benchmark//:benchmark_main", ], ) cc_library( name = "fast_type_id", hdrs = ["internal/fast_type_id.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ], ) cc_test( name = "fast_type_id_test", size = "small", srcs = ["internal/fast_type_id_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":fast_type_id", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "prefetch", hdrs = ["internal/prefetch.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = [ ":config", ], ) cc_test( name = "prefetch_test", size = "small", srcs = ["internal/prefetch_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":prefetch", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "unique_small_name_test", size = "small", srcs = ["internal/unique_small_name_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, linkstatic = 1, deps = [ ":core_headers", 
"//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "optimization_test", size = "small", srcs = ["optimization_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":core_headers", "//absl/types:optional", "@com_google_googletest//:gtest_main", ], ) abseil-20220623.1/absl/base/CMakeLists.txt000066400000000000000000000263351430371345100177570ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # find_library(LIBRT rt) # Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook HDRS "internal/atomic_hook.h" DEPS absl::config absl::core_headers COPTS ${ABSL_DEFAULT_COPTS} ) # Internal-only target, do not depend on directly. absl_cc_library( NAME errno_saver HDRS "internal/errno_saver.h" DEPS absl::config COPTS ${ABSL_DEFAULT_COPTS} ) absl_cc_library( NAME log_severity HDRS "log_severity.h" SRCS "log_severity.cc" DEPS absl::core_headers COPTS ${ABSL_DEFAULT_COPTS} ) # Internal-only target, do not depend on directly. absl_cc_library( NAME raw_logging_internal HDRS "internal/raw_logging.h" SRCS "internal/raw_logging.cc" DEPS absl::atomic_hook absl::config absl::core_headers absl::errno_saver absl::log_severity COPTS ${ABSL_DEFAULT_COPTS} ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME spinlock_wait HDRS "internal/spinlock_wait.h" SRCS "internal/spinlock_akaros.inc" "internal/spinlock_linux.inc" "internal/spinlock_posix.inc" "internal/spinlock_wait.cc" "internal/spinlock_win32.inc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::base_internal absl::core_headers absl::errno_saver ) absl_cc_library( NAME config HDRS "config.h" "options.h" "policy_checks.h" COPTS ${ABSL_DEFAULT_COPTS} PUBLIC ) absl_cc_library( NAME dynamic_annotations HDRS "dynamic_annotations.h" SRCS "internal/dynamic_annotations.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config PUBLIC ) absl_cc_library( NAME core_headers HDRS "attributes.h" "const_init.h" "macros.h" "optimization.h" "port.h" "thread_annotations.h" "internal/thread_annotations.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config PUBLIC ) # Internal-only target, do not depend on directly. absl_cc_library( NAME malloc_internal HDRS "internal/direct_mmap.h" "internal/low_level_alloc.h" SRCS "internal/low_level_alloc.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::base absl::base_internal absl::config absl::core_headers absl::dynamic_annotations absl::raw_logging_internal Threads::Threads ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME base_internal HDRS "internal/hide_ptr.h" "internal/identity.h" "internal/inline_variable.h" "internal/invoke.h" "internal/scheduling_mode.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::type_traits ) absl_cc_library( NAME base HDRS "call_once.h" "casts.h" "internal/cycleclock.h" "internal/low_level_scheduling.h" "internal/per_thread_tls.h" "internal/spinlock.h" "internal/sysinfo.h" "internal/thread_identity.h" "internal/tsan_mutex_interface.h" "internal/unscaledcycleclock.h" SRCS "internal/cycleclock.cc" "internal/spinlock.cc" "internal/sysinfo.cc" "internal/thread_identity.cc" "internal/unscaledcycleclock.cc" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} $<$:-lrt> $<$:"advapi32"> DEPS absl::atomic_hook absl::base_internal absl::config absl::core_headers absl::dynamic_annotations absl::log_severity absl::raw_logging_internal absl::spinlock_wait absl::type_traits Threads::Threads PUBLIC ) # Internal-only target, do not depend on directly. absl_cc_library( NAME throw_delegate HDRS "internal/throw_delegate.h" SRCS "internal/throw_delegate.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::raw_logging_internal ) # Internal-only target, do not depend on directly. absl_cc_library( NAME exception_testing HDRS "internal/exception_testing.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config GTest::gtest TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME pretty_function HDRS "internal/pretty_function.h" COPTS ${ABSL_DEFAULT_COPTS} ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME exception_safety_testing HDRS "internal/exception_safety_testing.h" SRCS "internal/exception_safety_testing.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::config absl::pretty_function absl::memory absl::meta absl::strings absl::utility GTest::gtest TESTONLY ) absl_cc_test( NAME absl_exception_safety_testing_test SRCS "exception_safety_testing_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::exception_safety_testing absl::memory GTest::gtest_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME atomic_hook_test_helper SRCS "internal/atomic_hook_test_helper.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::atomic_hook absl::core_headers TESTONLY ) absl_cc_test( NAME atomic_hook_test SRCS "internal/atomic_hook_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::atomic_hook_test_helper absl::atomic_hook absl::core_headers GTest::gmock GTest::gtest_main ) absl_cc_test( NAME bit_cast_test SRCS "bit_cast_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::core_headers GTest::gtest_main ) absl_cc_test( NAME errno_saver_test SRCS "internal/errno_saver_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::errno_saver absl::strerror GTest::gmock GTest::gtest_main ) absl_cc_test( NAME throw_delegate_test SRCS "throw_delegate_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::config absl::throw_delegate GTest::gtest_main ) absl_cc_test( NAME inline_variable_test SRCS "internal/inline_variable_testing.h" "inline_variable_test.cc" "inline_variable_test_a.cc" "inline_variable_test_b.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base_internal GTest::gtest_main ) absl_cc_test( NAME invoke_test SRCS "invoke_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base_internal absl::memory absl::strings GTest::gmock GTest::gtest_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME spinlock_test_common SRCS "spinlock_test_common.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::config absl::base_internal absl::core_headers absl::synchronization GTest::gtest TESTONLY ) # On bazel BUILD this target use "alwayslink = 1" which is not implemented here absl_cc_test( NAME spinlock_test SRCS "spinlock_test_common.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::base_internal absl::config absl::core_headers absl::synchronization GTest::gtest_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME endian HDRS "internal/endian.h" "internal/unaligned_access.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::base absl::config absl::core_headers PUBLIC ) absl_cc_test( NAME endian_test SRCS "internal/endian_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::config absl::endian GTest::gtest_main ) absl_cc_test( NAME config_test SRCS "config_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::config absl::synchronization GTest::gtest_main ) absl_cc_test( NAME call_once_test SRCS "call_once_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::core_headers absl::synchronization GTest::gtest_main ) absl_cc_test( NAME raw_logging_test SRCS "raw_logging_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::raw_logging_internal absl::strings GTest::gtest_main ) absl_cc_test( NAME sysinfo_test SRCS "internal/sysinfo_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::synchronization GTest::gtest_main ) absl_cc_test( NAME low_level_alloc_test SRCS "internal/low_level_alloc_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::malloc_internal absl::node_hash_map Threads::Threads ) absl_cc_test( NAME thread_identity_test SRCS "internal/thread_identity_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base absl::core_headers absl::synchronization Threads::Threads GTest::gtest_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME scoped_set_env SRCS "internal/scoped_set_env.cc" HDRS "internal/scoped_set_env.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::raw_logging_internal ) absl_cc_test( NAME scoped_set_env_test SRCS "internal/scoped_set_env_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::scoped_set_env GTest::gtest_main ) absl_cc_test( NAME cmake_thread_test SRCS "internal/cmake_thread_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::base ) absl_cc_test( NAME log_severity_test SRCS "log_severity_test.cc" DEPS absl::flags_internal absl::flags_marshalling absl::log_severity absl::strings GTest::gmock GTest::gtest_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME strerror SRCS "internal/strerror.cc" HDRS "internal/strerror.h" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config absl::core_headers absl::errno_saver ) absl_cc_test( NAME strerror_test SRCS "internal/strerror_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::strerror absl::strings GTest::gmock GTest::gtest_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME fast_type_id HDRS "internal/fast_type_id.h" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config ) absl_cc_test( NAME fast_type_id_test SRCS "internal/fast_type_id_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::fast_type_id GTest::gtest_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME prefetch HDRS "internal/prefetch.h" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::config ) absl_cc_test( NAME prefetch_test SRCS "internal/prefetch_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::prefetch GTest::gtest_main ) absl_cc_test( NAME optimization_test SRCS "optimization_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::core_headers absl::optional GTest::gtest_main ) abseil-20220623.1/absl/base/attributes.h000066400000000000000000000730621430371345100175550ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This header file defines macros for declaring attributes for functions, // types, and variables. // // These macros are used within Abseil and allow the compiler to optimize, where // applicable, certain function calls. // // Most macros here are exposing GCC or Clang features, and are stubbed out for // other compilers. // // GCC attributes documentation: // https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Function-Attributes.html // https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Variable-Attributes.html // https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/Type-Attributes.html // // Most attributes in this file are already supported by GCC 4.7. However, some // of them are not supported in older version of Clang. Thus, we check // `__has_attribute()` first. 
If the check fails, we check if we are on GCC and // assume the attribute exists on GCC (which is verified on GCC 4.7). #ifndef ABSL_BASE_ATTRIBUTES_H_ #define ABSL_BASE_ATTRIBUTES_H_ #include "absl/base/config.h" // ABSL_HAVE_ATTRIBUTE // // A function-like feature checking macro that is a wrapper around // `__has_attribute`, which is defined by GCC 5+ and Clang and evaluates to a // nonzero constant integer if the attribute is supported or 0 if not. // // It evaluates to zero if `__has_attribute` is not defined by the compiler. // // GCC: https://gcc.gnu.org/gcc-5/changes.html // Clang: https://clang.llvm.org/docs/LanguageExtensions.html #ifdef __has_attribute #define ABSL_HAVE_ATTRIBUTE(x) __has_attribute(x) #else #define ABSL_HAVE_ATTRIBUTE(x) 0 #endif // ABSL_HAVE_CPP_ATTRIBUTE // // A function-like feature checking macro that accepts C++11 style attributes. // It's a wrapper around `__has_cpp_attribute`, defined by ISO C++ SD-6 // (https://en.cppreference.com/w/cpp/experimental/feature_test). If we don't // find `__has_cpp_attribute`, will evaluate to 0. #if defined(__cplusplus) && defined(__has_cpp_attribute) // NOTE: requiring __cplusplus above should not be necessary, but // works around https://bugs.llvm.org/show_bug.cgi?id=23435. #define ABSL_HAVE_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) #else #define ABSL_HAVE_CPP_ATTRIBUTE(x) 0 #endif // ----------------------------------------------------------------------------- // Function Attributes // ----------------------------------------------------------------------------- // // GCC: https://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html // Clang: https://clang.llvm.org/docs/AttributeReference.html // ABSL_PRINTF_ATTRIBUTE // ABSL_SCANF_ATTRIBUTE // // Tells the compiler to perform `printf` format string checking if the // compiler supports it; see the 'format' attribute in // . 
// // Note: As the GCC manual states, "[s]ince non-static C++ methods // have an implicit 'this' argument, the arguments of such methods // should be counted from two, not one." #if ABSL_HAVE_ATTRIBUTE(format) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) \ __attribute__((__format__(__printf__, string_index, first_to_check))) #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) \ __attribute__((__format__(__scanf__, string_index, first_to_check))) #else #define ABSL_PRINTF_ATTRIBUTE(string_index, first_to_check) #define ABSL_SCANF_ATTRIBUTE(string_index, first_to_check) #endif // ABSL_ATTRIBUTE_ALWAYS_INLINE // ABSL_ATTRIBUTE_NOINLINE // // Forces functions to either inline or not inline. Introduced in gcc 3.1. #if ABSL_HAVE_ATTRIBUTE(always_inline) || \ (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline)) #define ABSL_HAVE_ATTRIBUTE_ALWAYS_INLINE 1 #else #define ABSL_ATTRIBUTE_ALWAYS_INLINE #endif #if ABSL_HAVE_ATTRIBUTE(noinline) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_NOINLINE __attribute__((noinline)) #define ABSL_HAVE_ATTRIBUTE_NOINLINE 1 #else #define ABSL_ATTRIBUTE_NOINLINE #endif // ABSL_ATTRIBUTE_NO_TAIL_CALL // // Prevents the compiler from optimizing away stack frames for functions which // end in a call to another function. #if ABSL_HAVE_ATTRIBUTE(disable_tail_calls) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL __attribute__((disable_tail_calls)) #elif defined(__GNUC__) && !defined(__clang__) && !defined(__e2k__) #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 1 #define ABSL_ATTRIBUTE_NO_TAIL_CALL \ __attribute__((optimize("no-optimize-sibling-calls"))) #else #define ABSL_ATTRIBUTE_NO_TAIL_CALL #define ABSL_HAVE_ATTRIBUTE_NO_TAIL_CALL 0 #endif // ABSL_ATTRIBUTE_WEAK // // Tags a function as weak for the purposes of compilation and linking. 
// Weak attributes did not work properly in LLVM's Windows backend before // 9.0.0, so disable them there. See https://bugs.llvm.org/show_bug.cgi?id=37598 // for further information. // The MinGW compiler doesn't complain about the weak attribute until the link // step, presumably because Windows doesn't use ELF binaries. #if (ABSL_HAVE_ATTRIBUTE(weak) || \ (defined(__GNUC__) && !defined(__clang__))) && \ (!defined(_WIN32) || (defined(__clang__) && __clang_major__ >= 9)) && \ !defined(__MINGW32__) #undef ABSL_ATTRIBUTE_WEAK #define ABSL_ATTRIBUTE_WEAK __attribute__((weak)) #define ABSL_HAVE_ATTRIBUTE_WEAK 1 #else #define ABSL_ATTRIBUTE_WEAK #define ABSL_HAVE_ATTRIBUTE_WEAK 0 #endif // ABSL_ATTRIBUTE_NONNULL // // Tells the compiler either (a) that a particular function parameter // should be a non-null pointer, or (b) that all pointer arguments should // be non-null. // // Note: As the GCC manual states, "[s]ince non-static C++ methods // have an implicit 'this' argument, the arguments of such methods // should be counted from two, not one." // // Args are indexed starting at 1. // // For non-static class member functions, the implicit `this` argument // is arg 1, and the first explicit argument is arg 2. For static class member // functions, there is no implicit `this`, and the first explicit argument is // arg 1. // // Example: // // /* arg_a cannot be null, but arg_b can */ // void Function(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(1); // // class C { // /* arg_a cannot be null, but arg_b can */ // void Method(void* arg_a, void* arg_b) ABSL_ATTRIBUTE_NONNULL(2); // // /* arg_a cannot be null, but arg_b can */ // static void StaticMethod(void* arg_a, void* arg_b) // ABSL_ATTRIBUTE_NONNULL(1); // }; // // If no arguments are provided, then all pointer arguments should be non-null. // // /* No pointer arguments may be null. 
*/ // void Function(void* arg_a, void* arg_b, int arg_c) ABSL_ATTRIBUTE_NONNULL(); // // NOTE: The GCC nonnull attribute actually accepts a list of arguments, but // ABSL_ATTRIBUTE_NONNULL does not. #if ABSL_HAVE_ATTRIBUTE(nonnull) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_NONNULL(arg_index) __attribute__((nonnull(arg_index))) #else #define ABSL_ATTRIBUTE_NONNULL(...) #endif // ABSL_ATTRIBUTE_NORETURN // // Tells the compiler that a given function never returns. #if ABSL_HAVE_ATTRIBUTE(noreturn) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_NORETURN __attribute__((noreturn)) #elif defined(_MSC_VER) #define ABSL_ATTRIBUTE_NORETURN __declspec(noreturn) #else #define ABSL_ATTRIBUTE_NORETURN #endif // ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS // // Tells the AddressSanitizer (or other memory testing tools) to ignore a given // function. Useful for cases when a function reads random locations on stack, // calls _exit from a cloned subprocess, deliberately accesses buffer // out of bounds or does other scary things with memory. // NOTE: GCC supports AddressSanitizer(asan) since 4.8. // https://gcc.gnu.org/gcc-4.8/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_address) #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address)) #elif defined(_MSC_VER) && _MSC_VER >= 1928 // https://docs.microsoft.com/en-us/cpp/cpp/no-sanitize-address #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS __declspec(no_sanitize_address) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS #endif // ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY // // Tells the MemorySanitizer to relax the handling of a given function. All "Use // of uninitialized value" warnings from such functions will be suppressed, and // all values loaded from memory will be considered fully initialized. This // attribute is similar to the ABSL_ATTRIBUTE_NO_SANITIZE_ADDRESS attribute // above, but deals with initialized-ness rather than addressability issues. 
// NOTE: MemorySanitizer(msan) is supported by Clang but not GCC. #if ABSL_HAVE_ATTRIBUTE(no_sanitize_memory) #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory)) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY #endif // ABSL_ATTRIBUTE_NO_SANITIZE_THREAD // // Tells the ThreadSanitizer to not instrument a given function. // NOTE: GCC supports ThreadSanitizer(tsan) since 4.8. // https://gcc.gnu.org/gcc-4.8/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_thread) #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD __attribute__((no_sanitize_thread)) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_THREAD #endif // ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED // // Tells the UndefinedSanitizer to ignore a given function. Useful for cases // where certain behavior (eg. division by zero) is being used intentionally. // NOTE: GCC supports UndefinedBehaviorSanitizer(ubsan) since 4.9. // https://gcc.gnu.org/gcc-4.9/changes.html #if ABSL_HAVE_ATTRIBUTE(no_sanitize_undefined) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ __attribute__((no_sanitize_undefined)) #elif ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED \ __attribute__((no_sanitize("undefined"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_UNDEFINED #endif // ABSL_ATTRIBUTE_NO_SANITIZE_CFI // // Tells the ControlFlowIntegrity sanitizer to not instrument a given function. // See https://clang.llvm.org/docs/ControlFlowIntegrity.html for details. #if ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI __attribute__((no_sanitize("cfi"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_CFI #endif // ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK // // Tells the SafeStack to not instrument a given function. // See https://clang.llvm.org/docs/SafeStack.html for details. 
#if ABSL_HAVE_ATTRIBUTE(no_sanitize) #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK \ __attribute__((no_sanitize("safe-stack"))) #else #define ABSL_ATTRIBUTE_NO_SANITIZE_SAFESTACK #endif // ABSL_ATTRIBUTE_RETURNS_NONNULL // // Tells the compiler that a particular function never returns a null pointer. #if ABSL_HAVE_ATTRIBUTE(returns_nonnull) #define ABSL_ATTRIBUTE_RETURNS_NONNULL __attribute__((returns_nonnull)) #else #define ABSL_ATTRIBUTE_RETURNS_NONNULL #endif // ABSL_HAVE_ATTRIBUTE_SECTION // // Indicates whether labeled sections are supported. Weak symbol support is // a prerequisite. Labeled sections are not supported on Darwin/iOS. #ifdef ABSL_HAVE_ATTRIBUTE_SECTION #error ABSL_HAVE_ATTRIBUTE_SECTION cannot be directly set #elif (ABSL_HAVE_ATTRIBUTE(section) || \ (defined(__GNUC__) && !defined(__clang__))) && \ !defined(__APPLE__) && ABSL_HAVE_ATTRIBUTE_WEAK #define ABSL_HAVE_ATTRIBUTE_SECTION 1 // ABSL_ATTRIBUTE_SECTION // // Tells the compiler/linker to put a given function into a section and define // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. // This functionality is supported by GNU linker. Any function annotated with // `ABSL_ATTRIBUTE_SECTION` must not be inlined, or it will be placed into // whatever section its caller is placed into. // #ifndef ABSL_ATTRIBUTE_SECTION #define ABSL_ATTRIBUTE_SECTION(name) \ __attribute__((section(#name))) __attribute__((noinline)) #endif // ABSL_ATTRIBUTE_SECTION_VARIABLE // // Tells the compiler/linker to put a given variable into a section and define // `__start_ ## name` and `__stop_ ## name` symbols to bracket the section. // This functionality is supported by GNU linker. #ifndef ABSL_ATTRIBUTE_SECTION_VARIABLE #ifdef _AIX // __attribute__((section(#name))) on AIX is achived by using the `.csect` psudo // op which includes an additional integer as part of its syntax indcating // alignment. 
If data fall under different alignments then you might get a // compilation error indicating a `Section type conflict`. #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) #else #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) __attribute__((section(#name))) #endif #endif // ABSL_DECLARE_ATTRIBUTE_SECTION_VARS // // A weak section declaration to be used as a global declaration // for ABSL_ATTRIBUTE_SECTION_START|STOP(name) to compile and link // even without functions with ABSL_ATTRIBUTE_SECTION(name). // ABSL_DEFINE_ATTRIBUTE_SECTION should be in the exactly one file; it's // a no-op on ELF but not on Mach-O. // #ifndef ABSL_DECLARE_ATTRIBUTE_SECTION_VARS #define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) \ extern char __start_##name[] ABSL_ATTRIBUTE_WEAK; \ extern char __stop_##name[] ABSL_ATTRIBUTE_WEAK #endif #ifndef ABSL_DEFINE_ATTRIBUTE_SECTION_VARS #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) #endif // ABSL_ATTRIBUTE_SECTION_START // // Returns `void*` pointers to start/end of a section of code with // functions having ABSL_ATTRIBUTE_SECTION(name). // Returns 0 if no such functions exist. // One must ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) for this to compile and // link. // #define ABSL_ATTRIBUTE_SECTION_START(name) \ (reinterpret_cast(__start_##name)) #define ABSL_ATTRIBUTE_SECTION_STOP(name) \ (reinterpret_cast(__stop_##name)) #else // !ABSL_HAVE_ATTRIBUTE_SECTION #define ABSL_HAVE_ATTRIBUTE_SECTION 0 // provide dummy definitions #define ABSL_ATTRIBUTE_SECTION(name) #define ABSL_ATTRIBUTE_SECTION_VARIABLE(name) #define ABSL_INIT_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DEFINE_ATTRIBUTE_SECTION_VARS(name) #define ABSL_DECLARE_ATTRIBUTE_SECTION_VARS(name) #define ABSL_ATTRIBUTE_SECTION_START(name) (reinterpret_cast(0)) #define ABSL_ATTRIBUTE_SECTION_STOP(name) (reinterpret_cast(0)) #endif // ABSL_ATTRIBUTE_SECTION // ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC // // Support for aligning the stack on 32-bit x86. 
#if ABSL_HAVE_ATTRIBUTE(force_align_arg_pointer) || \ (defined(__GNUC__) && !defined(__clang__)) #if defined(__i386__) #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC \ __attribute__((force_align_arg_pointer)) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) #elif defined(__x86_64__) #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (1) #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC #else // !__i386__ && !__x86_64 #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC #endif // __i386__ #else #define ABSL_ATTRIBUTE_STACK_ALIGN_FOR_OLD_LIBC #define ABSL_REQUIRE_STACK_ALIGN_TRAMPOLINE (0) #endif // ABSL_MUST_USE_RESULT // // Tells the compiler to warn about unused results. // // For code or headers that are assured to only build with C++17 and up, prefer // just using the standard `[[nodiscard]]` directly over this macro. // // When annotating a function, it must appear as the first part of the // declaration or definition. The compiler will warn if the return value from // such a function is unused: // // ABSL_MUST_USE_RESULT Sprocket* AllocateSprocket(); // AllocateSprocket(); // Triggers a warning. // // When annotating a class, it is equivalent to annotating every function which // returns an instance. // // class ABSL_MUST_USE_RESULT Sprocket {}; // Sprocket(); // Triggers a warning. // // Sprocket MakeSprocket(); // MakeSprocket(); // Triggers a warning. // // Note that references and pointers are not instances: // // Sprocket* SprocketPointer(); // SprocketPointer(); // Does *not* trigger a warning. // // ABSL_MUST_USE_RESULT allows using cast-to-void to suppress the unused result // warning. For that, warn_unused_result is used only for clang but not for gcc. // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66425 // // Note: past advice was to place the macro after the argument list. // // TODO(b/176172494): Use ABSL_HAVE_CPP_ATTRIBUTE(nodiscard) when all code is // compliant with the stricter [[nodiscard]]. 
#if defined(__clang__) && ABSL_HAVE_ATTRIBUTE(warn_unused_result) #define ABSL_MUST_USE_RESULT __attribute__((warn_unused_result)) #else #define ABSL_MUST_USE_RESULT #endif // ABSL_ATTRIBUTE_HOT, ABSL_ATTRIBUTE_COLD // // Tells GCC that a function is hot or cold. GCC can use this information to // improve static analysis, i.e. a conditional branch to a cold function // is likely to be not-taken. // This annotation is used for function declarations. // // Example: // // int foo() ABSL_ATTRIBUTE_HOT; #if ABSL_HAVE_ATTRIBUTE(hot) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_HOT __attribute__((hot)) #else #define ABSL_ATTRIBUTE_HOT #endif #if ABSL_HAVE_ATTRIBUTE(cold) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_COLD __attribute__((cold)) #else #define ABSL_ATTRIBUTE_COLD #endif // ABSL_XRAY_ALWAYS_INSTRUMENT, ABSL_XRAY_NEVER_INSTRUMENT, ABSL_XRAY_LOG_ARGS // // We define the ABSL_XRAY_ALWAYS_INSTRUMENT and ABSL_XRAY_NEVER_INSTRUMENT // macro used as an attribute to mark functions that must always or never be // instrumented by XRay. Currently, this is only supported in Clang/LLVM. // // For reference on the LLVM XRay instrumentation, see // http://llvm.org/docs/XRay.html. // // A function with the XRAY_ALWAYS_INSTRUMENT macro attribute in its declaration // will always get the XRay instrumentation sleds. These sleds may introduce // some binary size and runtime overhead and must be used sparingly. // // These attributes only take effect when the following conditions are met: // // * The file/target is built in at least C++11 mode, with a Clang compiler // that supports XRay attributes. // * The file/target is built with the -fxray-instrument flag set for the // Clang/LLVM compiler. // * The function is defined in the translation unit (the compiler honors the // attribute in either the definition or the declaration, and must match). 
// // There are cases when, even when building with XRay instrumentation, users // might want to control specifically which functions are instrumented for a // particular build using special-case lists provided to the compiler. These // special case lists are provided to Clang via the // -fxray-always-instrument=... and -fxray-never-instrument=... flags. The // attributes in source take precedence over these special-case lists. // // To disable the XRay attributes at build-time, users may define // ABSL_NO_XRAY_ATTRIBUTES. Do NOT define ABSL_NO_XRAY_ATTRIBUTES on specific // packages/targets, as this may lead to conflicting definitions of functions at // link-time. // // XRay isn't currently supported on Android: // https://github.com/android/ndk/issues/368 #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_always_instrument) && \ !defined(ABSL_NO_XRAY_ATTRIBUTES) && !defined(__ANDROID__) #define ABSL_XRAY_ALWAYS_INSTRUMENT [[clang::xray_always_instrument]] #define ABSL_XRAY_NEVER_INSTRUMENT [[clang::xray_never_instrument]] #if ABSL_HAVE_CPP_ATTRIBUTE(clang::xray_log_args) #define ABSL_XRAY_LOG_ARGS(N) \ [[clang::xray_always_instrument, clang::xray_log_args(N)]] #else #define ABSL_XRAY_LOG_ARGS(N) [[clang::xray_always_instrument]] #endif #else #define ABSL_XRAY_ALWAYS_INSTRUMENT #define ABSL_XRAY_NEVER_INSTRUMENT #define ABSL_XRAY_LOG_ARGS(N) #endif // ABSL_ATTRIBUTE_REINITIALIZES // // Indicates that a member function reinitializes the entire object to a known // state, independent of the previous state of the object. // // The clang-tidy check bugprone-use-after-move allows member functions marked // with this attribute to be called on objects that have been moved from; // without the attribute, this would result in a use-after-move warning. 
#if ABSL_HAVE_CPP_ATTRIBUTE(clang::reinitializes) #define ABSL_ATTRIBUTE_REINITIALIZES [[clang::reinitializes]] #else #define ABSL_ATTRIBUTE_REINITIALIZES #endif // ----------------------------------------------------------------------------- // Variable Attributes // ----------------------------------------------------------------------------- // ABSL_ATTRIBUTE_UNUSED // // Prevents the compiler from complaining about variables that appear unused. // // For code or headers that are assured to only build with C++17 and up, prefer // just using the standard '[[maybe_unused]]' directly over this macro. // // Due to differences in positioning requirements between the old, compiler // specific __attribute__ syntax and the now standard [[maybe_unused]], this // macro does not attempt to take advantage of '[[maybe_unused]]'. #if ABSL_HAVE_ATTRIBUTE(unused) || (defined(__GNUC__) && !defined(__clang__)) #undef ABSL_ATTRIBUTE_UNUSED #define ABSL_ATTRIBUTE_UNUSED __attribute__((__unused__)) #else #define ABSL_ATTRIBUTE_UNUSED #endif // ABSL_ATTRIBUTE_INITIAL_EXEC // // Tells the compiler to use "initial-exec" mode for a thread-local variable. // See http://people.redhat.com/drepper/tls.pdf for the gory details. #if ABSL_HAVE_ATTRIBUTE(tls_model) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_INITIAL_EXEC __attribute__((tls_model("initial-exec"))) #else #define ABSL_ATTRIBUTE_INITIAL_EXEC #endif // ABSL_ATTRIBUTE_PACKED // // Instructs the compiler not to use natural alignment for a tagged data // structure, but instead to reduce its alignment to 1. // // Therefore, DO NOT APPLY THIS ATTRIBUTE TO STRUCTS CONTAINING ATOMICS. Doing // so can cause atomic variables to be mis-aligned and silently violate // atomicity on x86. // // This attribute can either be applied to members of a structure or to a // structure in its entirety. 
Applying this attribute (judiciously) to a // structure in its entirety to optimize the memory footprint of very // commonly-used structs is fine. Do not apply this attribute to a structure in // its entirety if the purpose is to control the offsets of the members in the // structure. Instead, apply this attribute only to structure members that need // it. // // When applying ABSL_ATTRIBUTE_PACKED only to specific structure members the // natural alignment of structure members not annotated is preserved. Aligned // member accesses are faster than non-aligned member accesses even if the // targeted microprocessor supports non-aligned accesses. #if ABSL_HAVE_ATTRIBUTE(packed) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_PACKED __attribute__((__packed__)) #else #define ABSL_ATTRIBUTE_PACKED #endif // ABSL_ATTRIBUTE_FUNC_ALIGN // // Tells the compiler to align the function start at least to certain // alignment boundary #if ABSL_HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__)) #define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) __attribute__((aligned(bytes))) #else #define ABSL_ATTRIBUTE_FUNC_ALIGN(bytes) #endif // ABSL_FALLTHROUGH_INTENDED // // Annotates implicit fall-through between switch labels, allowing a case to // indicate intentional fallthrough and turn off warnings about any lack of a // `break` statement. The ABSL_FALLTHROUGH_INTENDED macro should be followed by // a semicolon and can be used in most places where `break` can, provided that // no statements exist between it and the next switch label. // // Example: // // switch (x) { // case 40: // case 41: // if (truth_is_out_there) { // ++x; // ABSL_FALLTHROUGH_INTENDED; // Use instead of/along with annotations // // in comments // } else { // return x; // } // case 42: // ... // // Notes: When supported, GCC and Clang can issue a warning on switch labels // with unannotated fallthrough using the warning `-Wimplicit-fallthrough`. 
See // clang documentation on language extensions for details: // https://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough // // When used with unsupported compilers, the ABSL_FALLTHROUGH_INTENDED macro has // no effect on diagnostics. In any case this macro has no effect on runtime // behavior and performance of code. #ifdef ABSL_FALLTHROUGH_INTENDED #error "ABSL_FALLTHROUGH_INTENDED should not be defined." #elif ABSL_HAVE_CPP_ATTRIBUTE(fallthrough) #define ABSL_FALLTHROUGH_INTENDED [[fallthrough]] #elif ABSL_HAVE_CPP_ATTRIBUTE(clang::fallthrough) #define ABSL_FALLTHROUGH_INTENDED [[clang::fallthrough]] #elif ABSL_HAVE_CPP_ATTRIBUTE(gnu::fallthrough) #define ABSL_FALLTHROUGH_INTENDED [[gnu::fallthrough]] #else #define ABSL_FALLTHROUGH_INTENDED \ do { \ } while (0) #endif // ABSL_DEPRECATED() // // Marks a deprecated class, struct, enum, function, method and variable // declarations. The macro argument is used as a custom diagnostic message (e.g. // suggestion of a better alternative). // // For code or headers that are assured to only build with C++14 and up, prefer // just using the standard `[[deprecated("message")]]` directly over this macro. // // Examples: // // class ABSL_DEPRECATED("Use Bar instead") Foo {...}; // // ABSL_DEPRECATED("Use Baz() instead") void Bar() {...} // // template // ABSL_DEPRECATED("Use DoThat() instead") // void DoThis(); // // enum FooEnum { // kBar ABSL_DEPRECATED("Use kBaz instead"), // }; // // Every usage of a deprecated entity will trigger a warning when compiled with // GCC/Clang's `-Wdeprecated-declarations` option. Google's production toolchain // turns this warning off by default, instead relying on clang-tidy to report // new uses of deprecated code. 
#if ABSL_HAVE_ATTRIBUTE(deprecated) #define ABSL_DEPRECATED(message) __attribute__((deprecated(message))) #else #define ABSL_DEPRECATED(message) #endif // ABSL_CONST_INIT // // A variable declaration annotated with the `ABSL_CONST_INIT` attribute will // not compile (on supported platforms) unless the variable has a constant // initializer. This is useful for variables with static and thread storage // duration, because it guarantees that they will not suffer from the so-called // "static init order fiasco". // // This attribute must be placed on the initializing declaration of the // variable. Some compilers will give a -Wmissing-constinit warning when this // attribute is placed on some other declaration but missing from the // initializing declaration. // // In some cases (notably with thread_local variables), `ABSL_CONST_INIT` can // also be used in a non-initializing declaration to tell the compiler that a // variable is already initialized, reducing overhead that would otherwise be // incurred by a hidden guard variable. Thus annotating all declarations with // this attribute is recommended to potentially enhance optimization. // // Example: // // class MyClass { // public: // ABSL_CONST_INIT static MyType my_var; // }; // // ABSL_CONST_INIT MyType MyClass::my_var = MakeMyType(...); // // For code or headers that are assured to only build with C++20 and up, prefer // just using the standard `constinit` keyword directly over this macro. // // Note that this attribute is redundant if the variable is declared constexpr. #if defined(__cpp_constinit) && __cpp_constinit >= 201907L #define ABSL_CONST_INIT constinit #elif ABSL_HAVE_CPP_ATTRIBUTE(clang::require_constant_initialization) #define ABSL_CONST_INIT [[clang::require_constant_initialization]] #else #define ABSL_CONST_INIT #endif // ABSL_ATTRIBUTE_PURE_FUNCTION // // ABSL_ATTRIBUTE_PURE_FUNCTION is used to annotate declarations of "pure" // functions. 
A function is pure if its return value is only a function of its // arguments. The pure attribute prohibits a function from modifying the state // of the program that is observable by means other than inspecting the // function's return value. Declaring such functions with the pure attribute // allows the compiler to avoid emitting some calls in repeated invocations of // the function with the same argument values. // // Example: // // ABSL_ATTRIBUTE_PURE_FUNCTION int64_t ToInt64Milliseconds(Duration d); #if ABSL_HAVE_CPP_ATTRIBUTE(gnu::pure) #define ABSL_ATTRIBUTE_PURE_FUNCTION [[gnu::pure]] #elif ABSL_HAVE_ATTRIBUTE(pure) #define ABSL_ATTRIBUTE_PURE_FUNCTION __attribute__((pure)) #else #define ABSL_ATTRIBUTE_PURE_FUNCTION #endif // ABSL_ATTRIBUTE_LIFETIME_BOUND indicates that a resource owned by a function // parameter or implicit object parameter is retained by the return value of the // annotated function (or, for a parameter of a constructor, in the value of the // constructed object). This attribute causes warnings to be produced if a // temporary object does not live long enough. // // When applied to a reference parameter, the referenced object is assumed to be // retained by the return value of the function. When applied to a non-reference // parameter (for example, a pointer or a class type), all temporaries // referenced by the parameter are assumed to be retained by the return value of // the function. 
// // See also the upstream documentation: // https://clang.llvm.org/docs/AttributeReference.html#lifetimebound #if ABSL_HAVE_CPP_ATTRIBUTE(clang::lifetimebound) #define ABSL_ATTRIBUTE_LIFETIME_BOUND [[clang::lifetimebound]] #elif ABSL_HAVE_ATTRIBUTE(lifetimebound) #define ABSL_ATTRIBUTE_LIFETIME_BOUND __attribute__((lifetimebound)) #else #define ABSL_ATTRIBUTE_LIFETIME_BOUND #endif #endif // ABSL_BASE_ATTRIBUTES_H_ abseil-20220623.1/absl/base/bit_cast_test.cc000066400000000000000000000066441430371345100203560ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Unit test for bit_cast template. #include #include #include "gtest/gtest.h" #include "absl/base/casts.h" #include "absl/base/macros.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { template struct marshall { char buf[N]; }; template void TestMarshall(const T values[], int num_values) { for (int i = 0; i < num_values; ++i) { T t0 = values[i]; marshall m0 = absl::bit_cast >(t0); T t1 = absl::bit_cast(m0); marshall m1 = absl::bit_cast >(t1); ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T))); ASSERT_EQ(0, memcmp(&m0, &m1, sizeof(T))); } } // Convert back and forth to an integral type. The C++ standard does // not guarantee this will work, but we test that this works on all the // platforms we support. 
// // Likewise, we below make assumptions about sizeof(float) and // sizeof(double) which the standard does not guarantee, but which hold on the // platforms we support. template void TestIntegral(const T values[], int num_values) { for (int i = 0; i < num_values; ++i) { T t0 = values[i]; I i0 = absl::bit_cast(t0); T t1 = absl::bit_cast(i0); I i1 = absl::bit_cast(t1); ASSERT_EQ(0, memcmp(&t0, &t1, sizeof(T))); ASSERT_EQ(i0, i1); } } TEST(BitCast, Bool) { static const bool bool_list[] = { false, true }; TestMarshall(bool_list, ABSL_ARRAYSIZE(bool_list)); } TEST(BitCast, Int32) { static const int32_t int_list[] = { 0, 1, 100, 2147483647, -1, -100, -2147483647, -2147483647-1 }; TestMarshall(int_list, ABSL_ARRAYSIZE(int_list)); } TEST(BitCast, Int64) { static const int64_t int64_list[] = { 0, 1, 1LL << 40, -1, -(1LL<<40) }; TestMarshall(int64_list, ABSL_ARRAYSIZE(int64_list)); } TEST(BitCast, Uint64) { static const uint64_t uint64_list[] = { 0, 1, 1LLU << 40, 1LLU << 63 }; TestMarshall(uint64_list, ABSL_ARRAYSIZE(uint64_list)); } TEST(BitCast, Float) { static const float float_list[] = { 0.0f, 1.0f, -1.0f, 10.0f, -10.0f, 1e10f, 1e20f, 1e-10f, 1e-20f, 2.71828f, 3.14159f }; TestMarshall(float_list, ABSL_ARRAYSIZE(float_list)); TestIntegral(float_list, ABSL_ARRAYSIZE(float_list)); TestIntegral(float_list, ABSL_ARRAYSIZE(float_list)); } TEST(BitCast, Double) { static const double double_list[] = { 0.0, 1.0, -1.0, 10.0, -10.0, 1e10, 1e100, 1e-10, 1e-100, 2.718281828459045, 3.141592653589793238462643383279502884197169399375105820974944 }; TestMarshall(double_list, ABSL_ARRAYSIZE(double_list)); TestIntegral(double_list, ABSL_ARRAYSIZE(double_list)); TestIntegral(double_list, ABSL_ARRAYSIZE(double_list)); } } // namespace ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/call_once.h000066400000000000000000000177261430371345100173130ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: call_once.h // ----------------------------------------------------------------------------- // // This header file provides an Abseil version of `std::call_once` for invoking // a given function at most once, across all threads. This Abseil version is // faster than the C++11 version and incorporates the C++17 argument-passing // fix, so that (for example) non-const references may be passed to the invoked // function. #ifndef ABSL_BASE_CALL_ONCE_H_ #define ABSL_BASE_CALL_ONCE_H_ #include #include #include #include #include #include "absl/base/internal/invoke.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/spinlock_wait.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" #include "absl/base/port.h" namespace absl { ABSL_NAMESPACE_BEGIN class once_flag; namespace base_internal { std::atomic* ControlWord(absl::once_flag* flag); } // namespace base_internal // call_once() // // For all invocations using a given `once_flag`, invokes a given `fn` exactly // once across all threads. 
The first call to `call_once()` with a particular // `once_flag` argument (that does not throw an exception) will run the // specified function with the provided `args`; other calls with the same // `once_flag` argument will not run the function, but will wait // for the provided function to finish running (if it is still running). // // This mechanism provides a safe, simple, and fast mechanism for one-time // initialization in a multi-threaded process. // // Example: // // class MyInitClass { // public: // ... // mutable absl::once_flag once_; // // MyInitClass* init() const { // absl::call_once(once_, &MyInitClass::Init, this); // return ptr_; // } // template void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args); // once_flag // // Objects of this type are used to distinguish calls to `call_once()` and // ensure the provided function is only invoked once across all threads. This // type is not copyable or movable. However, it has a `constexpr` // constructor, and is safe to use as a namespace-scoped global variable. class once_flag { public: constexpr once_flag() : control_(0) {} once_flag(const once_flag&) = delete; once_flag& operator=(const once_flag&) = delete; private: friend std::atomic* base_internal::ControlWord(once_flag* flag); std::atomic control_; }; //------------------------------------------------------------------------------ // End of public interfaces. // Implementation details follow. //------------------------------------------------------------------------------ namespace base_internal { // Like call_once, but uses KERNEL_ONLY scheduling. Intended to be used to // initialize entities used by the scheduler implementation. template void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args); // Disables scheduling while on stack when scheduling mode is non-cooperative. // No effect for cooperative scheduling modes. 
class SchedulingHelper { public: explicit SchedulingHelper(base_internal::SchedulingMode mode) : mode_(mode) { if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { guard_result_ = base_internal::SchedulingGuard::DisableRescheduling(); } } ~SchedulingHelper() { if (mode_ == base_internal::SCHEDULE_KERNEL_ONLY) { base_internal::SchedulingGuard::EnableRescheduling(guard_result_); } } private: base_internal::SchedulingMode mode_; bool guard_result_; }; // Bit patterns for call_once state machine values. Internal implementation // detail, not for use by clients. // // The bit patterns are arbitrarily chosen from unlikely values, to aid in // debugging. However, kOnceInit must be 0, so that a zero-initialized // once_flag will be valid for immediate use. enum { kOnceInit = 0, kOnceRunning = 0x65C2937B, kOnceWaiter = 0x05A308D2, // A very small constant is chosen for kOnceDone so that it fit in a single // compare with immediate instruction for most common ISAs. This is verified // for x86, POWER and ARM. kOnceDone = 221, // Random Number }; template ABSL_ATTRIBUTE_NOINLINE void CallOnceImpl(std::atomic* control, base_internal::SchedulingMode scheduling_mode, Callable&& fn, Args&&... args) { #ifndef NDEBUG { uint32_t old_control = control->load(std::memory_order_relaxed); if (old_control != kOnceInit && old_control != kOnceRunning && old_control != kOnceWaiter && old_control != kOnceDone) { ABSL_RAW_LOG(FATAL, "Unexpected value for control word: 0x%lx", static_cast(old_control)); // NOLINT } } #endif // NDEBUG static const base_internal::SpinLockWaitTransition trans[] = { {kOnceInit, kOnceRunning, true}, {kOnceRunning, kOnceWaiter, false}, {kOnceDone, kOnceDone, true}}; // Must do this before potentially modifying control word's state. base_internal::SchedulingHelper maybe_disable_scheduling(scheduling_mode); // Short circuit the simplest case to avoid procedure call overhead. // The base_internal::SpinLockWait() call returns either kOnceInit or // kOnceDone. 
If it returns kOnceDone, it must have loaded the control word // with std::memory_order_acquire and seen a value of kOnceDone. uint32_t old_control = kOnceInit; if (control->compare_exchange_strong(old_control, kOnceRunning, std::memory_order_relaxed) || base_internal::SpinLockWait(control, ABSL_ARRAYSIZE(trans), trans, scheduling_mode) == kOnceInit) { base_internal::invoke(std::forward(fn), std::forward(args)...); old_control = control->exchange(base_internal::kOnceDone, std::memory_order_release); if (old_control == base_internal::kOnceWaiter) { base_internal::SpinLockWake(control, true); } } // else *control is already kOnceDone } inline std::atomic* ControlWord(once_flag* flag) { return &flag->control_; } template void LowLevelCallOnce(absl::once_flag* flag, Callable&& fn, Args&&... args) { std::atomic* once = base_internal::ControlWord(flag); uint32_t s = once->load(std::memory_order_acquire); if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { base_internal::CallOnceImpl(once, base_internal::SCHEDULE_KERNEL_ONLY, std::forward(fn), std::forward(args)...); } } } // namespace base_internal template void call_once(absl::once_flag& flag, Callable&& fn, Args&&... args) { std::atomic* once = base_internal::ControlWord(&flag); uint32_t s = once->load(std::memory_order_acquire); if (ABSL_PREDICT_FALSE(s != base_internal::kOnceDone)) { base_internal::CallOnceImpl( once, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL, std::forward(fn), std::forward(args)...); } } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CALL_ONCE_H_ abseil-20220623.1/absl/base/call_once_test.cc000066400000000000000000000062101430371345100204720ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/call_once.h" #include #include #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/const_init.h" #include "absl/base/thread_annotations.h" #include "absl/synchronization/mutex.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { absl::once_flag once; ABSL_CONST_INIT Mutex counters_mu(absl::kConstInit); int running_thread_count ABSL_GUARDED_BY(counters_mu) = 0; int call_once_invoke_count ABSL_GUARDED_BY(counters_mu) = 0; int call_once_finished_count ABSL_GUARDED_BY(counters_mu) = 0; int call_once_return_count ABSL_GUARDED_BY(counters_mu) = 0; bool done_blocking ABSL_GUARDED_BY(counters_mu) = false; // Function to be called from absl::call_once. Waits for a notification. void WaitAndIncrement() { counters_mu.Lock(); ++call_once_invoke_count; counters_mu.Unlock(); counters_mu.LockWhen(Condition(&done_blocking)); ++call_once_finished_count; counters_mu.Unlock(); } void ThreadBody() { counters_mu.Lock(); ++running_thread_count; counters_mu.Unlock(); absl::call_once(once, WaitAndIncrement); counters_mu.Lock(); ++call_once_return_count; counters_mu.Unlock(); } // Returns true if all threads are set up for the test. bool ThreadsAreSetup(void*) ABSL_EXCLUSIVE_LOCKS_REQUIRED(counters_mu) { // All ten threads must be running, and WaitAndIncrement should be blocked. return running_thread_count == 10 && call_once_invoke_count == 1; } TEST(CallOnceTest, ExecutionCount) { std::vector threads; // Start 10 threads all calling call_once on the same once_flag. 
for (int i = 0; i < 10; ++i) { threads.emplace_back(ThreadBody); } // Wait until all ten threads have started, and WaitAndIncrement has been // invoked. counters_mu.LockWhen(Condition(ThreadsAreSetup, nullptr)); // WaitAndIncrement should have been invoked by exactly one call_once() // instance. That thread should be blocking on a notification, and all other // call_once instances should be blocking as well. EXPECT_EQ(call_once_invoke_count, 1); EXPECT_EQ(call_once_finished_count, 0); EXPECT_EQ(call_once_return_count, 0); // Allow WaitAndIncrement to finish executing. Once it does, the other // call_once waiters will be unblocked. done_blocking = true; counters_mu.Unlock(); for (std::thread& thread : threads) { thread.join(); } counters_mu.Lock(); EXPECT_EQ(call_once_invoke_count, 1); EXPECT_EQ(call_once_finished_count, 1); EXPECT_EQ(call_once_return_count, 10); counters_mu.Unlock(); } } // namespace ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/casts.h000066400000000000000000000156301430371345100165010ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: casts.h // ----------------------------------------------------------------------------- // // This header file defines casting templates to fit use cases not covered by // the standard casts provided in the C++ standard. 
As with all cast operations, // use these with caution and only if alternatives do not exist. #ifndef ABSL_BASE_CASTS_H_ #define ABSL_BASE_CASTS_H_ #include #include #include #include #if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L #include // For std::bit_cast. #endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L #include "absl/base/internal/identity.h" #include "absl/base/macros.h" #include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN // implicit_cast() // // Performs an implicit conversion between types following the language // rules for implicit conversion; if an implicit conversion is otherwise // allowed by the language in the given context, this function performs such an // implicit conversion. // // Example: // // // If the context allows implicit conversion: // From from; // To to = from; // // // Such code can be replaced by: // implicit_cast(from); // // An `implicit_cast()` may also be used to annotate numeric type conversions // that, although safe, may produce compiler warnings (such as `long` to `int`). // Additionally, an `implicit_cast()` is also useful within return statements to // indicate a specific implicit conversion is being undertaken. // // Example: // // return implicit_cast(size_in_bytes) / capacity_; // // Annotating code with `implicit_cast()` allows you to explicitly select // particular overloads and template instantiations, while providing a safer // cast than `reinterpret_cast()` or `static_cast()`. // // Additionally, an `implicit_cast()` can be used to allow upcasting within a // type hierarchy where incorrect use of `static_cast()` could accidentally // allow downcasting. // // Finally, an `implicit_cast()` can be used to perform implicit conversions // from unrelated types that otherwise couldn't be implicitly cast directly; // C++ will normally only implicitly cast "one step" in such conversions. 
// // That is, if C is a type which can be implicitly converted to B, with B being // a type that can be implicitly converted to A, an `implicit_cast()` can be // used to convert C to B (which the compiler can then implicitly convert to A // using language rules). // // Example: // // // Assume an object C is convertible to B, which is implicitly convertible // // to A // A a = implicit_cast(C); // // Such implicit cast chaining may be useful within template logic. template constexpr To implicit_cast(typename absl::internal::identity_t to) { return to; } // bit_cast() // // Creates a value of the new type `Dest` whose representation is the same as // that of the argument, which is of (deduced) type `Source` (a "bitwise cast"; // every bit in the value representation of the result is equal to the // corresponding bit in the object representation of the source). Source and // destination types must be of the same size, and both types must be trivially // copyable. // // As with most casts, use with caution. A `bit_cast()` might be needed when you // need to treat a value as the value of some other type, for example, to access // the individual bits of an object which are not normally accessible through // the object's type, such as for working with the binary representation of a // floating point value: // // float f = 3.14159265358979; // int i = bit_cast(f); // // i = 0x40490fdb // // Reinterpreting and accessing a value directly as a different type (as shown // below) usually results in undefined behavior. // // Example: // // // WRONG // float f = 3.14159265358979; // int i = reinterpret_cast(f); // Wrong // int j = *reinterpret_cast(&f); // Equally wrong // int k = *bit_cast(&f); // Equally wrong // // Reinterpret-casting results in undefined behavior according to the ISO C++ // specification, section [basic.lval]. 
Roughly, this section says: if an object // in memory has one type, and a program accesses it with a different type, the // result is undefined behavior for most "different type". // // Using bit_cast on a pointer and then dereferencing it is no better than using // reinterpret_cast. You should only use bit_cast on the value itself. // // Such casting results in type punning: holding an object in memory of one type // and reading its bits back using a different type. A `bit_cast()` avoids this // issue by copying the object representation to a new value, which avoids // introducing this undefined behavior (since the original value is never // accessed in the wrong way). // // The requirements of `absl::bit_cast` are more strict than that of // `std::bit_cast` unless compiler support is available. Specifically, without // compiler support, this implementation also requires `Dest` to be // default-constructible. In C++20, `absl::bit_cast` is replaced by // `std::bit_cast`. #if defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L using std::bit_cast; #else // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L template ::value && type_traits_internal::is_trivially_copyable::value #if !ABSL_HAVE_BUILTIN(__builtin_bit_cast) && std::is_default_constructible::value #endif // !ABSL_HAVE_BUILTIN(__builtin_bit_cast) , int>::type = 0> #if ABSL_HAVE_BUILTIN(__builtin_bit_cast) inline constexpr Dest bit_cast(const Source& source) { return __builtin_bit_cast(Dest, source); } #else // ABSL_HAVE_BUILTIN(__builtin_bit_cast) inline Dest bit_cast(const Source& source) { Dest dest; memcpy(static_cast(std::addressof(dest)), static_cast(std::addressof(source)), sizeof(dest)); return dest; } #endif // ABSL_HAVE_BUILTIN(__builtin_bit_cast) #endif // defined(__cpp_lib_bit_cast) && __cpp_lib_bit_cast >= 201806L ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CASTS_H_ 
abseil-20220623.1/absl/base/config.h000066400000000000000000001061711430371345100166320ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: config.h // ----------------------------------------------------------------------------- // // This header file defines a set of macros for checking the presence of // important compiler and platform features. Such macros can be used to // produce portable code by parameterizing compilation based on the presence or // lack of a given feature. // // We define a "feature" as some interface we wish to program to: for example, // a library function or system call. A value of `1` indicates support for // that feature; any other value indicates the feature support is undefined. // // Example: // // Suppose a programmer wants to write a program that uses the 'mmap()' system // call. The Abseil macro for that feature (`ABSL_HAVE_MMAP`) allows you to // selectively include the `mmap.h` header and bracket code using that feature // in the macro: // // #include "absl/base/config.h" // // #ifdef ABSL_HAVE_MMAP // #include "sys/mman.h" // #endif //ABSL_HAVE_MMAP // // ... // #ifdef ABSL_HAVE_MMAP // void *ptr = mmap(...); // ... // #endif // ABSL_HAVE_MMAP #ifndef ABSL_BASE_CONFIG_H_ #define ABSL_BASE_CONFIG_H_ // Included for the __GLIBC__ macro (or similar macros on other systems). 
#include #ifdef __cplusplus // Included for __GLIBCXX__, _LIBCPP_VERSION #include #endif // __cplusplus // ABSL_INTERNAL_CPLUSPLUS_LANG // // MSVC does not set the value of __cplusplus correctly, but instead uses // _MSVC_LANG as a stand-in. // https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros // // However, there are reports that MSVC even sets _MSVC_LANG incorrectly at // times, for example: // https://github.com/microsoft/vscode-cpptools/issues/1770 // https://reviews.llvm.org/D70996 // // For this reason, this symbol is considered INTERNAL and code outside of // Abseil must not use it. #if defined(_MSVC_LANG) #define ABSL_INTERNAL_CPLUSPLUS_LANG _MSVC_LANG #elif defined(__cplusplus) #define ABSL_INTERNAL_CPLUSPLUS_LANG __cplusplus #endif #if defined(__APPLE__) // Included for TARGET_OS_IPHONE, __IPHONE_OS_VERSION_MIN_REQUIRED, // __IPHONE_8_0. #include #include #endif #include "absl/base/options.h" #include "absl/base/policy_checks.h" // Abseil long-term support (LTS) releases will define // `ABSL_LTS_RELEASE_VERSION` to the integer representing the date string of the // LTS release version, and will define `ABSL_LTS_RELEASE_PATCH_LEVEL` to the // integer representing the patch-level for that release. // // For example, for LTS release version "20300401.2", this would give us // ABSL_LTS_RELEASE_VERSION == 20300401 && ABSL_LTS_RELEASE_PATCH_LEVEL == 2 // // These symbols will not be defined in non-LTS code. // // Abseil recommends that clients live-at-head. Therefore, if you are using // these symbols to assert a minimum version requirement, we recommend you do it // as // // #if defined(ABSL_LTS_RELEASE_VERSION) && ABSL_LTS_RELEASE_VERSION < 20300401 // #error Project foo requires Abseil LTS version >= 20300401 // #endif // // The `defined(ABSL_LTS_RELEASE_VERSION)` part of the check excludes // live-at-head clients from the minimum version assertion. 
// // See https://abseil.io/about/releases for more information on Abseil release // management. // // LTS releases can be obtained from // https://github.com/abseil/abseil-cpp/releases. #define ABSL_LTS_RELEASE_VERSION 20220623 #define ABSL_LTS_RELEASE_PATCH_LEVEL 1 // Helper macro to convert a CPP variable to a string literal. #define ABSL_INTERNAL_DO_TOKEN_STR(x) #x #define ABSL_INTERNAL_TOKEN_STR(x) ABSL_INTERNAL_DO_TOKEN_STR(x) // ----------------------------------------------------------------------------- // Abseil namespace annotations // ----------------------------------------------------------------------------- // ABSL_NAMESPACE_BEGIN/ABSL_NAMESPACE_END // // An annotation placed at the beginning/end of each `namespace absl` scope. // This is used to inject an inline namespace. // // The proper way to write Abseil code in the `absl` namespace is: // // namespace absl { // ABSL_NAMESPACE_BEGIN // // void Foo(); // absl::Foo(). // // ABSL_NAMESPACE_END // } // namespace absl // // Users of Abseil should not use these macros, because users of Abseil should // not write `namespace absl {` in their own code for any reason. (Abseil does // not support forward declarations of its own types, nor does it support // user-provided specialization of Abseil templates. Code that violates these // rules may be broken without warning.) #if !defined(ABSL_OPTION_USE_INLINE_NAMESPACE) || \ !defined(ABSL_OPTION_INLINE_NAMESPACE_NAME) #error options.h is misconfigured. 
#endif // Check that ABSL_OPTION_INLINE_NAMESPACE_NAME is neither "head" nor "" #if defined(__cplusplus) && ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #define ABSL_INTERNAL_INLINE_NAMESPACE_STR \ ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " "not be empty."); static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[1] != 'e' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[2] != 'a' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[3] != 'd' || ABSL_INTERNAL_INLINE_NAMESPACE_STR[4] != '\0', "options.h misconfigured: ABSL_OPTION_INLINE_NAMESPACE_NAME must " "be changed to a new, unique identifier name."); #endif #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 #define ABSL_NAMESPACE_BEGIN #define ABSL_NAMESPACE_END #define ABSL_INTERNAL_C_SYMBOL(x) x #elif ABSL_OPTION_USE_INLINE_NAMESPACE == 1 #define ABSL_NAMESPACE_BEGIN \ inline namespace ABSL_OPTION_INLINE_NAMESPACE_NAME { #define ABSL_NAMESPACE_END } #define ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) x##_##v #define ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, v) \ ABSL_INTERNAL_C_SYMBOL_HELPER_2(x, v) #define ABSL_INTERNAL_C_SYMBOL(x) \ ABSL_INTERNAL_C_SYMBOL_HELPER_1(x, ABSL_OPTION_INLINE_NAMESPACE_NAME) #else #error options.h is misconfigured. #endif // ----------------------------------------------------------------------------- // Compiler Feature Checks // ----------------------------------------------------------------------------- // ABSL_HAVE_BUILTIN() // // Checks whether the compiler supports a Clang Feature Checking Macro, and if // so, checks whether it supports the provided builtin function "x" where x // is one of the functions noted in // https://clang.llvm.org/docs/LanguageExtensions.html // // Note: Use this macro to avoid an extra level of #ifdef __has_builtin check. 
// http://releases.llvm.org/3.3/tools/clang/docs/LanguageExtensions.html #ifdef __has_builtin #define ABSL_HAVE_BUILTIN(x) __has_builtin(x) #else #define ABSL_HAVE_BUILTIN(x) 0 #endif #ifdef __has_feature #define ABSL_HAVE_FEATURE(f) __has_feature(f) #else #define ABSL_HAVE_FEATURE(f) 0 #endif // Portable check for GCC minimum version: // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html #if defined(__GNUC__) && defined(__GNUC_MINOR__) #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) \ (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) #else #define ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(x, y) 0 #endif #if defined(__clang__) && defined(__clang_major__) && defined(__clang_minor__) #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) \ (__clang_major__ > (x) || __clang_major__ == (x) && __clang_minor__ >= (y)) #else #define ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(x, y) 0 #endif // ABSL_HAVE_TLS is defined to 1 when __thread should be supported. // We assume __thread is supported on Linux or Asylo when compiled with Clang or // compiled against libstdc++ with _GLIBCXX_HAVE_TLS defined. #ifdef ABSL_HAVE_TLS #error ABSL_HAVE_TLS cannot be directly set #elif (defined(__linux__) || defined(__ASYLO__)) && \ (defined(__clang__) || defined(_GLIBCXX_HAVE_TLS)) #define ABSL_HAVE_TLS 1 #endif // ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE // // Checks whether `std::is_trivially_destructible` is supported. // // Notes: All supported compilers using libc++ support this feature, as does // gcc >= 4.8.1 using libstdc++, and Visual Studio. 
#ifdef ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE #error ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE cannot be directly set #elif defined(_LIBCPP_VERSION) || defined(_MSC_VER) || \ (!defined(__clang__) && defined(__GLIBCXX__) && \ ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(4, 8)) #define ABSL_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1 #endif // ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE // // Checks whether `std::is_trivially_default_constructible` and // `std::is_trivially_copy_constructible` are supported. // ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE // // Checks whether `std::is_trivially_copy_assignable` is supported. // Notes: Clang with libc++ supports these features, as does gcc >= 7.4 with // libstdc++, or gcc >= 8.2 with libc++, and Visual Studio (but not NVCC). #if defined(ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE cannot be directly set #elif defined(ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE) #error ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE cannot directly set #elif (defined(__clang__) && defined(_LIBCPP_VERSION)) || \ (!defined(__clang__) && \ ((ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(7, 4) && defined(__GLIBCXX__)) || \ (ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(8, 2) && \ defined(_LIBCPP_VERSION)))) || \ (defined(_MSC_VER) && !defined(__NVCC__)) #define ABSL_HAVE_STD_IS_TRIVIALLY_CONSTRUCTIBLE 1 #define ABSL_HAVE_STD_IS_TRIVIALLY_ASSIGNABLE 1 #endif // ABSL_HAVE_THREAD_LOCAL // // Checks whether C++11's `thread_local` storage duration specifier is // supported. #ifdef ABSL_HAVE_THREAD_LOCAL #error ABSL_HAVE_THREAD_LOCAL cannot be directly set #elif defined(__APPLE__) // Notes: // * Xcode's clang did not support `thread_local` until version 8, and // even then not for all iOS < 9.0. // * Xcode 9.3 started disallowing `thread_local` for 32-bit iOS simulator // targeting iOS 9.x. // * Xcode 10 moves the deployment target check for iOS < 9.0 to link time // making ABSL_HAVE_FEATURE unreliable there. 
// #if ABSL_HAVE_FEATURE(cxx_thread_local) && \ !(TARGET_OS_IPHONE && __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_9_0) #define ABSL_HAVE_THREAD_LOCAL 1 #endif #else // !defined(__APPLE__) #define ABSL_HAVE_THREAD_LOCAL 1 #endif // There are platforms for which TLS should not be used even though the compiler // makes it seem like it's supported (Android NDK < r12b for example). // This is primarily because of linker problems and toolchain misconfiguration: // Abseil does not intend to support this indefinitely. Currently, the newest // toolchain that we intend to support that requires this behavior is the // r11 NDK - allowing for a 5 year support window on that means this option // is likely to be removed around June of 2021. // TLS isn't supported until NDK r12b per // https://developer.android.com/ndk/downloads/revision_history.html // Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in // . For NDK < r16, users should define these macros, // e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. #if defined(__ANDROID__) && defined(__clang__) #if __has_include() #include #endif // __has_include() #if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \ defined(__NDK_MINOR__) && \ ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1))) #undef ABSL_HAVE_TLS #undef ABSL_HAVE_THREAD_LOCAL #endif #endif // defined(__ANDROID__) && defined(__clang__) // ABSL_HAVE_INTRINSIC_INT128 // // Checks whether the __int128 compiler extension for a 128-bit integral type is // supported. // // Note: __SIZEOF_INT128__ is defined by Clang and GCC when __int128 is // supported, but we avoid using it in certain cases: // * On Clang: // * Building using Clang for Windows, where the Clang runtime library has // 128-bit support only on LP64 architectures, but Windows is LLP64. // * On Nvidia's nvcc: // * nvcc also defines __GNUC__ and __SIZEOF_INT128__, but not all versions // actually support __int128. 
#ifdef ABSL_HAVE_INTRINSIC_INT128 #error ABSL_HAVE_INTRINSIC_INT128 cannot be directly set #elif defined(__SIZEOF_INT128__) #if (defined(__clang__) && !defined(_WIN32)) || \ (defined(__CUDACC__) && __CUDACC_VER_MAJOR__ >= 9) || \ (defined(__GNUC__) && !defined(__clang__) && !defined(__CUDACC__)) #define ABSL_HAVE_INTRINSIC_INT128 1 #elif defined(__CUDACC__) // __CUDACC_VER__ is a full version number before CUDA 9, and is defined to a // string explaining that it has been removed starting with CUDA 9. We use // nested #ifs because there is no short-circuiting in the preprocessor. // NOTE: `__CUDACC__` could be undefined while `__CUDACC_VER__` is defined. #if __CUDACC_VER__ >= 70000 #define ABSL_HAVE_INTRINSIC_INT128 1 #endif // __CUDACC_VER__ >= 70000 #endif // defined(__CUDACC__) #endif // ABSL_HAVE_INTRINSIC_INT128 // ABSL_HAVE_EXCEPTIONS // // Checks whether the compiler both supports and enables exceptions. Many // compilers support a "no exceptions" mode that disables exceptions. // // Generally, when ABSL_HAVE_EXCEPTIONS is not defined: // // * Code using `throw` and `try` may not compile. // * The `noexcept` specifier will still compile and behave as normal. // * The `noexcept` operator may still return `false`. // // For further details, consult the compiler's documentation. #ifdef ABSL_HAVE_EXCEPTIONS #error ABSL_HAVE_EXCEPTIONS cannot be directly set. #elif ABSL_INTERNAL_HAVE_MIN_CLANG_VERSION(3, 6) // Clang >= 3.6 #if ABSL_HAVE_FEATURE(cxx_exceptions) #define ABSL_HAVE_EXCEPTIONS 1 #endif // ABSL_HAVE_FEATURE(cxx_exceptions) #elif defined(__clang__) // Clang < 3.6 // http://releases.llvm.org/3.6.0/tools/clang/docs/ReleaseNotes.html#the-exceptions-macro #if defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) #define ABSL_HAVE_EXCEPTIONS 1 #endif // defined(__EXCEPTIONS) && ABSL_HAVE_FEATURE(cxx_exceptions) // Handle remaining special cases and default to exceptions being supported. 
#elif !(defined(__GNUC__) && (__GNUC__ < 5) && !defined(__EXCEPTIONS)) && \ !(ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(5, 0) && \ !defined(__cpp_exceptions)) && \ !(defined(_MSC_VER) && !defined(_CPPUNWIND)) #define ABSL_HAVE_EXCEPTIONS 1 #endif // ----------------------------------------------------------------------------- // Platform Feature Checks // ----------------------------------------------------------------------------- // Currently supported operating systems and associated preprocessor // symbols: // // Linux and Linux-derived __linux__ // Android __ANDROID__ (implies __linux__) // Linux (non-Android) __linux__ && !__ANDROID__ // Darwin (macOS and iOS) __APPLE__ // Akaros (http://akaros.org) __ros__ // Windows _WIN32 // NaCL __native_client__ // AsmJS __asmjs__ // WebAssembly __wasm__ // Fuchsia __Fuchsia__ // // Note that since Android defines both __ANDROID__ and __linux__, one // may probe for either Linux or Android by simply testing for __linux__. // ABSL_HAVE_MMAP // // Checks whether the platform has an mmap(2) implementation as defined in // POSIX.1-2001. #ifdef ABSL_HAVE_MMAP #error ABSL_HAVE_MMAP cannot be directly set #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ defined(_AIX) || defined(__ros__) || defined(__native_client__) || \ defined(__asmjs__) || defined(__wasm__) || defined(__Fuchsia__) || \ defined(__sun) || defined(__ASYLO__) || defined(__myriad2__) || \ defined(__HAIKU__) || defined(__OpenBSD__) || defined(__NetBSD__) || \ defined(__QNX__) #define ABSL_HAVE_MMAP 1 #endif // ABSL_HAVE_PTHREAD_GETSCHEDPARAM // // Checks whether the platform implements the pthread_(get|set)schedparam(3) // functions as defined in POSIX.1-2001. 
#ifdef ABSL_HAVE_PTHREAD_GETSCHEDPARAM #error ABSL_HAVE_PTHREAD_GETSCHEDPARAM cannot be directly set #elif defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ defined(_AIX) || defined(__ros__) || defined(__OpenBSD__) || \ defined(__NetBSD__) #define ABSL_HAVE_PTHREAD_GETSCHEDPARAM 1 #endif // ABSL_HAVE_SCHED_GETCPU // // Checks whether sched_getcpu is available. #ifdef ABSL_HAVE_SCHED_GETCPU #error ABSL_HAVE_SCHED_GETCPU cannot be directly set #elif defined(__linux__) #define ABSL_HAVE_SCHED_GETCPU 1 #endif // ABSL_HAVE_SCHED_YIELD // // Checks whether the platform implements sched_yield(2) as defined in // POSIX.1-2001. #ifdef ABSL_HAVE_SCHED_YIELD #error ABSL_HAVE_SCHED_YIELD cannot be directly set #elif defined(__linux__) || defined(__ros__) || defined(__native_client__) #define ABSL_HAVE_SCHED_YIELD 1 #endif // ABSL_HAVE_SEMAPHORE_H // // Checks whether the platform supports the header and sem_init(3) // family of functions as standardized in POSIX.1-2001. // // Note: While Apple provides for both iOS and macOS, it is // explicitly deprecated and will cause build failures if enabled for those // platforms. We side-step the issue by not defining it here for Apple // platforms. #ifdef ABSL_HAVE_SEMAPHORE_H #error ABSL_HAVE_SEMAPHORE_H cannot be directly set #elif defined(__linux__) || defined(__ros__) #define ABSL_HAVE_SEMAPHORE_H 1 #endif // ABSL_HAVE_ALARM // // Checks whether the platform supports the header and alarm(2) // function as standardized in POSIX.1-2001. 
#ifdef ABSL_HAVE_ALARM #error ABSL_HAVE_ALARM cannot be directly set #elif defined(__GOOGLE_GRTE_VERSION__) // feature tests for Google's GRTE #define ABSL_HAVE_ALARM 1 #elif defined(__GLIBC__) // feature test for glibc #define ABSL_HAVE_ALARM 1 #elif defined(_MSC_VER) // feature tests for Microsoft's library #elif defined(__MINGW32__) // mingw32 doesn't provide alarm(2): // https://osdn.net/projects/mingw/scm/git/mingw-org-wsl/blobs/5.2-trunk/mingwrt/include/unistd.h // mingw-w64 provides a no-op implementation: // https://sourceforge.net/p/mingw-w64/mingw-w64/ci/master/tree/mingw-w64-crt/misc/alarm.c #elif defined(__EMSCRIPTEN__) // emscripten doesn't support signals #elif defined(__Fuchsia__) // Signals don't exist on fuchsia. #elif defined(__native_client__) #else // other standard libraries #define ABSL_HAVE_ALARM 1 #endif // ABSL_IS_LITTLE_ENDIAN // ABSL_IS_BIG_ENDIAN // // Checks the endianness of the platform. // // Notes: uses the built in endian macros provided by GCC (since 4.6) and // Clang (since 3.2); see // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html. // Otherwise, if _WIN32, assume little endian. Otherwise, bail with an error. #if defined(ABSL_IS_BIG_ENDIAN) #error "ABSL_IS_BIG_ENDIAN cannot be directly set." #endif #if defined(ABSL_IS_LITTLE_ENDIAN) #error "ABSL_IS_LITTLE_ENDIAN cannot be directly set." 
#endif #if (defined(__BYTE_ORDER__) && defined(__ORDER_LITTLE_ENDIAN__) && \ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) #define ABSL_IS_LITTLE_ENDIAN 1 #elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define ABSL_IS_BIG_ENDIAN 1 #elif defined(_WIN32) #define ABSL_IS_LITTLE_ENDIAN 1 #else #error "absl endian detection needs to be set up for your compiler" #endif // macOS < 10.13 and iOS < 11 don't let you use , , or // even though the headers exist and are publicly noted to work, because the // libc++ shared library shipped on the system doesn't have the requisite // exported symbols. See https://github.com/abseil/abseil-cpp/issues/207 and // https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes // // libc++ spells out the availability requirements in the file // llvm-project/libcxx/include/__config via the #define // _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS. // // Unfortunately, Apple initially mis-stated the requirements as macOS < 10.14 // and iOS < 12 in the libc++ headers. This was corrected by // https://github.com/llvm/llvm-project/commit/7fb40e1569dd66292b647f4501b85517e9247953 // which subsequently made it into the XCode 12.5 release. We need to match the // old (incorrect) conditions when built with old XCode, but can use the // corrected earlier versions with new XCode. 
#if defined(__APPLE__) && defined(_LIBCPP_VERSION) && \ ((_LIBCPP_VERSION >= 11000 && /* XCode 12.5 or later: */ \ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101300) || \ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 110000) || \ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 40000) || \ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 110000))) || \ (_LIBCPP_VERSION < 11000 && /* Pre-XCode 12.5: */ \ ((defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101400) || \ (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 120000) || \ (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 50000) || \ (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && \ __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 120000)))) #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 1 #else #define ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE 0 #endif // ABSL_HAVE_STD_ANY // // Checks whether C++17 std::any is available by checking whether exists. #ifdef ABSL_HAVE_STD_ANY #error "ABSL_HAVE_STD_ANY cannot be directly set." #endif #ifdef __has_include #if __has_include() && defined(__cplusplus) && __cplusplus >= 201703L && \ !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_ANY 1 #endif #endif // ABSL_HAVE_STD_OPTIONAL // // Checks whether C++17 std::optional is available. #ifdef ABSL_HAVE_STD_OPTIONAL #error "ABSL_HAVE_STD_OPTIONAL cannot be directly set." 
#endif #ifdef __has_include #if __has_include() && defined(__cplusplus) && \ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_OPTIONAL 1 #endif #endif // ABSL_HAVE_STD_VARIANT // // Checks whether C++17 std::variant is available. #ifdef ABSL_HAVE_STD_VARIANT #error "ABSL_HAVE_STD_VARIANT cannot be directly set." #endif #ifdef __has_include #if __has_include() && defined(__cplusplus) && \ __cplusplus >= 201703L && !ABSL_INTERNAL_APPLE_CXX17_TYPES_UNAVAILABLE #define ABSL_HAVE_STD_VARIANT 1 #endif #endif // ABSL_HAVE_STD_STRING_VIEW // // Checks whether C++17 std::string_view is available. #ifdef ABSL_HAVE_STD_STRING_VIEW #error "ABSL_HAVE_STD_STRING_VIEW cannot be directly set." #endif #ifdef __has_include #if __has_include() && defined(__cplusplus) && \ __cplusplus >= 201703L #define ABSL_HAVE_STD_STRING_VIEW 1 #endif #endif // For MSVC, `__has_include` is supported in VS 2017 15.3, which is later than // the support for , , , . So we use // _MSC_VER to check whether we have VS 2017 RTM (when , , // , is implemented) or higher. Also, `__cplusplus` is // not correctly set by MSVC, so we use `_MSVC_LANG` to check the language // version. // TODO(zhangxy): fix tests before enabling aliasing for `std::any`. #if defined(_MSC_VER) && _MSC_VER >= 1910 && \ ((defined(_MSVC_LANG) && _MSVC_LANG > 201402) || \ (defined(__cplusplus) && __cplusplus > 201402)) // #define ABSL_HAVE_STD_ANY 1 #define ABSL_HAVE_STD_OPTIONAL 1 #define ABSL_HAVE_STD_VARIANT 1 #define ABSL_HAVE_STD_STRING_VIEW 1 #endif // ABSL_USES_STD_ANY // // Indicates whether absl::any is an alias for std::any. #if !defined(ABSL_OPTION_USE_STD_ANY) #error options.h is misconfigured. 
#elif ABSL_OPTION_USE_STD_ANY == 0 || \ (ABSL_OPTION_USE_STD_ANY == 2 && !defined(ABSL_HAVE_STD_ANY)) #undef ABSL_USES_STD_ANY #elif ABSL_OPTION_USE_STD_ANY == 1 || \ (ABSL_OPTION_USE_STD_ANY == 2 && defined(ABSL_HAVE_STD_ANY)) #define ABSL_USES_STD_ANY 1 #else #error options.h is misconfigured. #endif // ABSL_USES_STD_OPTIONAL // // Indicates whether absl::optional is an alias for std::optional. #if !defined(ABSL_OPTION_USE_STD_OPTIONAL) #error options.h is misconfigured. #elif ABSL_OPTION_USE_STD_OPTIONAL == 0 || \ (ABSL_OPTION_USE_STD_OPTIONAL == 2 && !defined(ABSL_HAVE_STD_OPTIONAL)) #undef ABSL_USES_STD_OPTIONAL #elif ABSL_OPTION_USE_STD_OPTIONAL == 1 || \ (ABSL_OPTION_USE_STD_OPTIONAL == 2 && defined(ABSL_HAVE_STD_OPTIONAL)) #define ABSL_USES_STD_OPTIONAL 1 #else #error options.h is misconfigured. #endif // ABSL_USES_STD_VARIANT // // Indicates whether absl::variant is an alias for std::variant. #if !defined(ABSL_OPTION_USE_STD_VARIANT) #error options.h is misconfigured. #elif ABSL_OPTION_USE_STD_VARIANT == 0 || \ (ABSL_OPTION_USE_STD_VARIANT == 2 && !defined(ABSL_HAVE_STD_VARIANT)) #undef ABSL_USES_STD_VARIANT #elif ABSL_OPTION_USE_STD_VARIANT == 1 || \ (ABSL_OPTION_USE_STD_VARIANT == 2 && defined(ABSL_HAVE_STD_VARIANT)) #define ABSL_USES_STD_VARIANT 1 #else #error options.h is misconfigured. #endif // ABSL_USES_STD_STRING_VIEW // // Indicates whether absl::string_view is an alias for std::string_view. #if !defined(ABSL_OPTION_USE_STD_STRING_VIEW) #error options.h is misconfigured. #elif ABSL_OPTION_USE_STD_STRING_VIEW == 0 || \ (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ !defined(ABSL_HAVE_STD_STRING_VIEW)) #undef ABSL_USES_STD_STRING_VIEW #elif ABSL_OPTION_USE_STD_STRING_VIEW == 1 || \ (ABSL_OPTION_USE_STD_STRING_VIEW == 2 && \ defined(ABSL_HAVE_STD_STRING_VIEW)) #define ABSL_USES_STD_STRING_VIEW 1 #else #error options.h is misconfigured. 
#endif // In debug mode, MSVC 2017's std::variant throws a EXCEPTION_ACCESS_VIOLATION // SEH exception from emplace for variant when constructing the // struct can throw. This defeats some of variant_test and // variant_exception_safety_test. #if defined(_MSC_VER) && _MSC_VER >= 1700 && defined(_DEBUG) #define ABSL_INTERNAL_MSVC_2017_DBG_MODE #endif // ABSL_INTERNAL_MANGLED_NS // ABSL_INTERNAL_MANGLED_BACKREFERENCE // // Internal macros for building up mangled names in our internal fork of CCTZ. // This implementation detail is only needed and provided for the MSVC build. // // These macros both expand to string literals. ABSL_INTERNAL_MANGLED_NS is // the mangled spelling of the `absl` namespace, and // ABSL_INTERNAL_MANGLED_BACKREFERENCE is a back-reference integer representing // the proper count to skip past the CCTZ fork namespace names. (This number // is one larger when there is an inline namespace name to skip.) #if defined(_MSC_VER) #if ABSL_OPTION_USE_INLINE_NAMESPACE == 0 #define ABSL_INTERNAL_MANGLED_NS "absl" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "5" #else #define ABSL_INTERNAL_MANGLED_NS \ ABSL_INTERNAL_TOKEN_STR(ABSL_OPTION_INLINE_NAMESPACE_NAME) "@absl" #define ABSL_INTERNAL_MANGLED_BACKREFERENCE "6" #endif #endif // ABSL_DLL // // When building Abseil as a DLL, this macro expands to `__declspec(dllexport)` // so we can annotate symbols appropriately as being exported. When used in // headers consuming a DLL, this macro expands to `__declspec(dllimport)` so // that consumers know the symbol is defined inside the DLL. In all other cases, // the macro expands to nothing. #if defined(_MSC_VER) #if defined(ABSL_BUILD_DLL) #define ABSL_DLL __declspec(dllexport) #elif defined(ABSL_CONSUME_DLL) #define ABSL_DLL __declspec(dllimport) #else #define ABSL_DLL #endif #else #define ABSL_DLL #endif // defined(_MSC_VER) // ABSL_HAVE_MEMORY_SANITIZER // // MemorySanitizer (MSan) is a detector of uninitialized reads. 
It consists of // a compiler instrumentation module and a run-time library. #ifdef ABSL_HAVE_MEMORY_SANITIZER #error "ABSL_HAVE_MEMORY_SANITIZER cannot be directly set." #elif !defined(__native_client__) && ABSL_HAVE_FEATURE(memory_sanitizer) #define ABSL_HAVE_MEMORY_SANITIZER 1 #endif // ABSL_HAVE_THREAD_SANITIZER // // ThreadSanitizer (TSan) is a fast data race detector. #ifdef ABSL_HAVE_THREAD_SANITIZER #error "ABSL_HAVE_THREAD_SANITIZER cannot be directly set." #elif defined(__SANITIZE_THREAD__) #define ABSL_HAVE_THREAD_SANITIZER 1 #elif ABSL_HAVE_FEATURE(thread_sanitizer) #define ABSL_HAVE_THREAD_SANITIZER 1 #endif // ABSL_HAVE_ADDRESS_SANITIZER // // AddressSanitizer (ASan) is a fast memory error detector. #ifdef ABSL_HAVE_ADDRESS_SANITIZER #error "ABSL_HAVE_ADDRESS_SANITIZER cannot be directly set." #elif defined(__SANITIZE_ADDRESS__) #define ABSL_HAVE_ADDRESS_SANITIZER 1 #elif ABSL_HAVE_FEATURE(address_sanitizer) #define ABSL_HAVE_ADDRESS_SANITIZER 1 #endif // ABSL_HAVE_HWADDRESS_SANITIZER // // Hardware-Assisted AddressSanitizer (or HWASAN) is even faster than asan // memory error detector which can use CPU features like ARM TBI, Intel LAM or // AMD UAI. #ifdef ABSL_HAVE_HWADDRESS_SANITIZER #error "ABSL_HAVE_HWADDRESS_SANITIZER cannot be directly set." #elif defined(__SANITIZE_HWADDRESS__) #define ABSL_HAVE_HWADDRESS_SANITIZER 1 #elif ABSL_HAVE_FEATURE(hwaddress_sanitizer) #define ABSL_HAVE_HWADDRESS_SANITIZER 1 #endif // ABSL_HAVE_LEAK_SANITIZER // // LeakSanitizer (or lsan) is a detector of memory leaks. // https://clang.llvm.org/docs/LeakSanitizer.html // https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer // // The macro ABSL_HAVE_LEAK_SANITIZER can be used to detect at compile-time // whether the LeakSanitizer is potentially available. However, just because the // LeakSanitizer is available does not mean it is active. 
Use the // always-available run-time interface in //absl/debugging/leak_check.h for // interacting with LeakSanitizer. #ifdef ABSL_HAVE_LEAK_SANITIZER #error "ABSL_HAVE_LEAK_SANITIZER cannot be directly set." #elif defined(LEAK_SANITIZER) // GCC provides no method for detecting the presense of the standalone // LeakSanitizer (-fsanitize=leak), so GCC users of -fsanitize=leak should also // use -DLEAK_SANITIZER. #define ABSL_HAVE_LEAK_SANITIZER 1 // Clang standalone LeakSanitizer (-fsanitize=leak) #elif ABSL_HAVE_FEATURE(leak_sanitizer) #define ABSL_HAVE_LEAK_SANITIZER 1 #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) // GCC or Clang using the LeakSanitizer integrated into AddressSanitizer. #define ABSL_HAVE_LEAK_SANITIZER 1 #endif // ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION // // Class template argument deduction is a language feature added in C++17. #ifdef ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION #error "ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION cannot be directly set." #elif defined(__cpp_deduction_guides) #define ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION 1 #endif // ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL // // Prior to C++17, static constexpr variables defined in classes required a // separate definition outside of the class body, for example: // // class Foo { // static constexpr int kBar = 0; // }; // constexpr int Foo::kBar; // // In C++17, these variables defined in classes are considered inline variables, // and the extra declaration is redundant. Since some compilers warn on the // extra declarations, ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL can be used // conditionally ignore them: // // #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL // constexpr int Foo::kBar; // #endif #if defined(ABSL_INTERNAL_CPLUSPLUS_LANG) && \ ABSL_INTERNAL_CPLUSPLUS_LANG < 201703L #define ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL 1 #endif // `ABSL_INTERNAL_HAS_RTTI` determines whether abseil is being compiled with // RTTI support. 
#ifdef ABSL_INTERNAL_HAS_RTTI #error ABSL_INTERNAL_HAS_RTTI cannot be directly set #elif !defined(__GNUC__) || defined(__GXX_RTTI) #define ABSL_INTERNAL_HAS_RTTI 1 #endif // !defined(__GNUC__) || defined(__GXX_RTTI) // ABSL_INTERNAL_HAVE_SSE is used for compile-time detection of SSE support. // See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of // which architectures support the various x86 instruction sets. #ifdef ABSL_INTERNAL_HAVE_SSE #error ABSL_INTERNAL_HAVE_SSE cannot be directly set #elif defined(__SSE__) #define ABSL_INTERNAL_HAVE_SSE 1 #elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 1) // MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 1 // indicates that at least SSE was targeted with the /arch:SSE option. // All x86-64 processors support SSE, so support can be assumed. // https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros #define ABSL_INTERNAL_HAVE_SSE 1 #endif // ABSL_INTERNAL_HAVE_SSE2 is used for compile-time detection of SSE2 support. // See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of // which architectures support the various x86 instruction sets. #ifdef ABSL_INTERNAL_HAVE_SSE2 #error ABSL_INTERNAL_HAVE_SSE2 cannot be directly set #elif defined(__SSE2__) #define ABSL_INTERNAL_HAVE_SSE2 1 #elif defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2) // MSVC only defines _M_IX86_FP for x86 32-bit code, and _M_IX86_FP >= 2 // indicates that at least SSE2 was targeted with the /arch:SSE2 option. // All x86-64 processors support SSE2, so support can be assumed. // https://docs.microsoft.com/en-us/cpp/preprocessor/predefined-macros #define ABSL_INTERNAL_HAVE_SSE2 1 #endif // ABSL_INTERNAL_HAVE_SSSE3 is used for compile-time detection of SSSE3 support. // See https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html for an overview of // which architectures support the various x86 instruction sets. 
// // MSVC does not have a mode that targets SSSE3 at compile-time. To use SSSE3 // with MSVC requires either assuming that the code will only every run on CPUs // that support SSSE3, otherwise __cpuid() can be used to detect support at // runtime and fallback to a non-SSSE3 implementation when SSSE3 is unsupported // by the CPU. #ifdef ABSL_INTERNAL_HAVE_SSSE3 #error ABSL_INTERNAL_HAVE_SSSE3 cannot be directly set #elif defined(__SSSE3__) #define ABSL_INTERNAL_HAVE_SSSE3 1 #endif // ABSL_INTERNAL_HAVE_ARM_NEON is used for compile-time detection of NEON (ARM // SIMD). #ifdef ABSL_INTERNAL_HAVE_ARM_NEON #error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set #elif defined(__ARM_NEON) #define ABSL_INTERNAL_HAVE_ARM_NEON 1 #endif #endif // ABSL_BASE_CONFIG_H_ abseil-20220623.1/absl/base/config_test.cc000066400000000000000000000032471430371345100200270ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/config.h" #include #include "gtest/gtest.h" #include "absl/synchronization/internal/thread_pool.h" namespace { TEST(ConfigTest, Endianness) { union { uint32_t value; uint8_t data[sizeof(uint32_t)]; } number; number.data[0] = 0x00; number.data[1] = 0x01; number.data[2] = 0x02; number.data[3] = 0x03; #if defined(ABSL_IS_LITTLE_ENDIAN) && defined(ABSL_IS_BIG_ENDIAN) #error Both ABSL_IS_LITTLE_ENDIAN and ABSL_IS_BIG_ENDIAN are defined #elif defined(ABSL_IS_LITTLE_ENDIAN) EXPECT_EQ(UINT32_C(0x03020100), number.value); #elif defined(ABSL_IS_BIG_ENDIAN) EXPECT_EQ(UINT32_C(0x00010203), number.value); #else #error Unknown endianness #endif } #if defined(ABSL_HAVE_THREAD_LOCAL) TEST(ConfigTest, ThreadLocal) { static thread_local int mine_mine_mine = 16; EXPECT_EQ(16, mine_mine_mine); { absl::synchronization_internal::ThreadPool pool(1); pool.Schedule([&] { EXPECT_EQ(16, mine_mine_mine); mine_mine_mine = 32; EXPECT_EQ(32, mine_mine_mine); }); } EXPECT_EQ(16, mine_mine_mine); } #endif } // namespace abseil-20220623.1/absl/base/const_init.h000066400000000000000000000065471430371345100175440ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // ----------------------------------------------------------------------------- // kConstInit // ----------------------------------------------------------------------------- // // A constructor tag used to mark an object as safe for use as a global // variable, avoiding the usual lifetime issues that can affect globals. #ifndef ABSL_BASE_CONST_INIT_H_ #define ABSL_BASE_CONST_INIT_H_ #include "absl/base/config.h" // In general, objects with static storage duration (such as global variables) // can trigger tricky object lifetime situations. Attempting to access them // from the constructors or destructors of other global objects can result in // undefined behavior, unless their constructors and destructors are designed // with this issue in mind. // // The normal way to deal with this issue in C++11 is to use constant // initialization and trivial destructors. // // Constant initialization is guaranteed to occur before any other code // executes. Constructors that are declared 'constexpr' are eligible for // constant initialization. You can annotate a variable declaration with the // ABSL_CONST_INIT macro to express this intent. For compilers that support // it, this annotation will cause a compilation error for declarations that // aren't subject to constant initialization (perhaps because a runtime value // was passed as a constructor argument). // // On program shutdown, lifetime issues can be avoided on global objects by // ensuring that they contain trivial destructors. A class has a trivial // destructor unless it has a user-defined destructor, a virtual method or base // class, or a data member or base class with a non-trivial destructor of its // own. Objects with static storage duration and a trivial destructor are not // cleaned up on program shutdown, and are thus safe to access from other code // running during shutdown. 
// // For a few core Abseil classes, we make a best effort to allow for safe global // instances, even though these classes have non-trivial destructors. These // objects can be created with the absl::kConstInit tag. For example: // ABSL_CONST_INIT absl::Mutex global_mutex(absl::kConstInit); // // The line above declares a global variable of type absl::Mutex which can be // accessed at any point during startup or shutdown. global_mutex's destructor // will still run, but will not invalidate the object. Note that C++ specifies // that accessing an object after its destructor has run results in undefined // behavior, but this pattern works on the toolchains we support. // // The absl::kConstInit tag should only be used to define objects with static // or thread_local storage duration. namespace absl { ABSL_NAMESPACE_BEGIN enum ConstInitType { kConstInit, }; ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_CONST_INIT_H_ abseil-20220623.1/absl/base/dynamic_annotations.h000066400000000000000000000453441430371345100214320ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file defines dynamic annotations for use with dynamic analysis tool // such as valgrind, PIN, etc. // // Dynamic annotation is a source code annotation that affects the generated // code (that is, the annotation is not a comment). 
Each such annotation is // attached to a particular instruction and/or to a particular object (address) // in the program. // // The annotations that should be used by users are macros in all upper-case // (e.g., ABSL_ANNOTATE_THREAD_NAME). // // Actual implementation of these macros may differ depending on the dynamic // analysis tool being used. // // This file supports the following configurations: // - Dynamic Annotations enabled (with static thread-safety warnings disabled). // In this case, macros expand to functions implemented by Thread Sanitizer, // when building with TSan. When not provided an external implementation, // dynamic_annotations.cc provides no-op implementations. // // - Static Clang thread-safety warnings enabled. // When building with a Clang compiler that supports thread-safety warnings, // a subset of annotations can be statically-checked at compile-time. We // expand these macros to static-inline functions that can be analyzed for // thread-safety, but afterwards elided when building the final binary. // // - All annotations are disabled. // If neither Dynamic Annotations nor Clang thread-safety warnings are // enabled, then all annotation-macros expand to empty. #ifndef ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ #define ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ #include #include "absl/base/attributes.h" #include "absl/base/config.h" #ifdef __cplusplus #include "absl/base/macros.h" #endif // TODO(rogeeff): Remove after the backward compatibility period. #include "absl/base/internal/dynamic_annotations.h" // IWYU pragma: export // ------------------------------------------------------------------------- // Decide which features are enabled. 
#ifdef ABSL_HAVE_THREAD_SANITIZER #define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 #else #define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 #define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 #define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 // Clang provides limited support for static thread-safety analysis through a // feature called Annotalysis. We configure macro-definitions according to // whether Annotalysis support is available. When running in opt-mode, GCC // will issue a warning, if these attributes are compiled. Only include them // when compiling using Clang. #if defined(__clang__) #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 1 #if !defined(SWIG) #define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 #endif #else #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 #endif // Read/write annotations are enabled in Annotalysis mode; disabled otherwise. #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ ABSL_INTERNAL_ANNOTALYSIS_ENABLED #endif // ABSL_HAVE_THREAD_SANITIZER #ifdef __cplusplus #define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { #define ABSL_INTERNAL_END_EXTERN_C } // extern "C" #define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F #define ABSL_INTERNAL_STATIC_INLINE inline #else #define ABSL_INTERNAL_BEGIN_EXTERN_C // empty #define ABSL_INTERNAL_END_EXTERN_C // empty #define ABSL_INTERNAL_GLOBAL_SCOPED(F) F #define ABSL_INTERNAL_STATIC_INLINE static inline #endif // ------------------------------------------------------------------------- // Define race annotations. #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 // Some of the symbols used in this section (e.g. AnnotateBenignRaceSized) are // defined by the compiler-based santizer implementation, not by the Abseil // library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. 
// ------------------------------------------------------------- // Annotations that suppress errors. It is usually better to express the // program's synchronization using the other annotations, but these can be used // when all else fails. // Report that we may have a benign race at `pointer`, with size // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // point where `pointer` has been allocated, preferably close to the point // where the race happens. See also ABSL_ANNOTATE_BENIGN_RACE_STATIC. #define ABSL_ANNOTATE_BENIGN_RACE(pointer, description) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) // Same as ABSL_ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to // the memory range [`address`, `address`+`size`). #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ (__FILE__, __LINE__, address, size, description) // Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. // This annotation could be useful if you want to skip expensive race analysis // during some period of program execution, e.g. during initialization. #define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ (__FILE__, __LINE__, enable) // ------------------------------------------------------------- // Annotations useful for debugging. // Report the current thread `name` to a race detector. #define ABSL_ANNOTATE_THREAD_NAME(name) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) // ------------------------------------------------------------- // Annotations useful when implementing locks. They are not normally needed by // modules that merely use locks. The `lock` argument is a pointer to the lock // object. // Report that a lock has been created at address `lock`. 
#define ABSL_ANNOTATE_RWLOCK_CREATE(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) // Report that a linker initialized lock has been created at address `lock`. #ifdef ABSL_HAVE_THREAD_SANITIZER #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ (__FILE__, __LINE__, lock) #else #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ ABSL_ANNOTATE_RWLOCK_CREATE(lock) #endif // Report that the lock at address `lock` is about to be destroyed. #define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) // Report that the lock at address `lock` has been acquired. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. #define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ (__FILE__, __LINE__, lock, is_w) // Report that the lock at address `lock` is about to be released. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. #define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ (__FILE__, __LINE__, lock, is_w) // Apply ABSL_ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. #define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ namespace { \ class static_var##_annotator { \ public: \ static_var##_annotator() { \ ABSL_ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ #static_var ": " description); \ } \ }; \ static static_var##_annotator the##static_var##_annotator; \ } // namespace // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. 
ABSL_INTERNAL_BEGIN_EXTERN_C void AnnotateRWLockCreate(const char* file, int line, const volatile void* lock); void AnnotateRWLockCreateStatic(const char* file, int line, const volatile void* lock); void AnnotateRWLockDestroy(const char* file, int line, const volatile void* lock); void AnnotateRWLockAcquired(const char* file, int line, const volatile void* lock, long is_w); // NOLINT void AnnotateRWLockReleased(const char* file, int line, const volatile void* lock, long is_w); // NOLINT void AnnotateBenignRace(const char* file, int line, const volatile void* address, const char* description); void AnnotateBenignRaceSized(const char* file, int line, const volatile void* address, size_t size, const char* description); void AnnotateThreadName(const char* file, int line, const char* name); void AnnotateEnableRaceDetection(const char* file, int line, int enable); ABSL_INTERNAL_END_EXTERN_C #else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 #define ABSL_ANNOTATE_RWLOCK_CREATE(lock) // empty #define ABSL_ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty #define ABSL_ANNOTATE_RWLOCK_DESTROY(lock) // empty #define ABSL_ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty #define ABSL_ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty #define ABSL_ANNOTATE_BENIGN_RACE(address, description) // empty #define ABSL_ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty #define ABSL_ANNOTATE_THREAD_NAME(name) // empty #define ABSL_ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty #define ABSL_ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty #endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED // ------------------------------------------------------------------------- // Define memory annotations. 
#ifdef ABSL_HAVE_MEMORY_SANITIZER #include #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ __msan_unpoison(address, size) #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ __msan_allocated_memory(address, size) #else // !defined(ABSL_HAVE_MEMORY_SANITIZER) // TODO(rogeeff): remove this branch #ifdef ABSL_HAVE_THREAD_SANITIZER #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ do { \ (void)(address); \ (void)(size); \ } while (0) #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ do { \ (void)(address); \ (void)(size); \ } while (0) #else #define ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty #define ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty #endif #endif // ABSL_HAVE_MEMORY_SANITIZER // ------------------------------------------------------------------------- // Define IGNORE_READS_BEGIN/_END attributes. #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ __attribute((exclusive_lock_function("*"))) #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ __attribute((unlock_function("*"))) #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty #endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) // ------------------------------------------------------------------------- // Define IGNORE_READS_BEGIN/_END annotations. #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 // Some of the symbols used in this section (e.g. AnnotateIgnoreReadsBegin) are // defined by the compiler-based implementation, not by the Abseil // library. Therefore they do not use ABSL_INTERNAL_C_SYMBOL. // Request the analysis tool to ignore all reads in the current thread until // ABSL_ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey // reads, while still checking other reads and all writes. 
// See also ABSL_ANNOTATE_UNPROTECTED_READ. #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin) \ (__FILE__, __LINE__) // Stop ignoring reads. #define ABSL_ANNOTATE_IGNORE_READS_END() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd) \ (__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. ABSL_INTERNAL_BEGIN_EXTERN_C void AnnotateIgnoreReadsBegin(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE; void AnnotateIgnoreReadsEnd(const char* file, int line) ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE; ABSL_INTERNAL_END_EXTERN_C #elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) // When Annotalysis is enabled without Dynamic Annotations, the use of // static-inline functions allows the annotations to be read at compile-time, // while still letting the compiler elide the functions from the final build. // // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED( \ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsBegin)) \ () #define ABSL_ANNOTATE_IGNORE_READS_END() \ ABSL_INTERNAL_GLOBAL_SCOPED( \ ABSL_INTERNAL_C_SYMBOL(AbslInternalAnnotateIgnoreReadsEnd)) \ () ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( AbslInternalAnnotateIgnoreReadsBegin)() ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE {} ABSL_INTERNAL_STATIC_INLINE void ABSL_INTERNAL_C_SYMBOL( AbslInternalAnnotateIgnoreReadsEnd)() ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE {} #else #define ABSL_ANNOTATE_IGNORE_READS_BEGIN() // empty #define ABSL_ANNOTATE_IGNORE_READS_END() // empty #endif // ------------------------------------------------------------------------- // Define IGNORE_WRITES_BEGIN/_END annotations. #if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 // Similar to ABSL_ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. 
#define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) // Stop ignoring writes. #define ABSL_ANNOTATE_IGNORE_WRITES_END() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) // Function prototypes of annotations provided by the compiler-based sanitizer // implementation. ABSL_INTERNAL_BEGIN_EXTERN_C void AnnotateIgnoreWritesBegin(const char* file, int line); void AnnotateIgnoreWritesEnd(const char* file, int line); ABSL_INTERNAL_END_EXTERN_C #else #define ABSL_ANNOTATE_IGNORE_WRITES_BEGIN() // empty #define ABSL_ANNOTATE_IGNORE_WRITES_END() // empty #endif // ------------------------------------------------------------------------- // Define the ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more // primitive annotations defined above. // // Instead of doing // ABSL_ANNOTATE_IGNORE_READS_BEGIN(); // ... = x; // ABSL_ANNOTATE_IGNORE_READS_END(); // one can use // ... = ABSL_ANNOTATE_UNPROTECTED_READ(x); #if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) // Start ignoring all memory accesses (both reads and writes). #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do { \ ABSL_ANNOTATE_IGNORE_READS_BEGIN(); \ ABSL_ANNOTATE_IGNORE_WRITES_BEGIN(); \ } while (0) // Stop ignoring both reads and writes. #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do { \ ABSL_ANNOTATE_IGNORE_WRITES_END(); \ ABSL_ANNOTATE_IGNORE_READS_END(); \ } while (0) #ifdef __cplusplus // ABSL_ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. 
#define ABSL_ANNOTATE_UNPROTECTED_READ(x) \ absl::base_internal::AnnotateUnprotectedRead(x) namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { template inline T AnnotateUnprotectedRead(const volatile T& x) { // NOLINT ABSL_ANNOTATE_IGNORE_READS_BEGIN(); T res = x; ABSL_ANNOTATE_IGNORE_READS_END(); return res; } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif #else #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty #define ABSL_ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty #define ABSL_ANNOTATE_UNPROTECTED_READ(x) (x) #endif // ------------------------------------------------------------------------- // Address sanitizer annotations #ifdef ABSL_HAVE_ADDRESS_SANITIZER // Describe the current state of a contiguous container such as e.g. // std::vector or std::string. For more details see // sanitizer/common_interface_defs.h, which is provided by the compiler. #include #define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) #define ABSL_ADDRESS_SANITIZER_REDZONE(name) \ struct { \ alignas(8) char x[8]; \ } name #else #define ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) // empty #define ABSL_ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") #endif // ABSL_HAVE_ADDRESS_SANITIZER // ------------------------------------------------------------------------- // Undefine the macros intended only for this file. 
#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED #undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_BEGIN_EXTERN_C #undef ABSL_INTERNAL_END_EXTERN_C #undef ABSL_INTERNAL_STATIC_INLINE #endif // ABSL_BASE_DYNAMIC_ANNOTATIONS_H_ abseil-20220623.1/absl/base/exception_safety_testing_test.cc000066400000000000000000000667351430371345100237030ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/exception_safety_testing.h" #ifdef ABSL_HAVE_EXCEPTIONS #include #include #include #include #include #include #include "gtest/gtest-spi.h" #include "gtest/gtest.h" #include "absl/memory/memory.h" namespace testing { namespace { using ::testing::exceptions_internal::SetCountdown; using ::testing::exceptions_internal::TestException; using ::testing::exceptions_internal::UnsetCountdown; // EXPECT_NO_THROW can't inspect the thrown inspection in general. template void ExpectNoThrow(const F& f) { try { f(); } catch (const TestException& e) { ADD_FAILURE() << "Unexpected exception thrown from " << e.what(); } } TEST(ThrowingValueTest, Throws) { SetCountdown(); EXPECT_THROW(ThrowingValue<> bomb, TestException); // It's not guaranteed that every operator only throws *once*. 
The default // ctor only throws once, though, so use it to make sure we only throw when // the countdown hits 0 SetCountdown(2); ExpectNoThrow([]() { ThrowingValue<> bomb; }); ExpectNoThrow([]() { ThrowingValue<> bomb; }); EXPECT_THROW(ThrowingValue<> bomb, TestException); UnsetCountdown(); } // Tests that an operation throws when the countdown is at 0, doesn't throw when // the countdown doesn't hit 0, and doesn't modify the state of the // ThrowingValue if it throws template void TestOp(const F& f) { ExpectNoThrow(f); SetCountdown(); EXPECT_THROW(f(), TestException); UnsetCountdown(); } TEST(ThrowingValueTest, ThrowingCtors) { ThrowingValue<> bomb; TestOp([]() { ThrowingValue<> bomb(1); }); TestOp([&]() { ThrowingValue<> bomb1 = bomb; }); TestOp([&]() { ThrowingValue<> bomb1 = std::move(bomb); }); } TEST(ThrowingValueTest, ThrowingAssignment) { ThrowingValue<> bomb, bomb1; TestOp([&]() { bomb = bomb1; }); TestOp([&]() { bomb = std::move(bomb1); }); // Test that when assignment throws, the assignment should fail (lhs != rhs) // and strong guarantee fails (lhs != lhs_copy). 
{ ThrowingValue<> lhs(39), rhs(42); ThrowingValue<> lhs_copy(lhs); SetCountdown(); EXPECT_THROW(lhs = rhs, TestException); UnsetCountdown(); EXPECT_NE(lhs, rhs); EXPECT_NE(lhs_copy, lhs); } { ThrowingValue<> lhs(39), rhs(42); ThrowingValue<> lhs_copy(lhs), rhs_copy(rhs); SetCountdown(); EXPECT_THROW(lhs = std::move(rhs), TestException); UnsetCountdown(); EXPECT_NE(lhs, rhs_copy); EXPECT_NE(lhs_copy, lhs); } } TEST(ThrowingValueTest, ThrowingComparisons) { ThrowingValue<> bomb1, bomb2; TestOp([&]() { return bomb1 == bomb2; }); TestOp([&]() { return bomb1 != bomb2; }); TestOp([&]() { return bomb1 < bomb2; }); TestOp([&]() { return bomb1 <= bomb2; }); TestOp([&]() { return bomb1 > bomb2; }); TestOp([&]() { return bomb1 >= bomb2; }); } TEST(ThrowingValueTest, ThrowingArithmeticOps) { ThrowingValue<> bomb1(1), bomb2(2); TestOp([&bomb1]() { +bomb1; }); TestOp([&bomb1]() { -bomb1; }); TestOp([&bomb1]() { ++bomb1; }); TestOp([&bomb1]() { bomb1++; }); TestOp([&bomb1]() { --bomb1; }); TestOp([&bomb1]() { bomb1--; }); TestOp([&]() { bomb1 + bomb2; }); TestOp([&]() { bomb1 - bomb2; }); TestOp([&]() { bomb1* bomb2; }); TestOp([&]() { bomb1 / bomb2; }); TestOp([&]() { bomb1 << 1; }); TestOp([&]() { bomb1 >> 1; }); } TEST(ThrowingValueTest, ThrowingLogicalOps) { ThrowingValue<> bomb1, bomb2; TestOp([&bomb1]() { !bomb1; }); TestOp([&]() { bomb1&& bomb2; }); TestOp([&]() { bomb1 || bomb2; }); } TEST(ThrowingValueTest, ThrowingBitwiseOps) { ThrowingValue<> bomb1, bomb2; TestOp([&bomb1]() { ~bomb1; }); TestOp([&]() { bomb1& bomb2; }); TestOp([&]() { bomb1 | bomb2; }); TestOp([&]() { bomb1 ^ bomb2; }); } TEST(ThrowingValueTest, ThrowingCompoundAssignmentOps) { ThrowingValue<> bomb1(1), bomb2(2); TestOp([&]() { bomb1 += bomb2; }); TestOp([&]() { bomb1 -= bomb2; }); TestOp([&]() { bomb1 *= bomb2; }); TestOp([&]() { bomb1 /= bomb2; }); TestOp([&]() { bomb1 %= bomb2; }); TestOp([&]() { bomb1 &= bomb2; }); TestOp([&]() { bomb1 |= bomb2; }); TestOp([&]() { bomb1 ^= bomb2; }); TestOp([&]() { 
bomb1 *= bomb2; }); } TEST(ThrowingValueTest, ThrowingStreamOps) { ThrowingValue<> bomb; TestOp([&]() { std::istringstream stream; stream >> bomb; }); TestOp([&]() { std::stringstream stream; stream << bomb; }); } // Tests the operator<< of ThrowingValue by forcing ConstructorTracker to emit // a nonfatal failure that contains the string representation of the Thrower TEST(ThrowingValueTest, StreamOpsOutput) { using ::testing::TypeSpec; exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); // Test default spec list (kEverythingThrows) EXPECT_NONFATAL_FAILURE( { using Thrower = ThrowingValue; auto thrower = Thrower(123); thrower.~Thrower(); }, "ThrowingValue<>(123)"); // Test with one item in spec list (kNoThrowCopy) EXPECT_NONFATAL_FAILURE( { using Thrower = ThrowingValue; auto thrower = Thrower(234); thrower.~Thrower(); }, "ThrowingValue(234)"); // Test with multiple items in spec list (kNoThrowMove, kNoThrowNew) EXPECT_NONFATAL_FAILURE( { using Thrower = ThrowingValue; auto thrower = Thrower(345); thrower.~Thrower(); }, "ThrowingValue(345)"); // Test with all items in spec list (kNoThrowCopy, kNoThrowMove, kNoThrowNew) EXPECT_NONFATAL_FAILURE( { using Thrower = ThrowingValue(-1)>; auto thrower = Thrower(456); thrower.~Thrower(); }, "ThrowingValue(456)"); } template void TestAllocatingOp(const F& f) { ExpectNoThrow(f); SetCountdown(); EXPECT_THROW(f(), exceptions_internal::TestBadAllocException); UnsetCountdown(); } TEST(ThrowingValueTest, ThrowingAllocatingOps) { // make_unique calls unqualified operator new, so these exercise the // ThrowingValue overloads. 
TestAllocatingOp([]() { return absl::make_unique>(1); }); TestAllocatingOp([]() { return absl::make_unique[]>(2); }); } TEST(ThrowingValueTest, NonThrowingMoveCtor) { ThrowingValue nothrow_ctor; SetCountdown(); ExpectNoThrow([¬hrow_ctor]() { ThrowingValue nothrow1 = std::move(nothrow_ctor); }); UnsetCountdown(); } TEST(ThrowingValueTest, NonThrowingMoveAssign) { ThrowingValue nothrow_assign1, nothrow_assign2; SetCountdown(); ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { nothrow_assign1 = std::move(nothrow_assign2); }); UnsetCountdown(); } TEST(ThrowingValueTest, ThrowingCopyCtor) { ThrowingValue<> tv; TestOp([&]() { ThrowingValue<> tv_copy(tv); }); } TEST(ThrowingValueTest, ThrowingCopyAssign) { ThrowingValue<> tv1, tv2; TestOp([&]() { tv1 = tv2; }); } TEST(ThrowingValueTest, NonThrowingCopyCtor) { ThrowingValue nothrow_ctor; SetCountdown(); ExpectNoThrow([¬hrow_ctor]() { ThrowingValue nothrow1(nothrow_ctor); }); UnsetCountdown(); } TEST(ThrowingValueTest, NonThrowingCopyAssign) { ThrowingValue nothrow_assign1, nothrow_assign2; SetCountdown(); ExpectNoThrow([¬hrow_assign1, ¬hrow_assign2]() { nothrow_assign1 = nothrow_assign2; }); UnsetCountdown(); } TEST(ThrowingValueTest, ThrowingSwap) { ThrowingValue<> bomb1, bomb2; TestOp([&]() { std::swap(bomb1, bomb2); }); } TEST(ThrowingValueTest, NonThrowingSwap) { ThrowingValue bomb1, bomb2; ExpectNoThrow([&]() { std::swap(bomb1, bomb2); }); } TEST(ThrowingValueTest, NonThrowingAllocation) { ThrowingValue* allocated; ThrowingValue* array; ExpectNoThrow([&allocated]() { allocated = new ThrowingValue(1); delete allocated; }); ExpectNoThrow([&array]() { array = new ThrowingValue[2]; delete[] array; }); } TEST(ThrowingValueTest, NonThrowingDelete) { auto* allocated = new ThrowingValue<>(1); auto* array = new ThrowingValue<>[2]; SetCountdown(); ExpectNoThrow([allocated]() { delete allocated; }); SetCountdown(); ExpectNoThrow([array]() { delete[] array; }); UnsetCountdown(); } TEST(ThrowingValueTest, 
NonThrowingPlacementDelete) { constexpr int kArrayLen = 2; // We intentionally create extra space to store the tag allocated by placement // new[]. constexpr int kStorageLen = 4; alignas(ThrowingValue<>) unsigned char buf[sizeof(ThrowingValue<>)]; alignas(ThrowingValue<>) unsigned char array_buf[sizeof(ThrowingValue<>[kStorageLen])]; auto* placed = new (&buf) ThrowingValue<>(1); auto placed_array = new (&array_buf) ThrowingValue<>[kArrayLen]; SetCountdown(); ExpectNoThrow([placed, &buf]() { placed->~ThrowingValue<>(); ThrowingValue<>::operator delete(placed, &buf); }); SetCountdown(); ExpectNoThrow([&, placed_array]() { for (int i = 0; i < kArrayLen; ++i) placed_array[i].~ThrowingValue<>(); ThrowingValue<>::operator delete[](placed_array, &array_buf); }); UnsetCountdown(); } TEST(ThrowingValueTest, NonThrowingDestructor) { auto* allocated = new ThrowingValue<>(); SetCountdown(); ExpectNoThrow([allocated]() { delete allocated; }); UnsetCountdown(); } TEST(ThrowingBoolTest, ThrowingBool) { ThrowingBool t = true; // Test that it's contextually convertible to bool if (t) { // NOLINT(whitespace/empty_if_body) } EXPECT_TRUE(t); TestOp([&]() { (void)!t; }); } TEST(ThrowingAllocatorTest, MemoryManagement) { // Just exercise the memory management capabilities under LSan to make sure we // don't leak. ThrowingAllocator int_alloc; int* ip = int_alloc.allocate(1); int_alloc.deallocate(ip, 1); int* i_array = int_alloc.allocate(2); int_alloc.deallocate(i_array, 2); ThrowingAllocator> tv_alloc; ThrowingValue<>* ptr = tv_alloc.allocate(1); tv_alloc.deallocate(ptr, 1); ThrowingValue<>* tv_array = tv_alloc.allocate(2); tv_alloc.deallocate(tv_array, 2); } TEST(ThrowingAllocatorTest, CallsGlobalNew) { ThrowingAllocator, AllocSpec::kNoThrowAllocate> nothrow_alloc; ThrowingValue<>* ptr; SetCountdown(); // This will only throw if ThrowingValue::new is called. 
ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); nothrow_alloc.deallocate(ptr, 1); UnsetCountdown(); } TEST(ThrowingAllocatorTest, ThrowingConstructors) { ThrowingAllocator int_alloc; int* ip = nullptr; SetCountdown(); EXPECT_THROW(ip = int_alloc.allocate(1), TestException); ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); *ip = 1; SetCountdown(); EXPECT_THROW(int_alloc.construct(ip, 2), TestException); EXPECT_EQ(*ip, 1); int_alloc.deallocate(ip, 1); UnsetCountdown(); } TEST(ThrowingAllocatorTest, NonThrowingConstruction) { { ThrowingAllocator int_alloc; int* ip = nullptr; SetCountdown(); ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); SetCountdown(); ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); EXPECT_EQ(*ip, 2); int_alloc.deallocate(ip, 1); UnsetCountdown(); } { ThrowingAllocator int_alloc; int* ip = nullptr; ExpectNoThrow([&]() { ip = int_alloc.allocate(1); }); ExpectNoThrow([&]() { int_alloc.construct(ip, 2); }); EXPECT_EQ(*ip, 2); int_alloc.deallocate(ip, 1); } { ThrowingAllocator, AllocSpec::kNoThrowAllocate> nothrow_alloc; ThrowingValue<>* ptr; SetCountdown(); ExpectNoThrow([&]() { ptr = nothrow_alloc.allocate(1); }); SetCountdown(); ExpectNoThrow( [&]() { nothrow_alloc.construct(ptr, 2, testing::nothrow_ctor); }); EXPECT_EQ(ptr->Get(), 2); nothrow_alloc.destroy(ptr); nothrow_alloc.deallocate(ptr, 1); UnsetCountdown(); } { ThrowingAllocator a; SetCountdown(); ExpectNoThrow([&]() { ThrowingAllocator a1 = a; }); SetCountdown(); ExpectNoThrow([&]() { ThrowingAllocator a1 = std::move(a); }); UnsetCountdown(); } } TEST(ThrowingAllocatorTest, ThrowingAllocatorConstruction) { ThrowingAllocator a; TestOp([]() { ThrowingAllocator a; }); TestOp([&]() { a.select_on_container_copy_construction(); }); } TEST(ThrowingAllocatorTest, State) { ThrowingAllocator a1, a2; EXPECT_NE(a1, a2); auto a3 = a1; EXPECT_EQ(a3, a1); int* ip = a1.allocate(1); EXPECT_EQ(a3, a1); a3.deallocate(ip, 1); EXPECT_EQ(a3, a1); } TEST(ThrowingAllocatorTest, InVector) { 
std::vector, ThrowingAllocator>> v; for (int i = 0; i < 20; ++i) v.push_back({}); for (int i = 0; i < 20; ++i) v.pop_back(); } TEST(ThrowingAllocatorTest, InList) { std::list, ThrowingAllocator>> l; for (int i = 0; i < 20; ++i) l.push_back({}); for (int i = 0; i < 20; ++i) l.pop_back(); for (int i = 0; i < 20; ++i) l.push_front({}); for (int i = 0; i < 20; ++i) l.pop_front(); } template struct NullaryTestValidator : public std::false_type {}; template struct NullaryTestValidator< TesterInstance, absl::void_t().Test())>> : public std::true_type {}; template bool HasNullaryTest(const TesterInstance&) { return NullaryTestValidator::value; } void DummyOp(void*) {} template struct UnaryTestValidator : public std::false_type {}; template struct UnaryTestValidator< TesterInstance, absl::void_t().Test(DummyOp))>> : public std::true_type {}; template bool HasUnaryTest(const TesterInstance&) { return UnaryTestValidator::value; } TEST(ExceptionSafetyTesterTest, IncompleteTypesAreNotTestable) { using T = exceptions_internal::UninitializedT; auto op = [](T* t) {}; auto inv = [](T*) { return testing::AssertionSuccess(); }; auto fac = []() { return absl::make_unique(); }; // Test that providing operation and inveriants still does not allow for the // the invocation of .Test() and .Test(op) because it lacks a factory auto without_fac = testing::MakeExceptionSafetyTester().WithOperation(op).WithContracts( inv, testing::strong_guarantee); EXPECT_FALSE(HasNullaryTest(without_fac)); EXPECT_FALSE(HasUnaryTest(without_fac)); // Test that providing contracts and factory allows the invocation of // .Test(op) but does not allow for .Test() because it lacks an operation auto without_op = testing::MakeExceptionSafetyTester() .WithContracts(inv, testing::strong_guarantee) .WithFactory(fac); EXPECT_FALSE(HasNullaryTest(without_op)); EXPECT_TRUE(HasUnaryTest(without_op)); // Test that providing operation and factory still does not allow for the // the invocation of .Test() and .Test(op) because 
it lacks contracts auto without_inv = testing::MakeExceptionSafetyTester().WithOperation(op).WithFactory(fac); EXPECT_FALSE(HasNullaryTest(without_inv)); EXPECT_FALSE(HasUnaryTest(without_inv)); } struct ExampleStruct {}; std::unique_ptr ExampleFunctionFactory() { return absl::make_unique(); } void ExampleFunctionOperation(ExampleStruct*) {} testing::AssertionResult ExampleFunctionContract(ExampleStruct*) { return testing::AssertionSuccess(); } struct { std::unique_ptr operator()() const { return ExampleFunctionFactory(); } } example_struct_factory; struct { void operator()(ExampleStruct*) const {} } example_struct_operation; struct { testing::AssertionResult operator()(ExampleStruct* example_struct) const { return ExampleFunctionContract(example_struct); } } example_struct_contract; auto example_lambda_factory = []() { return ExampleFunctionFactory(); }; auto example_lambda_operation = [](ExampleStruct*) {}; auto example_lambda_contract = [](ExampleStruct* example_struct) { return ExampleFunctionContract(example_struct); }; // Testing that function references, pointers, structs with operator() and // lambdas can all be used with ExceptionSafetyTester TEST(ExceptionSafetyTesterTest, MixedFunctionTypes) { // function reference EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(ExampleFunctionFactory) .WithOperation(ExampleFunctionOperation) .WithContracts(ExampleFunctionContract) .Test()); // function pointer EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(&ExampleFunctionFactory) .WithOperation(&ExampleFunctionOperation) .WithContracts(&ExampleFunctionContract) .Test()); // struct EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(example_struct_factory) .WithOperation(example_struct_operation) .WithContracts(example_struct_contract) .Test()); // lambda EXPECT_TRUE(testing::MakeExceptionSafetyTester() .WithFactory(example_lambda_factory) .WithOperation(example_lambda_operation) .WithContracts(example_lambda_contract) .Test()); } 
struct NonNegative { bool operator==(const NonNegative& other) const { return i == other.i; } int i; }; testing::AssertionResult CheckNonNegativeInvariants(NonNegative* g) { if (g->i >= 0) { return testing::AssertionSuccess(); } return testing::AssertionFailure() << "i should be non-negative but is " << g->i; } struct { template void operator()(T* t) const { (*t)(); } } invoker; auto tester = testing::MakeExceptionSafetyTester().WithOperation(invoker).WithContracts( CheckNonNegativeInvariants); auto strong_tester = tester.WithContracts(testing::strong_guarantee); struct FailsBasicGuarantee : public NonNegative { void operator()() { --i; ThrowingValue<> bomb; ++i; } }; TEST(ExceptionCheckTest, BasicGuaranteeFailure) { EXPECT_FALSE(tester.WithInitialValue(FailsBasicGuarantee{}).Test()); } struct FollowsBasicGuarantee : public NonNegative { void operator()() { ++i; ThrowingValue<> bomb; } }; TEST(ExceptionCheckTest, BasicGuarantee) { EXPECT_TRUE(tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); } TEST(ExceptionCheckTest, StrongGuaranteeFailure) { EXPECT_FALSE(strong_tester.WithInitialValue(FailsBasicGuarantee{}).Test()); EXPECT_FALSE(strong_tester.WithInitialValue(FollowsBasicGuarantee{}).Test()); } struct BasicGuaranteeWithExtraContracts : public NonNegative { // After operator(), i is incremented. 
If operator() throws, i is set to 9999 void operator()() { int old_i = i; i = kExceptionSentinel; ThrowingValue<> bomb; i = ++old_i; } static constexpr int kExceptionSentinel = 9999; }; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int BasicGuaranteeWithExtraContracts::kExceptionSentinel; #endif TEST(ExceptionCheckTest, BasicGuaranteeWithExtraContracts) { auto tester_with_val = tester.WithInitialValue(BasicGuaranteeWithExtraContracts{}); EXPECT_TRUE(tester_with_val.Test()); EXPECT_TRUE( tester_with_val .WithContracts([](BasicGuaranteeWithExtraContracts* o) { if (o->i == BasicGuaranteeWithExtraContracts::kExceptionSentinel) { return testing::AssertionSuccess(); } return testing::AssertionFailure() << "i should be " << BasicGuaranteeWithExtraContracts::kExceptionSentinel << ", but is " << o->i; }) .Test()); } struct FollowsStrongGuarantee : public NonNegative { void operator()() { ThrowingValue<> bomb; } }; TEST(ExceptionCheckTest, StrongGuarantee) { EXPECT_TRUE(tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}).Test()); } struct HasReset : public NonNegative { void operator()() { i = -1; ThrowingValue<> bomb; i = 1; } void reset() { i = 0; } }; testing::AssertionResult CheckHasResetContracts(HasReset* h) { h->reset(); return testing::AssertionResult(h->i == 0); } TEST(ExceptionCheckTest, ModifyingChecker) { auto set_to_1000 = [](FollowsBasicGuarantee* g) { g->i = 1000; return testing::AssertionSuccess(); }; auto is_1000 = [](FollowsBasicGuarantee* g) { return testing::AssertionResult(g->i == 1000); }; auto increment = [](FollowsStrongGuarantee* g) { ++g->i; return testing::AssertionSuccess(); }; EXPECT_FALSE(tester.WithInitialValue(FollowsBasicGuarantee{}) .WithContracts(set_to_1000, is_1000) .Test()); EXPECT_TRUE(strong_tester.WithInitialValue(FollowsStrongGuarantee{}) .WithContracts(increment) .Test()); EXPECT_TRUE(testing::MakeExceptionSafetyTester() 
.WithInitialValue(HasReset{}) .WithContracts(CheckHasResetContracts) .Test(invoker)); } TEST(ExceptionSafetyTesterTest, ResetsCountdown) { auto test = testing::MakeExceptionSafetyTester() .WithInitialValue(ThrowingValue<>()) .WithContracts([](ThrowingValue<>*) { return AssertionSuccess(); }) .WithOperation([](ThrowingValue<>*) {}); ASSERT_TRUE(test.Test()); // If the countdown isn't reset because there were no exceptions thrown, then // this will fail with a termination from an unhandled exception EXPECT_TRUE(test.Test()); } struct NonCopyable : public NonNegative { NonCopyable(const NonCopyable&) = delete; NonCopyable() : NonNegative{0} {} void operator()() { ThrowingValue<> bomb; } }; TEST(ExceptionCheckTest, NonCopyable) { auto factory = []() { return absl::make_unique(); }; EXPECT_TRUE(tester.WithFactory(factory).Test()); EXPECT_TRUE(strong_tester.WithFactory(factory).Test()); } struct NonEqualityComparable : public NonNegative { void operator()() { ThrowingValue<> bomb; } void ModifyOnThrow() { ++i; ThrowingValue<> bomb; static_cast(bomb); --i; } }; TEST(ExceptionCheckTest, NonEqualityComparable) { auto nec_is_strong = [](NonEqualityComparable* nec) { return testing::AssertionResult(nec->i == NonEqualityComparable().i); }; auto strong_nec_tester = tester.WithInitialValue(NonEqualityComparable{}) .WithContracts(nec_is_strong); EXPECT_TRUE(strong_nec_tester.Test()); EXPECT_FALSE(strong_nec_tester.Test( [](NonEqualityComparable* n) { n->ModifyOnThrow(); })); } template struct ExhaustivenessTester { void operator()() { successes |= 1; T b1; static_cast(b1); successes |= (1 << 1); T b2; static_cast(b2); successes |= (1 << 2); T b3; static_cast(b3); successes |= (1 << 3); } bool operator==(const ExhaustivenessTester>&) const { return true; } static unsigned char successes; }; struct { template testing::AssertionResult operator()(ExhaustivenessTester*) const { return testing::AssertionSuccess(); } } CheckExhaustivenessTesterContracts; template unsigned char 
ExhaustivenessTester::successes = 0; TEST(ExceptionCheckTest, Exhaustiveness) { auto exhaust_tester = testing::MakeExceptionSafetyTester() .WithContracts(CheckExhaustivenessTesterContracts) .WithOperation(invoker); EXPECT_TRUE( exhaust_tester.WithInitialValue(ExhaustivenessTester{}).Test()); EXPECT_EQ(ExhaustivenessTester::successes, 0xF); EXPECT_TRUE( exhaust_tester.WithInitialValue(ExhaustivenessTester>{}) .WithContracts(testing::strong_guarantee) .Test()); EXPECT_EQ(ExhaustivenessTester>::successes, 0xF); } struct LeaksIfCtorThrows : private exceptions_internal::TrackedObject { LeaksIfCtorThrows() : TrackedObject(ABSL_PRETTY_FUNCTION) { ++counter; ThrowingValue<> v; static_cast(v); --counter; } LeaksIfCtorThrows(const LeaksIfCtorThrows&) noexcept : TrackedObject(ABSL_PRETTY_FUNCTION) {} static int counter; }; int LeaksIfCtorThrows::counter = 0; TEST(ExceptionCheckTest, TestLeakyCtor) { testing::TestThrowingCtor(); EXPECT_EQ(LeaksIfCtorThrows::counter, 1); LeaksIfCtorThrows::counter = 0; } struct Tracked : private exceptions_internal::TrackedObject { Tracked() : TrackedObject(ABSL_PRETTY_FUNCTION) {} }; TEST(ConstructorTrackerTest, CreatedBefore) { Tracked a, b, c; exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); } TEST(ConstructorTrackerTest, CreatedAfter) { exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); Tracked a, b, c; } TEST(ConstructorTrackerTest, NotDestroyedAfter) { alignas(Tracked) unsigned char storage[sizeof(Tracked)]; EXPECT_NONFATAL_FAILURE( { exceptions_internal::ConstructorTracker ct( exceptions_internal::countdown); new (&storage) Tracked(); }, "not destroyed"); } TEST(ConstructorTrackerTest, DestroyedTwice) { exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); EXPECT_NONFATAL_FAILURE( { Tracked t; t.~Tracked(); }, "re-destroyed"); } TEST(ConstructorTrackerTest, ConstructedTwice) { exceptions_internal::ConstructorTracker ct(exceptions_internal::countdown); 
alignas(Tracked) unsigned char storage[sizeof(Tracked)]; EXPECT_NONFATAL_FAILURE( { new (&storage) Tracked(); new (&storage) Tracked(); reinterpret_cast(&storage)->~Tracked(); }, "re-constructed"); } TEST(ThrowingValueTraitsTest, RelationalOperators) { ThrowingValue<> a, b; EXPECT_TRUE((std::is_convertible::value)); EXPECT_TRUE((std::is_convertible::value)); EXPECT_TRUE((std::is_convertible::value)); EXPECT_TRUE((std::is_convertible::value)); EXPECT_TRUE((std::is_convertible b), bool>::value)); EXPECT_TRUE((std::is_convertible= b), bool>::value)); } TEST(ThrowingAllocatorTraitsTest, Assignablility) { EXPECT_TRUE(absl::is_move_assignable>::value); EXPECT_TRUE(absl::is_copy_assignable>::value); EXPECT_TRUE(std::is_nothrow_move_assignable>::value); EXPECT_TRUE(std::is_nothrow_copy_assignable>::value); } } // namespace } // namespace testing #endif // ABSL_HAVE_EXCEPTIONS abseil-20220623.1/absl/base/inline_variable_test.cc000066400000000000000000000036721430371345100217070ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include "absl/base/internal/inline_variable.h" #include "absl/base/internal/inline_variable_testing.h" #include "gtest/gtest.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace inline_variable_testing_internal { namespace { TEST(InlineVariableTest, Constexpr) { static_assert(inline_variable_foo.value == 5, ""); static_assert(other_inline_variable_foo.value == 5, ""); static_assert(inline_variable_int == 5, ""); static_assert(other_inline_variable_int == 5, ""); } TEST(InlineVariableTest, DefaultConstructedIdentityEquality) { EXPECT_EQ(get_foo_a().value, 5); EXPECT_EQ(get_foo_b().value, 5); EXPECT_EQ(&get_foo_a(), &get_foo_b()); } TEST(InlineVariableTest, DefaultConstructedIdentityInequality) { EXPECT_NE(&inline_variable_foo, &other_inline_variable_foo); } TEST(InlineVariableTest, InitializedIdentityEquality) { EXPECT_EQ(get_int_a(), 5); EXPECT_EQ(get_int_b(), 5); EXPECT_EQ(&get_int_a(), &get_int_b()); } TEST(InlineVariableTest, InitializedIdentityInequality) { EXPECT_NE(&inline_variable_int, &other_inline_variable_int); } TEST(InlineVariableTest, FunPtrType) { static_assert( std::is_same::type>::value, ""); } } // namespace } // namespace inline_variable_testing_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/inline_variable_test_a.cc000066400000000000000000000016551430371345100222060ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/inline_variable_testing.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace inline_variable_testing_internal { const Foo& get_foo_a() { return inline_variable_foo; } const int& get_int_a() { return inline_variable_int; } } // namespace inline_variable_testing_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/inline_variable_test_b.cc000066400000000000000000000016551430371345100222070ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/inline_variable_testing.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace inline_variable_testing_internal { const Foo& get_foo_b() { return inline_variable_foo; } const int& get_int_b() { return inline_variable_int; } } // namespace inline_variable_testing_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/000077500000000000000000000000001430371345100170225ustar00rootroot00000000000000abseil-20220623.1/absl/base/internal/atomic_hook.h000066400000000000000000000165271430371345100215020ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ #define ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #if defined(_MSC_VER) && !defined(__clang__) #define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 0 #else #define ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT 1 #endif #if defined(_MSC_VER) #define ABSL_HAVE_WORKING_ATOMIC_POINTER 0 #else #define ABSL_HAVE_WORKING_ATOMIC_POINTER 1 #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { template class AtomicHook; // To workaround AtomicHook not being constant-initializable on some platforms, // prefer to annotate instances with `ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES` // instead of `ABSL_CONST_INIT`. #if ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT #define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_CONST_INIT #else #define ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES #endif // `AtomicHook` is a helper class, templatized on a raw function pointer type, // for implementing Abseil customization hooks. It is a callable object that // dispatches to the registered hook. Objects of type `AtomicHook` must have // static or thread storage duration. // // A default constructed object performs a no-op (and returns a default // constructed object) if no hook has been registered. // // Hooks can be pre-registered via constant initialization, for example: // // ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static AtomicHook // my_hook(DefaultAction); // // and then changed at runtime via a call to `Store()`. 
// // Reads and writes guarantee memory_order_acquire/memory_order_release // semantics. template class AtomicHook { public: using FnPtr = ReturnType (*)(Args...); // Constructs an object that by default performs a no-op (and // returns a default constructed object) when no hook as been registered. constexpr AtomicHook() : AtomicHook(DummyFunction) {} // Constructs an object that by default dispatches to/returns the // pre-registered default_fn when no hook has been registered at runtime. #if ABSL_HAVE_WORKING_ATOMIC_POINTER && ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT explicit constexpr AtomicHook(FnPtr default_fn) : hook_(default_fn), default_fn_(default_fn) {} #elif ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT explicit constexpr AtomicHook(FnPtr default_fn) : hook_(kUninitialized), default_fn_(default_fn) {} #else // As of January 2020, on all known versions of MSVC this constructor runs in // the global constructor sequence. If `Store()` is called by a dynamic // initializer, we want to preserve the value, even if this constructor runs // after the call to `Store()`. If not, `hook_` will be // zero-initialized by the linker and we have no need to set it. // https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html explicit constexpr AtomicHook(FnPtr default_fn) : /* hook_(deliberately omitted), */ default_fn_(default_fn) { static_assert(kUninitialized == 0, "here we rely on zero-initialization"); } #endif // Stores the provided function pointer as the value for this hook. // // This is intended to be called once. Multiple calls are legal only if the // same function pointer is provided for each call. The store is implemented // as a memory_order_release operation, and read accesses are implemented as // memory_order_acquire. void Store(FnPtr fn) { bool success = DoStore(fn); static_cast(success); assert(success); } // Invokes the registered callback. 
If no callback has yet been registered, a // default-constructed object of the appropriate type is returned instead. template ReturnType operator()(CallArgs&&... args) const { return DoLoad()(std::forward(args)...); } // Returns the registered callback, or nullptr if none has been registered. // Useful if client code needs to conditionalize behavior based on whether a // callback was registered. // // Note that atomic_hook.Load()() and atomic_hook() have different semantics: // operator()() will perform a no-op if no callback was registered, while // Load()() will dereference a null function pointer. Prefer operator()() to // Load()() unless you must conditionalize behavior on whether a hook was // registered. FnPtr Load() const { FnPtr ptr = DoLoad(); return (ptr == DummyFunction) ? nullptr : ptr; } private: static ReturnType DummyFunction(Args...) { return ReturnType(); } // Current versions of MSVC (as of September 2017) have a broken // implementation of std::atomic: Its constructor attempts to do the // equivalent of a reinterpret_cast in a constexpr context, which is not // allowed. // // This causes an issue when building with LLVM under Windows. To avoid this, // we use a less-efficient, intptr_t-based implementation on Windows. #if ABSL_HAVE_WORKING_ATOMIC_POINTER // Return the stored value, or DummyFunction if no value has been stored. FnPtr DoLoad() const { return hook_.load(std::memory_order_acquire); } // Store the given value. Returns false if a different value was already // stored to this object. bool DoStore(FnPtr fn) { assert(fn); FnPtr expected = default_fn_; const bool store_succeeded = hook_.compare_exchange_strong( expected, fn, std::memory_order_acq_rel, std::memory_order_acquire); const bool same_value_already_stored = (expected == fn); return store_succeeded || same_value_already_stored; } std::atomic hook_; #else // !ABSL_HAVE_WORKING_ATOMIC_POINTER // Use a sentinel value unlikely to be the address of an actual function. 
static constexpr intptr_t kUninitialized = 0; static_assert(sizeof(intptr_t) >= sizeof(FnPtr), "intptr_t can't contain a function pointer"); FnPtr DoLoad() const { const intptr_t value = hook_.load(std::memory_order_acquire); if (value == kUninitialized) { return default_fn_; } return reinterpret_cast(value); } bool DoStore(FnPtr fn) { assert(fn); const auto value = reinterpret_cast(fn); intptr_t expected = kUninitialized; const bool store_succeeded = hook_.compare_exchange_strong( expected, value, std::memory_order_acq_rel, std::memory_order_acquire); const bool same_value_already_stored = (expected == value); return store_succeeded || same_value_already_stored; } std::atomic hook_; #endif const FnPtr default_fn_; }; #undef ABSL_HAVE_WORKING_ATOMIC_POINTER #undef ABSL_HAVE_WORKING_CONSTEXPR_STATIC_INIT } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ATOMIC_HOOK_H_ abseil-20220623.1/absl/base/internal/atomic_hook_test.cc000066400000000000000000000060671430371345100226750ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/atomic_hook.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/internal/atomic_hook_test_helper.h" namespace { using ::testing::Eq; int value = 0; void TestHook(int x) { value = x; } TEST(AtomicHookTest, NoDefaultFunction) { ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< void (*)(int)> hook; value = 0; // Test the default DummyFunction. EXPECT_TRUE(hook.Load() == nullptr); EXPECT_EQ(value, 0); hook(1); EXPECT_EQ(value, 0); // Test a stored hook. hook.Store(TestHook); EXPECT_TRUE(hook.Load() == TestHook); EXPECT_EQ(value, 0); hook(1); EXPECT_EQ(value, 1); // Calling Store() with the same hook should not crash. hook.Store(TestHook); EXPECT_TRUE(hook.Load() == TestHook); EXPECT_EQ(value, 1); hook(2); EXPECT_EQ(value, 2); } TEST(AtomicHookTest, WithDefaultFunction) { // Set the default value to TestHook at compile-time. ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static absl::base_internal::AtomicHook< void (*)(int)> hook(TestHook); value = 0; // Test the default value is TestHook. EXPECT_TRUE(hook.Load() == TestHook); EXPECT_EQ(value, 0); hook(1); EXPECT_EQ(value, 1); // Calling Store() with the same hook should not crash. hook.Store(TestHook); EXPECT_TRUE(hook.Load() == TestHook); EXPECT_EQ(value, 1); hook(2); EXPECT_EQ(value, 2); } ABSL_CONST_INIT int override_func_calls = 0; void OverrideFunc() { override_func_calls++; } static struct OverrideInstaller { OverrideInstaller() { absl::atomic_hook_internal::func.Store(OverrideFunc); } } override_installer; TEST(AtomicHookTest, DynamicInitFromAnotherTU) { // MSVC 14.2 doesn't do constexpr static init correctly; in particular it // tends to sequence static init (i.e. defaults) of `AtomicHook` objects // after their dynamic init (i.e. overrides), overwriting whatever value was // written during dynamic init. This regression test validates the fix. 
// https://developercommunity.visualstudio.com/content/problem/336946/class-with-constexpr-constructor-not-using-static.html EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); EXPECT_THAT(override_func_calls, Eq(0)); absl::atomic_hook_internal::func(); EXPECT_THAT(absl::atomic_hook_internal::default_func_calls, Eq(0)); EXPECT_THAT(override_func_calls, Eq(1)); EXPECT_THAT(absl::atomic_hook_internal::func.Load(), Eq(OverrideFunc)); } } // namespace abseil-20220623.1/absl/base/internal/atomic_hook_test_helper.cc000066400000000000000000000021371430371345100242260ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/atomic_hook_test_helper.h" #include "absl/base/attributes.h" #include "absl/base/internal/atomic_hook.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace atomic_hook_internal { ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook func(DefaultFunc); ABSL_CONST_INIT int default_func_calls = 0; void DefaultFunc() { default_func_calls++; } void RegisterFunc(VoidF f) { func.Store(f); } } // namespace atomic_hook_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/atomic_hook_test_helper.h000066400000000000000000000021061430371345100240640ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ #define ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ #include "absl/base/internal/atomic_hook.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace atomic_hook_internal { using VoidF = void (*)(); extern absl::base_internal::AtomicHook func; extern int default_func_calls; void DefaultFunc(); void RegisterFunc(VoidF func); } // namespace atomic_hook_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_ATOMIC_HOOK_TEST_HELPER_H_ abseil-20220623.1/absl/base/internal/cmake_thread_test.cc000066400000000000000000000015151430371345100230010ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include "absl/base/internal/thread_identity.h" int main() { auto* tid = absl::base_internal::CurrentThreadIdentityIfPresent(); // Make sure the above call can't be optimized out std::cout << (void*)tid << std::endl; } abseil-20220623.1/absl/base/internal/cycleclock.cc000066400000000000000000000042351430371345100214500ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // The implementation of CycleClock::Frequency. // // NOTE: only i386 and x86_64 have been well tested. // PPC, sparc, alpha, and ia64 are based on // http://peter.kuscsik.com/wordpress/?p=14 // with modifications by m3b. See also // https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h #include "absl/base/internal/cycleclock.h" #include #include // NOLINT(build/c++11) #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/unscaledcycleclock.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { #if ABSL_USE_UNSCALED_CYCLECLOCK #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int32_t CycleClock::kShift; constexpr double CycleClock::kFrequencyScale; #endif ABSL_CONST_INIT std::atomic CycleClock::cycle_clock_source_{nullptr}; void CycleClockSource::Register(CycleClockSourceFunc source) { // Corresponds to the load(std::memory_order_acquire) in LoadCycleClockSource. 
CycleClock::cycle_clock_source_.store(source, std::memory_order_release); } #ifdef _WIN32 int64_t CycleClock::Now() { auto fn = LoadCycleClockSource(); if (fn == nullptr) { return base_internal::UnscaledCycleClock::Now() >> kShift; } return fn() >> kShift; } #endif #else int64_t CycleClock::Now() { return std::chrono::duration_cast( std::chrono::steady_clock::now().time_since_epoch()) .count(); } double CycleClock::Frequency() { return 1e9; } #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/cycleclock.h000066400000000000000000000127361430371345100213170ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: cycleclock.h // ----------------------------------------------------------------------------- // // This header file defines a `CycleClock`, which yields the value and frequency // of a cycle counter that increments at a rate that is approximately constant. // // NOTE: // // The cycle counter frequency is not necessarily related to the core clock // frequency and should not be treated as such. That is, `CycleClock` cycles are // not necessarily "CPU cycles" and code should not rely on that behavior, even // if experimentally observed. // // An arbitrary offset may have been added to the counter at power on. 
// // On some platforms, the rate and offset of the counter may differ // slightly when read from different CPUs of a multiprocessor. Usually, // we try to ensure that the operating system adjusts values periodically // so that values agree approximately. If you need stronger guarantees, // consider using alternate interfaces. // // The CPU is not required to maintain the ordering of a cycle counter read // with respect to surrounding instructions. #ifndef ABSL_BASE_INTERNAL_CYCLECLOCK_H_ #define ABSL_BASE_INTERNAL_CYCLECLOCK_H_ #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/unscaledcycleclock.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { using CycleClockSourceFunc = int64_t (*)(); // ----------------------------------------------------------------------------- // CycleClock // ----------------------------------------------------------------------------- class CycleClock { public: // CycleClock::Now() // // Returns the value of a cycle counter that counts at a rate that is // approximately constant. static int64_t Now(); // CycleClock::Frequency() // // Returns the amount by which `CycleClock::Now()` increases per second. Note // that this value may not necessarily match the core CPU clock frequency. static double Frequency(); private: #if ABSL_USE_UNSCALED_CYCLECLOCK static CycleClockSourceFunc LoadCycleClockSource(); #ifdef NDEBUG #ifdef ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY // Not debug mode and the UnscaledCycleClock frequency is the CPU // frequency. Scale the CycleClock to prevent overflow if someone // tries to represent the time as cycles since the Unix epoch. static constexpr int32_t kShift = 1; #else // Not debug mode and the UnscaledCycleClock isn't operating at the // raw CPU frequency. There is no need to do any scaling, so don't // needlessly sacrifice precision. 
static constexpr int32_t kShift = 0; #endif #else // NDEBUG // In debug mode use a different shift to discourage depending on a // particular shift value. static constexpr int32_t kShift = 2; #endif // NDEBUG static constexpr double kFrequencyScale = 1.0 / (1 << kShift); ABSL_CONST_INIT static std::atomic cycle_clock_source_; #endif // ABSL_USE_UNSCALED_CYCLECLOC CycleClock() = delete; // no instances CycleClock(const CycleClock&) = delete; CycleClock& operator=(const CycleClock&) = delete; friend class CycleClockSource; }; class CycleClockSource { private: // CycleClockSource::Register() // // Register a function that provides an alternate source for the unscaled CPU // cycle count value. The source function must be async signal safe, must not // call CycleClock::Now(), and must have a frequency that matches that of the // unscaled clock used by CycleClock. A nullptr value resets CycleClock to use // the default source. static void Register(CycleClockSourceFunc source); }; #if ABSL_USE_UNSCALED_CYCLECLOCK inline CycleClockSourceFunc CycleClock::LoadCycleClockSource() { #if !defined(__x86_64__) // Optimize for the common case (no callback) by first doing a relaxed load; // this is significantly faster on non-x86 platforms. if (cycle_clock_source_.load(std::memory_order_relaxed) == nullptr) { return nullptr; } #endif // !defined(__x86_64__) // This corresponds to the store(std::memory_order_release) in // CycleClockSource::Register, and makes sure that any updates made prior to // registering the callback are visible to this thread before the callback // is invoked. return cycle_clock_source_.load(std::memory_order_acquire); } // Accessing globals in inlined code in Window DLLs is problematic. 
#ifndef _WIN32 inline int64_t CycleClock::Now() { auto fn = LoadCycleClockSource(); if (fn == nullptr) { return base_internal::UnscaledCycleClock::Now() >> kShift; } return fn() >> kShift; } #endif inline double CycleClock::Frequency() { return kFrequencyScale * base_internal::UnscaledCycleClock::Frequency(); } #endif // ABSL_USE_UNSCALED_CYCLECLOCK } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_CYCLECLOCK_H_ abseil-20220623.1/absl/base/internal/direct_mmap.h000066400000000000000000000132651430371345100214660ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Functions for directly invoking mmap() via syscall, avoiding the case where // mmap() has been locally overridden. #ifndef ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ #define ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ #include "absl/base/config.h" #ifdef ABSL_HAVE_MMAP #include #ifdef __linux__ #include #ifdef __BIONIC__ #include #else #include #endif #include #include #include #include #include #ifdef __mips__ // Include definitions of the ABI currently in use. #if defined(__BIONIC__) || !defined(__GLIBC__) // Android doesn't have sgidefs.h, but does have asm/sgidefs.h, which has the // definitions we need. #include #else #include #endif // __BIONIC__ || !__GLIBC__ #endif // __mips__ // SYS_mmap and SYS_munmap are not defined in Android. 
#ifdef __BIONIC__ extern "C" void* __mmap2(void*, size_t, int, int, int, size_t); #if defined(__NR_mmap) && !defined(SYS_mmap) #define SYS_mmap __NR_mmap #endif #ifndef SYS_munmap #define SYS_munmap __NR_munmap #endif #endif // __BIONIC__ #if defined(__NR_mmap2) && !defined(SYS_mmap2) #define SYS_mmap2 __NR_mmap2 #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Platform specific logic extracted from // https://chromium.googlesource.com/linux-syscall-support/+/master/linux_syscall_support.h inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off64_t offset) noexcept { #if defined(__i386__) || defined(__ARM_ARCH_3__) || defined(__ARM_EABI__) || \ defined(__m68k__) || defined(__sh__) || \ (defined(__hppa__) && !defined(__LP64__)) || \ (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || \ (defined(__PPC__) && !defined(__PPC64__)) || \ (defined(__riscv) && __riscv_xlen == 32) || \ (defined(__s390__) && !defined(__s390x__)) || \ (defined(__sparc__) && !defined(__arch64__)) // On these architectures, implement mmap with mmap2. static int pagesize = 0; if (pagesize == 0) { #if defined(__wasm__) || defined(__asmjs__) pagesize = getpagesize(); #else pagesize = sysconf(_SC_PAGESIZE); #endif } if (offset < 0 || offset % pagesize != 0) { errno = EINVAL; return MAP_FAILED; } #ifdef __BIONIC__ // SYS_mmap2 has problems on Android API level <= 16. // Workaround by invoking __mmap2() instead. return __mmap2(start, length, prot, flags, fd, offset / pagesize); #else return reinterpret_cast( syscall(SYS_mmap2, start, length, prot, flags, fd, static_cast(offset / pagesize))); #endif #elif defined(__s390x__) // On s390x, mmap() arguments are passed in memory. 
unsigned long buf[6] = {reinterpret_cast(start), // NOLINT static_cast(length), // NOLINT static_cast(prot), // NOLINT static_cast(flags), // NOLINT static_cast(fd), // NOLINT static_cast(offset)}; // NOLINT return reinterpret_cast(syscall(SYS_mmap, buf)); #elif defined(__x86_64__) // The x32 ABI has 32 bit longs, but the syscall interface is 64 bit. // We need to explicitly cast to an unsigned 64 bit type to avoid implicit // sign extension. We can't cast pointers directly because those are // 32 bits, and gcc will dump ugly warnings about casting from a pointer // to an integer of a different size. We also need to make sure __off64_t // isn't truncated to 32-bits under x32. #define MMAP_SYSCALL_ARG(x) ((uint64_t)(uintptr_t)(x)) return reinterpret_cast( syscall(SYS_mmap, MMAP_SYSCALL_ARG(start), MMAP_SYSCALL_ARG(length), MMAP_SYSCALL_ARG(prot), MMAP_SYSCALL_ARG(flags), MMAP_SYSCALL_ARG(fd), static_cast(offset))); #undef MMAP_SYSCALL_ARG #else // Remaining 64-bit aritectures. static_assert(sizeof(unsigned long) == 8, "Platform is not 64-bit"); return reinterpret_cast( syscall(SYS_mmap, start, length, prot, flags, fd, offset)); #endif } inline int DirectMunmap(void* start, size_t length) { return static_cast(syscall(SYS_munmap, start, length)); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #else // !__linux__ // For non-linux platforms where we have mmap, just dispatch directly to the // actual mmap()/munmap() methods. 
namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { inline void* DirectMmap(void* start, size_t length, int prot, int flags, int fd, off_t offset) { return mmap(start, length, prot, flags, fd, offset); } inline int DirectMunmap(void* start, size_t length) { return munmap(start, length); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // __linux__ #endif // ABSL_HAVE_MMAP #endif // ABSL_BASE_INTERNAL_DIRECT_MMAP_H_ abseil-20220623.1/absl/base/internal/dynamic_annotations.h000066400000000000000000000370021430371345100232360ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // This file defines dynamic annotations for use with dynamic analysis tool // such as valgrind, PIN, etc. // // Dynamic annotation is a source code annotation that affects the generated // code (that is, the annotation is not a comment). Each such annotation is // attached to a particular instruction and/or to a particular object (address) // in the program. // // The annotations that should be used by users are macros in all upper-case // (e.g., ANNOTATE_THREAD_NAME). // // Actual implementation of these macros may differ depending on the dynamic // analysis tool being used. // // This file supports the following configurations: // - Dynamic Annotations enabled (with static thread-safety warnings disabled). 
// In this case, macros expand to functions implemented by Thread Sanitizer, // when building with TSan. When not provided an external implementation, // dynamic_annotations.cc provides no-op implementations. // // - Static Clang thread-safety warnings enabled. // When building with a Clang compiler that supports thread-safety warnings, // a subset of annotations can be statically-checked at compile-time. We // expand these macros to static-inline functions that can be analyzed for // thread-safety, but afterwards elided when building the final binary. // // - All annotations are disabled. // If neither Dynamic Annotations nor Clang thread-safety warnings are // enabled, then all annotation-macros expand to empty. #ifndef ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ #define ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ #include #include "absl/base/config.h" // ------------------------------------------------------------------------- // Decide which features are enabled #ifndef DYNAMIC_ANNOTATIONS_ENABLED #define DYNAMIC_ANNOTATIONS_ENABLED 0 #endif #if defined(__clang__) && !defined(SWIG) #define ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED 1 #endif #if DYNAMIC_ANNOTATIONS_ENABLED != 0 #define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 1 #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED 0 #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED 1 #else #define ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED 0 #define ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED 0 #define ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED 0 // Clang provides limited support for static thread-safety analysis through a // feature called Annotalysis. We configure macro-definitions according to // whether Annotalysis support is available. When running in opt-mode, GCC // will issue a warning, if these attributes are compiled. Only include them // when compiling using Clang. 
// ANNOTALYSIS_ENABLED == 1 when IGNORE_READ_ATTRIBUTE_ENABLED == 1 #define ABSL_INTERNAL_ANNOTALYSIS_ENABLED \ defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) // Read/write annotations are enabled in Annotalysis mode; disabled otherwise. #define ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED \ ABSL_INTERNAL_ANNOTALYSIS_ENABLED #endif // Memory annotations are also made available to LLVM's Memory Sanitizer #if defined(ABSL_HAVE_MEMORY_SANITIZER) && !defined(__native_client__) #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 1 #endif #ifndef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED #define ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED 0 #endif #ifdef __cplusplus #define ABSL_INTERNAL_BEGIN_EXTERN_C extern "C" { #define ABSL_INTERNAL_END_EXTERN_C } // extern "C" #define ABSL_INTERNAL_GLOBAL_SCOPED(F) ::F #define ABSL_INTERNAL_STATIC_INLINE inline #else #define ABSL_INTERNAL_BEGIN_EXTERN_C // empty #define ABSL_INTERNAL_END_EXTERN_C // empty #define ABSL_INTERNAL_GLOBAL_SCOPED(F) F #define ABSL_INTERNAL_STATIC_INLINE static inline #endif // ------------------------------------------------------------------------- // Define race annotations. #if ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 1 // ------------------------------------------------------------- // Annotations that suppress errors. It is usually better to express the // program's synchronization using the other annotations, but these can be used // when all else fails. // Report that we may have a benign race at `pointer`, with size // "sizeof(*(pointer))". `pointer` must be a non-void* pointer. Insert at the // point where `pointer` has been allocated, preferably close to the point // where the race happens. See also ANNOTATE_BENIGN_RACE_STATIC. 
#define ANNOTATE_BENIGN_RACE(pointer, description) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ (__FILE__, __LINE__, pointer, sizeof(*(pointer)), description) // Same as ANNOTATE_BENIGN_RACE(`address`, `description`), but applies to // the memory range [`address`, `address`+`size`). #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateBenignRaceSized) \ (__FILE__, __LINE__, address, size, description) // Enable (`enable`!=0) or disable (`enable`==0) race detection for all threads. // This annotation could be useful if you want to skip expensive race analysis // during some period of program execution, e.g. during initialization. #define ANNOTATE_ENABLE_RACE_DETECTION(enable) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateEnableRaceDetection) \ (__FILE__, __LINE__, enable) // ------------------------------------------------------------- // Annotations useful for debugging. // Report the current thread `name` to a race detector. #define ANNOTATE_THREAD_NAME(name) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateThreadName)(__FILE__, __LINE__, name) // ------------------------------------------------------------- // Annotations useful when implementing locks. They are not normally needed by // modules that merely use locks. The `lock` argument is a pointer to the lock // object. // Report that a lock has been created at address `lock`. #define ANNOTATE_RWLOCK_CREATE(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreate)(__FILE__, __LINE__, lock) // Report that a linker initialized lock has been created at address `lock`. #ifdef ABSL_HAVE_THREAD_SANITIZER #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockCreateStatic) \ (__FILE__, __LINE__, lock) #else #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) ANNOTATE_RWLOCK_CREATE(lock) #endif // Report that the lock at address `lock` is about to be destroyed. 
#define ANNOTATE_RWLOCK_DESTROY(lock) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockDestroy)(__FILE__, __LINE__, lock) // Report that the lock at address `lock` has been acquired. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockAcquired) \ (__FILE__, __LINE__, lock, is_w) // Report that the lock at address `lock` is about to be released. // `is_w`=1 for writer lock, `is_w`=0 for reader lock. #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateRWLockReleased) \ (__FILE__, __LINE__, lock, is_w) // Apply ANNOTATE_BENIGN_RACE_SIZED to a static variable `static_var`. #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) \ namespace { \ class static_var##_annotator { \ public: \ static_var##_annotator() { \ ANNOTATE_BENIGN_RACE_SIZED(&static_var, sizeof(static_var), \ #static_var ": " description); \ } \ }; \ static static_var##_annotator the##static_var##_annotator; \ } // namespace #else // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED == 0 #define ANNOTATE_RWLOCK_CREATE(lock) // empty #define ANNOTATE_RWLOCK_CREATE_STATIC(lock) // empty #define ANNOTATE_RWLOCK_DESTROY(lock) // empty #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) // empty #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) // empty #define ANNOTATE_BENIGN_RACE(address, description) // empty #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) // empty #define ANNOTATE_THREAD_NAME(name) // empty #define ANNOTATE_ENABLE_RACE_DETECTION(enable) // empty #define ANNOTATE_BENIGN_RACE_STATIC(static_var, description) // empty #endif // ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED // ------------------------------------------------------------------------- // Define memory annotations. 
#if ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 1 #include #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ __msan_unpoison(address, size) #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ __msan_allocated_memory(address, size) #else // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED == 0 #if DYNAMIC_ANNOTATIONS_ENABLED == 1 #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \ do { \ (void)(address); \ (void)(size); \ } while (0) #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) \ do { \ (void)(address); \ (void)(size); \ } while (0) #else #define ANNOTATE_MEMORY_IS_INITIALIZED(address, size) // empty #define ANNOTATE_MEMORY_IS_UNINITIALIZED(address, size) // empty #endif #endif // ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED // ------------------------------------------------------------------------- // Define IGNORE_READS_BEGIN/_END attributes. #if defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE \ __attribute((exclusive_lock_function("*"))) #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE \ __attribute((unlock_function("*"))) #else // !defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) #define ABSL_INTERNAL_IGNORE_READS_BEGIN_ATTRIBUTE // empty #define ABSL_INTERNAL_IGNORE_READS_END_ATTRIBUTE // empty #endif // defined(ABSL_INTERNAL_IGNORE_READS_ATTRIBUTE_ENABLED) // ------------------------------------------------------------------------- // Define IGNORE_READS_BEGIN/_END annotations. #if ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED == 1 // Request the analysis tool to ignore all reads in the current thread until // ANNOTATE_IGNORE_READS_END is called. Useful to ignore intentional racey // reads, while still checking other reads and all writes. // See also ANNOTATE_UNPROTECTED_READ. #define ANNOTATE_IGNORE_READS_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsBegin)(__FILE__, __LINE__) // Stop ignoring reads. 
#define ANNOTATE_IGNORE_READS_END() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreReadsEnd)(__FILE__, __LINE__) #elif defined(ABSL_INTERNAL_ANNOTALYSIS_ENABLED) // When Annotalysis is enabled without Dynamic Annotations, the use of // static-inline functions allows the annotations to be read at compile-time, // while still letting the compiler elide the functions from the final build. // // TODO(delesley) -- The exclusive lock here ignores writes as well, but // allows IGNORE_READS_AND_WRITES to work properly. #define ANNOTATE_IGNORE_READS_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsBegin)() #define ANNOTATE_IGNORE_READS_END() \ ABSL_INTERNAL_GLOBAL_SCOPED(AbslInternalAnnotateIgnoreReadsEnd)() #else #define ANNOTATE_IGNORE_READS_BEGIN() // empty #define ANNOTATE_IGNORE_READS_END() // empty #endif // ------------------------------------------------------------------------- // Define IGNORE_WRITES_BEGIN/_END annotations. #if ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED == 1 // Similar to ANNOTATE_IGNORE_READS_BEGIN, but ignore writes instead. #define ANNOTATE_IGNORE_WRITES_BEGIN() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesBegin)(__FILE__, __LINE__) // Stop ignoring writes. #define ANNOTATE_IGNORE_WRITES_END() \ ABSL_INTERNAL_GLOBAL_SCOPED(AnnotateIgnoreWritesEnd)(__FILE__, __LINE__) #else #define ANNOTATE_IGNORE_WRITES_BEGIN() // empty #define ANNOTATE_IGNORE_WRITES_END() // empty #endif // ------------------------------------------------------------------------- // Define the ANNOTATE_IGNORE_READS_AND_WRITES_* annotations using the more // primitive annotations defined above. // // Instead of doing // ANNOTATE_IGNORE_READS_BEGIN(); // ... = x; // ANNOTATE_IGNORE_READS_END(); // one can use // ... = ANNOTATE_UNPROTECTED_READ(x); #if defined(ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED) // Start ignoring all memory accesses (both reads and writes). 
#define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() \ do { \ ANNOTATE_IGNORE_READS_BEGIN(); \ ANNOTATE_IGNORE_WRITES_BEGIN(); \ } while (0) // Stop ignoring both reads and writes. #define ANNOTATE_IGNORE_READS_AND_WRITES_END() \ do { \ ANNOTATE_IGNORE_WRITES_END(); \ ANNOTATE_IGNORE_READS_END(); \ } while (0) #ifdef __cplusplus // ANNOTATE_UNPROTECTED_READ is the preferred way to annotate racey reads. #define ANNOTATE_UNPROTECTED_READ(x) \ absl::base_internal::AnnotateUnprotectedRead(x) #endif #else #define ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN() // empty #define ANNOTATE_IGNORE_READS_AND_WRITES_END() // empty #define ANNOTATE_UNPROTECTED_READ(x) (x) #endif // ------------------------------------------------------------------------- // Address sanitizer annotations #ifdef ABSL_HAVE_ADDRESS_SANITIZER // Describe the current state of a contiguous container such as e.g. // std::vector or std::string. For more details see // sanitizer/common_interface_defs.h, which is provided by the compiler. #include #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) \ __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid) #define ADDRESS_SANITIZER_REDZONE(name) \ struct { \ char x[8] __attribute__((aligned(8))); \ } name #else #define ANNOTATE_CONTIGUOUS_CONTAINER(beg, end, old_mid, new_mid) #define ADDRESS_SANITIZER_REDZONE(name) static_assert(true, "") #endif // ABSL_HAVE_ADDRESS_SANITIZER // ------------------------------------------------------------------------- // Undefine the macros intended only for this file. 
#undef ABSL_INTERNAL_RACE_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_MEMORY_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_READS_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_WRITES_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_ANNOTALYSIS_ENABLED #undef ABSL_INTERNAL_READS_WRITES_ANNOTATIONS_ENABLED #undef ABSL_INTERNAL_BEGIN_EXTERN_C #undef ABSL_INTERNAL_END_EXTERN_C #undef ABSL_INTERNAL_STATIC_INLINE #endif // ABSL_BASE_INTERNAL_DYNAMIC_ANNOTATIONS_H_ abseil-20220623.1/absl/base/internal/endian.h000066400000000000000000000220551430371345100204350ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// #ifndef ABSL_BASE_INTERNAL_ENDIAN_H_ #define ABSL_BASE_INTERNAL_ENDIAN_H_ #include #include #include "absl/base/casts.h" #include "absl/base/config.h" #include "absl/base/internal/unaligned_access.h" #include "absl/base/port.h" namespace absl { ABSL_NAMESPACE_BEGIN inline uint64_t gbswap_64(uint64_t host_int) { #if ABSL_HAVE_BUILTIN(__builtin_bswap64) || defined(__GNUC__) return __builtin_bswap64(host_int); #elif defined(_MSC_VER) return _byteswap_uint64(host_int); #else return (((host_int & uint64_t{0xFF}) << 56) | ((host_int & uint64_t{0xFF00}) << 40) | ((host_int & uint64_t{0xFF0000}) << 24) | ((host_int & uint64_t{0xFF000000}) << 8) | ((host_int & uint64_t{0xFF00000000}) >> 8) | ((host_int & uint64_t{0xFF0000000000}) >> 24) | ((host_int & uint64_t{0xFF000000000000}) >> 40) | ((host_int & uint64_t{0xFF00000000000000}) >> 56)); #endif } inline uint32_t gbswap_32(uint32_t host_int) { #if ABSL_HAVE_BUILTIN(__builtin_bswap32) || defined(__GNUC__) return __builtin_bswap32(host_int); #elif defined(_MSC_VER) return _byteswap_ulong(host_int); #else return (((host_int & uint32_t{0xFF}) << 24) | ((host_int & uint32_t{0xFF00}) << 8) | ((host_int & uint32_t{0xFF0000}) >> 8) | ((host_int & uint32_t{0xFF000000}) >> 24)); #endif } inline uint16_t gbswap_16(uint16_t host_int) { #if ABSL_HAVE_BUILTIN(__builtin_bswap16) || defined(__GNUC__) return __builtin_bswap16(host_int); #elif defined(_MSC_VER) return _byteswap_ushort(host_int); #else return (((host_int & uint16_t{0xFF}) << 8) | ((host_int & uint16_t{0xFF00}) >> 8)); #endif } #ifdef ABSL_IS_LITTLE_ENDIAN // Portable definitions for htonl (host-to-network) and friends on little-endian // architectures. inline uint16_t ghtons(uint16_t x) { return gbswap_16(x); } inline uint32_t ghtonl(uint32_t x) { return gbswap_32(x); } inline uint64_t ghtonll(uint64_t x) { return gbswap_64(x); } #elif defined ABSL_IS_BIG_ENDIAN // Portable definitions for htonl (host-to-network) etc on big-endian // architectures. 
These definitions are simpler since the host byte order is the // same as network byte order. inline uint16_t ghtons(uint16_t x) { return x; } inline uint32_t ghtonl(uint32_t x) { return x; } inline uint64_t ghtonll(uint64_t x) { return x; } #else #error \ "Unsupported byte order: Either ABSL_IS_BIG_ENDIAN or " \ "ABSL_IS_LITTLE_ENDIAN must be defined" #endif // byte order inline uint16_t gntohs(uint16_t x) { return ghtons(x); } inline uint32_t gntohl(uint32_t x) { return ghtonl(x); } inline uint64_t gntohll(uint64_t x) { return ghtonll(x); } // Utilities to convert numbers between the current hosts's native byte // order and little-endian byte order // // Load/Store methods are alignment safe namespace little_endian { // Conversion functions. #ifdef ABSL_IS_LITTLE_ENDIAN inline uint16_t FromHost16(uint16_t x) { return x; } inline uint16_t ToHost16(uint16_t x) { return x; } inline uint32_t FromHost32(uint32_t x) { return x; } inline uint32_t ToHost32(uint32_t x) { return x; } inline uint64_t FromHost64(uint64_t x) { return x; } inline uint64_t ToHost64(uint64_t x) { return x; } inline constexpr bool IsLittleEndian() { return true; } #elif defined ABSL_IS_BIG_ENDIAN inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } inline constexpr bool IsLittleEndian() { return false; } #endif /* ENDIAN */ inline uint8_t FromHost(uint8_t x) { return x; } inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } inline uint8_t ToHost(uint8_t x) { return x; } inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } inline uint32_t 
ToHost(uint32_t x) { return ToHost32(x); } inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } inline int8_t FromHost(int8_t x) { return x; } inline int16_t FromHost(int16_t x) { return bit_cast(FromHost16(bit_cast(x))); } inline int32_t FromHost(int32_t x) { return bit_cast(FromHost32(bit_cast(x))); } inline int64_t FromHost(int64_t x) { return bit_cast(FromHost64(bit_cast(x))); } inline int8_t ToHost(int8_t x) { return x; } inline int16_t ToHost(int16_t x) { return bit_cast(ToHost16(bit_cast(x))); } inline int32_t ToHost(int32_t x) { return bit_cast(ToHost32(bit_cast(x))); } inline int64_t ToHost(int64_t x) { return bit_cast(ToHost64(bit_cast(x))); } // Functions to do unaligned loads and stores in little-endian order. inline uint16_t Load16(const void *p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); } inline void Store16(void *p, uint16_t v) { ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); } inline uint32_t Load32(const void *p) { return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); } inline void Store32(void *p, uint32_t v) { ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); } inline uint64_t Load64(const void *p) { return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); } inline void Store64(void *p, uint64_t v) { ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); } } // namespace little_endian // Utilities to convert numbers between the current hosts's native byte // order and big-endian byte order (same as network byte order) // // Load/Store methods are alignment safe namespace big_endian { #ifdef ABSL_IS_LITTLE_ENDIAN inline uint16_t FromHost16(uint16_t x) { return gbswap_16(x); } inline uint16_t ToHost16(uint16_t x) { return gbswap_16(x); } inline uint32_t FromHost32(uint32_t x) { return gbswap_32(x); } inline uint32_t ToHost32(uint32_t x) { return gbswap_32(x); } inline uint64_t FromHost64(uint64_t x) { return gbswap_64(x); } inline uint64_t ToHost64(uint64_t x) { return gbswap_64(x); } inline constexpr bool IsLittleEndian() { return 
true; } #elif defined ABSL_IS_BIG_ENDIAN inline uint16_t FromHost16(uint16_t x) { return x; } inline uint16_t ToHost16(uint16_t x) { return x; } inline uint32_t FromHost32(uint32_t x) { return x; } inline uint32_t ToHost32(uint32_t x) { return x; } inline uint64_t FromHost64(uint64_t x) { return x; } inline uint64_t ToHost64(uint64_t x) { return x; } inline constexpr bool IsLittleEndian() { return false; } #endif /* ENDIAN */ inline uint8_t FromHost(uint8_t x) { return x; } inline uint16_t FromHost(uint16_t x) { return FromHost16(x); } inline uint32_t FromHost(uint32_t x) { return FromHost32(x); } inline uint64_t FromHost(uint64_t x) { return FromHost64(x); } inline uint8_t ToHost(uint8_t x) { return x; } inline uint16_t ToHost(uint16_t x) { return ToHost16(x); } inline uint32_t ToHost(uint32_t x) { return ToHost32(x); } inline uint64_t ToHost(uint64_t x) { return ToHost64(x); } inline int8_t FromHost(int8_t x) { return x; } inline int16_t FromHost(int16_t x) { return bit_cast(FromHost16(bit_cast(x))); } inline int32_t FromHost(int32_t x) { return bit_cast(FromHost32(bit_cast(x))); } inline int64_t FromHost(int64_t x) { return bit_cast(FromHost64(bit_cast(x))); } inline int8_t ToHost(int8_t x) { return x; } inline int16_t ToHost(int16_t x) { return bit_cast(ToHost16(bit_cast(x))); } inline int32_t ToHost(int32_t x) { return bit_cast(ToHost32(bit_cast(x))); } inline int64_t ToHost(int64_t x) { return bit_cast(ToHost64(bit_cast(x))); } // Functions to do unaligned loads and stores in big-endian order. 
inline uint16_t Load16(const void *p) { return ToHost16(ABSL_INTERNAL_UNALIGNED_LOAD16(p)); } inline void Store16(void *p, uint16_t v) { ABSL_INTERNAL_UNALIGNED_STORE16(p, FromHost16(v)); } inline uint32_t Load32(const void *p) { return ToHost32(ABSL_INTERNAL_UNALIGNED_LOAD32(p)); } inline void Store32(void *p, uint32_t v) { ABSL_INTERNAL_UNALIGNED_STORE32(p, FromHost32(v)); } inline uint64_t Load64(const void *p) { return ToHost64(ABSL_INTERNAL_UNALIGNED_LOAD64(p)); } inline void Store64(void *p, uint64_t v) { ABSL_INTERNAL_UNALIGNED_STORE64(p, FromHost64(v)); } } // namespace big_endian ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ENDIAN_H_ abseil-20220623.1/absl/base/internal/endian_test.cc000066400000000000000000000172401430371345100216320ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/endian.h" #include #include #include #include #include #include "gtest/gtest.h" #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { const uint64_t kInitialNumber{0x0123456789abcdef}; const uint64_t k64Value{kInitialNumber}; const uint32_t k32Value{0x01234567}; const uint16_t k16Value{0x0123}; const int kNumValuesToTest = 1000000; const int kRandomSeed = 12345; #if defined(ABSL_IS_BIG_ENDIAN) const uint64_t kInitialInNetworkOrder{kInitialNumber}; const uint64_t k64ValueLE{0xefcdab8967452301}; const uint32_t k32ValueLE{0x67452301}; const uint16_t k16ValueLE{0x2301}; const uint64_t k64ValueBE{kInitialNumber}; const uint32_t k32ValueBE{k32Value}; const uint16_t k16ValueBE{k16Value}; #elif defined(ABSL_IS_LITTLE_ENDIAN) const uint64_t kInitialInNetworkOrder{0xefcdab8967452301}; const uint64_t k64ValueLE{kInitialNumber}; const uint32_t k32ValueLE{k32Value}; const uint16_t k16ValueLE{k16Value}; const uint64_t k64ValueBE{0xefcdab8967452301}; const uint32_t k32ValueBE{0x67452301}; const uint16_t k16ValueBE{0x2301}; #endif std::vector GenerateAllUint16Values() { std::vector result; result.reserve(size_t{1} << (sizeof(uint16_t) * 8)); for (uint32_t i = std::numeric_limits::min(); i <= std::numeric_limits::max(); ++i) { result.push_back(static_cast(i)); } return result; } template std::vector GenerateRandomIntegers(size_t num_values_to_test) { std::vector result; result.reserve(num_values_to_test); std::mt19937_64 rng(kRandomSeed); for (size_t i = 0; i < num_values_to_test; ++i) { result.push_back(rng()); } return result; } void ManualByteSwap(char* bytes, int length) { if (length == 1) return; EXPECT_EQ(0, length % 2); for (int i = 0; i < length / 2; ++i) { int j = (length - 1) - i; using std::swap; swap(bytes[i], bytes[j]); } } template inline T UnalignedLoad(const char* p) { static_assert( sizeof(T) == 1 || sizeof(T) == 2 || sizeof(T) == 4 || sizeof(T) == 8, "Unexpected type size"); switch (sizeof(T)) { case 1: return 
*reinterpret_cast(p); case 2: return ABSL_INTERNAL_UNALIGNED_LOAD16(p); case 4: return ABSL_INTERNAL_UNALIGNED_LOAD32(p); case 8: return ABSL_INTERNAL_UNALIGNED_LOAD64(p); default: // Suppresses invalid "not all control paths return a value" on MSVC return {}; } } template static void GBSwapHelper(const std::vector& host_values_to_test, const ByteSwapper& byte_swapper) { // Test byte_swapper against a manual byte swap. for (typename std::vector::const_iterator it = host_values_to_test.begin(); it != host_values_to_test.end(); ++it) { T host_value = *it; char actual_value[sizeof(host_value)]; memcpy(actual_value, &host_value, sizeof(host_value)); byte_swapper(actual_value); char expected_value[sizeof(host_value)]; memcpy(expected_value, &host_value, sizeof(host_value)); ManualByteSwap(expected_value, sizeof(host_value)); ASSERT_EQ(0, memcmp(actual_value, expected_value, sizeof(host_value))) << "Swap output for 0x" << std::hex << host_value << " does not match. " << "Expected: 0x" << UnalignedLoad(expected_value) << "; " << "actual: 0x" << UnalignedLoad(actual_value); } } void Swap16(char* bytes) { ABSL_INTERNAL_UNALIGNED_STORE16( bytes, gbswap_16(ABSL_INTERNAL_UNALIGNED_LOAD16(bytes))); } void Swap32(char* bytes) { ABSL_INTERNAL_UNALIGNED_STORE32( bytes, gbswap_32(ABSL_INTERNAL_UNALIGNED_LOAD32(bytes))); } void Swap64(char* bytes) { ABSL_INTERNAL_UNALIGNED_STORE64( bytes, gbswap_64(ABSL_INTERNAL_UNALIGNED_LOAD64(bytes))); } TEST(EndianessTest, Uint16) { GBSwapHelper(GenerateAllUint16Values(), &Swap16); } TEST(EndianessTest, Uint32) { GBSwapHelper(GenerateRandomIntegers(kNumValuesToTest), &Swap32); } TEST(EndianessTest, Uint64) { GBSwapHelper(GenerateRandomIntegers(kNumValuesToTest), &Swap64); } TEST(EndianessTest, ghtonll_gntohll) { // Test that absl::ghtonl compiles correctly uint32_t test = 0x01234567; EXPECT_EQ(absl::gntohl(absl::ghtonl(test)), test); uint64_t comp = absl::ghtonll(kInitialNumber); EXPECT_EQ(comp, kInitialInNetworkOrder); comp = 
absl::gntohll(kInitialInNetworkOrder); EXPECT_EQ(comp, kInitialNumber); // Test that htonll and ntohll are each others' inverse functions on a // somewhat assorted batch of numbers. 37 is chosen to not be anything // particularly nice base 2. uint64_t value = 1; for (int i = 0; i < 100; ++i) { comp = absl::ghtonll(absl::gntohll(value)); EXPECT_EQ(value, comp); comp = absl::gntohll(absl::ghtonll(value)); EXPECT_EQ(value, comp); value *= 37; } } TEST(EndianessTest, little_endian) { // Check little_endian uint16_t. uint64_t comp = little_endian::FromHost16(k16Value); EXPECT_EQ(comp, k16ValueLE); comp = little_endian::ToHost16(k16ValueLE); EXPECT_EQ(comp, k16Value); // Check little_endian uint32_t. comp = little_endian::FromHost32(k32Value); EXPECT_EQ(comp, k32ValueLE); comp = little_endian::ToHost32(k32ValueLE); EXPECT_EQ(comp, k32Value); // Check little_endian uint64_t. comp = little_endian::FromHost64(k64Value); EXPECT_EQ(comp, k64ValueLE); comp = little_endian::ToHost64(k64ValueLE); EXPECT_EQ(comp, k64Value); // Check little-endian Load and store functions. uint16_t u16Buf; uint32_t u32Buf; uint64_t u64Buf; little_endian::Store16(&u16Buf, k16Value); EXPECT_EQ(u16Buf, k16ValueLE); comp = little_endian::Load16(&u16Buf); EXPECT_EQ(comp, k16Value); little_endian::Store32(&u32Buf, k32Value); EXPECT_EQ(u32Buf, k32ValueLE); comp = little_endian::Load32(&u32Buf); EXPECT_EQ(comp, k32Value); little_endian::Store64(&u64Buf, k64Value); EXPECT_EQ(u64Buf, k64ValueLE); comp = little_endian::Load64(&u64Buf); EXPECT_EQ(comp, k64Value); } TEST(EndianessTest, big_endian) { // Check big-endian Load and store functions. 
uint16_t u16Buf; uint32_t u32Buf; uint64_t u64Buf; unsigned char buffer[10]; big_endian::Store16(&u16Buf, k16Value); EXPECT_EQ(u16Buf, k16ValueBE); uint64_t comp = big_endian::Load16(&u16Buf); EXPECT_EQ(comp, k16Value); big_endian::Store32(&u32Buf, k32Value); EXPECT_EQ(u32Buf, k32ValueBE); comp = big_endian::Load32(&u32Buf); EXPECT_EQ(comp, k32Value); big_endian::Store64(&u64Buf, k64Value); EXPECT_EQ(u64Buf, k64ValueBE); comp = big_endian::Load64(&u64Buf); EXPECT_EQ(comp, k64Value); big_endian::Store16(buffer + 1, k16Value); EXPECT_EQ(u16Buf, k16ValueBE); comp = big_endian::Load16(buffer + 1); EXPECT_EQ(comp, k16Value); big_endian::Store32(buffer + 1, k32Value); EXPECT_EQ(u32Buf, k32ValueBE); comp = big_endian::Load32(buffer + 1); EXPECT_EQ(comp, k32Value); big_endian::Store64(buffer + 1, k64Value); EXPECT_EQ(u64Buf, k64ValueBE); comp = big_endian::Load64(buffer + 1); EXPECT_EQ(comp, k64Value); } } // namespace ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/errno_saver.h000066400000000000000000000024511430371345100215220ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ #define ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // `ErrnoSaver` captures the value of `errno` upon construction and restores it // upon deletion. 
It is used in low-level code and must be super fast. Do not // add instrumentation, even in debug modes. class ErrnoSaver { public: ErrnoSaver() : saved_errno_(errno) {} ~ErrnoSaver() { errno = saved_errno_; } int operator()() const { return saved_errno_; } private: const int saved_errno_; }; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_ERRNO_SAVER_H_ abseil-20220623.1/absl/base/internal/errno_saver_test.cc000066400000000000000000000026441430371345100227230ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/errno_saver.h" #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/strerror.h" namespace { using ::testing::Eq; struct ErrnoPrinter { int no; }; std::ostream &operator<<(std::ostream &os, ErrnoPrinter ep) { return os << absl::base_internal::StrError(ep.no) << " [" << ep.no << "]"; } bool operator==(ErrnoPrinter one, ErrnoPrinter two) { return one.no == two.no; } TEST(ErrnoSaverTest, Works) { errno = EDOM; { absl::base_internal::ErrnoSaver errno_saver; EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); errno = ERANGE; EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{ERANGE})); EXPECT_THAT(ErrnoPrinter{errno_saver()}, Eq(ErrnoPrinter{EDOM})); } EXPECT_THAT(ErrnoPrinter{errno}, Eq(ErrnoPrinter{EDOM})); } } // namespace abseil-20220623.1/absl/base/internal/exception_safety_testing.cc000066400000000000000000000042751430371345100244470ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/exception_safety_testing.h" #ifdef ABSL_HAVE_EXCEPTIONS #include "gtest/gtest.h" #include "absl/meta/type_traits.h" namespace testing { exceptions_internal::NoThrowTag nothrow_ctor; exceptions_internal::StrongGuaranteeTagType strong_guarantee; exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester() { return {}; } namespace exceptions_internal { int countdown = -1; ConstructorTracker* ConstructorTracker::current_tracker_instance_ = nullptr; void MaybeThrow(absl::string_view msg, bool throw_bad_alloc) { if (countdown-- == 0) { if (throw_bad_alloc) throw TestBadAllocException(msg); throw TestException(msg); } } testing::AssertionResult FailureMessage(const TestException& e, int countdown) noexcept { return testing::AssertionFailure() << "Exception thrown from " << e.what(); } std::string GetSpecString(TypeSpec spec) { std::string out; absl::string_view sep; const auto append = [&](absl::string_view s) { absl::StrAppend(&out, sep, s); sep = " | "; }; if (static_cast(TypeSpec::kNoThrowCopy & spec)) { append("kNoThrowCopy"); } if (static_cast(TypeSpec::kNoThrowMove & spec)) { append("kNoThrowMove"); } if (static_cast(TypeSpec::kNoThrowNew & spec)) { append("kNoThrowNew"); } return out; } std::string GetSpecString(AllocSpec spec) { return static_cast(AllocSpec::kNoThrowAllocate & spec) ? "kNoThrowAllocate" : ""; } } // namespace exceptions_internal } // namespace testing #endif // ABSL_HAVE_EXCEPTIONS abseil-20220623.1/absl/base/internal/exception_safety_testing.h000066400000000000000000001127151430371345100243100ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Utilities for testing exception-safety #ifndef ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ #define ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ #include "absl/base/config.h" #ifdef ABSL_HAVE_EXCEPTIONS #include #include #include #include #include #include #include #include #include "gtest/gtest.h" #include "absl/base/internal/pretty_function.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "absl/utility/utility.h" namespace testing { enum class TypeSpec; enum class AllocSpec; constexpr TypeSpec operator|(TypeSpec a, TypeSpec b) { using T = absl::underlying_type_t; return static_cast(static_cast(a) | static_cast(b)); } constexpr TypeSpec operator&(TypeSpec a, TypeSpec b) { using T = absl::underlying_type_t; return static_cast(static_cast(a) & static_cast(b)); } constexpr AllocSpec operator|(AllocSpec a, AllocSpec b) { using T = absl::underlying_type_t; return static_cast(static_cast(a) | static_cast(b)); } constexpr AllocSpec operator&(AllocSpec a, AllocSpec b) { using T = absl::underlying_type_t; return static_cast(static_cast(a) & static_cast(b)); } namespace exceptions_internal { std::string GetSpecString(TypeSpec); std::string GetSpecString(AllocSpec); struct NoThrowTag {}; struct StrongGuaranteeTagType {}; // A simple exception class. We throw this so that test code can catch // exceptions specifically thrown by ThrowingValue. 
class TestException { public: explicit TestException(absl::string_view msg) : msg_(msg) {} virtual ~TestException() {} virtual const char* what() const noexcept { return msg_.c_str(); } private: std::string msg_; }; // TestBadAllocException exists because allocation functions must throw an // exception which can be caught by a handler of std::bad_alloc. We use a child // class of std::bad_alloc so we can customise the error message, and also // derive from TestException so we don't accidentally end up catching an actual // bad_alloc exception in TestExceptionSafety. class TestBadAllocException : public std::bad_alloc, public TestException { public: explicit TestBadAllocException(absl::string_view msg) : TestException(msg) {} using TestException::what; }; extern int countdown; // Allows the countdown variable to be set manually (defaulting to the initial // value of 0) inline void SetCountdown(int i = 0) { countdown = i; } // Sets the countdown to the terminal value -1 inline void UnsetCountdown() { SetCountdown(-1); } void MaybeThrow(absl::string_view msg, bool throw_bad_alloc = false); testing::AssertionResult FailureMessage(const TestException& e, int countdown) noexcept; struct TrackedAddress { bool is_alive; std::string description; }; // Inspects the constructions and destructions of anything inheriting from // TrackedObject. This allows us to safely "leak" TrackedObjects, as // ConstructorTracker will destroy everything left over in its destructor. 
class ConstructorTracker { public: explicit ConstructorTracker(int count) : countdown_(count) { assert(current_tracker_instance_ == nullptr); current_tracker_instance_ = this; } ~ConstructorTracker() { assert(current_tracker_instance_ == this); current_tracker_instance_ = nullptr; for (auto& it : address_map_) { void* address = it.first; TrackedAddress& tracked_address = it.second; if (tracked_address.is_alive) { ADD_FAILURE() << ErrorMessage(address, tracked_address.description, countdown_, "Object was not destroyed."); } } } static void ObjectConstructed(void* address, std::string description) { if (!CurrentlyTracking()) return; TrackedAddress& tracked_address = current_tracker_instance_->address_map_[address]; if (tracked_address.is_alive) { ADD_FAILURE() << ErrorMessage( address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-constructed. Current object was constructed by " + description); } tracked_address = {true, std::move(description)}; } static void ObjectDestructed(void* address) { if (!CurrentlyTracking()) return; auto it = current_tracker_instance_->address_map_.find(address); // Not tracked. Ignore. 
if (it == current_tracker_instance_->address_map_.end()) return; TrackedAddress& tracked_address = it->second; if (!tracked_address.is_alive) { ADD_FAILURE() << ErrorMessage(address, tracked_address.description, current_tracker_instance_->countdown_, "Object was re-destroyed."); } tracked_address.is_alive = false; } private: static bool CurrentlyTracking() { return current_tracker_instance_ != nullptr; } static std::string ErrorMessage(void* address, const std::string& address_description, int countdown, const std::string& error_description) { return absl::Substitute( "With coundtown at $0:\n" " $1\n" " Object originally constructed by $2\n" " Object address: $3\n", countdown, error_description, address_description, address); } std::unordered_map address_map_; int countdown_; static ConstructorTracker* current_tracker_instance_; }; class TrackedObject { public: TrackedObject(const TrackedObject&) = delete; TrackedObject(TrackedObject&&) = delete; protected: explicit TrackedObject(std::string description) { ConstructorTracker::ObjectConstructed(this, std::move(description)); } ~TrackedObject() noexcept { ConstructorTracker::ObjectDestructed(this); } }; } // namespace exceptions_internal extern exceptions_internal::NoThrowTag nothrow_ctor; extern exceptions_internal::StrongGuaranteeTagType strong_guarantee; // A test class which is convertible to bool. The conversion can be // instrumented to throw at a controlled time. class ThrowingBool { public: ThrowingBool(bool b) noexcept : b_(b) {} // NOLINT(runtime/explicit) operator bool() const { // NOLINT exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return b_; } private: bool b_; }; /* * Configuration enum for the ThrowingValue type that defines behavior for the * lifetime of the instance. Use testing::nothrow_ctor to prevent the integer * constructor from throwing. 
* * kEverythingThrows: Every operation can throw an exception * kNoThrowCopy: Copy construction and copy assignment will not throw * kNoThrowMove: Move construction and move assignment will not throw * kNoThrowNew: Overloaded operators new and new[] will not throw */ enum class TypeSpec { kEverythingThrows = 0, kNoThrowCopy = 1, kNoThrowMove = 1 << 1, kNoThrowNew = 1 << 2, }; /* * A testing class instrumented to throw an exception at a controlled time. * * ThrowingValue implements a slightly relaxed version of the Regular concept -- * that is it's a value type with the expected semantics. It also implements * arithmetic operations. It doesn't implement member and pointer operators * like operator-> or operator[]. * * ThrowingValue can be instrumented to have certain operations be noexcept by * using compile-time bitfield template arguments. That is, to make an * ThrowingValue which has noexcept move construction/assignment and noexcept * copy construction/assignment, use the following: * ThrowingValue my_thrwr{val}; */ template class ThrowingValue : private exceptions_internal::TrackedObject { static constexpr bool IsSpecified(TypeSpec spec) { return static_cast(Spec & spec); } static constexpr int kDefaultValue = 0; static constexpr int kBadValue = 938550620; public: ThrowingValue() : TrackedObject(GetInstanceString(kDefaultValue)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ = kDefaultValue; } ThrowingValue(const ThrowingValue& other) noexcept( IsSpecified(TypeSpec::kNoThrowCopy)) : TrackedObject(GetInstanceString(other.dummy_)) { if (!IsSpecified(TypeSpec::kNoThrowCopy)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); } dummy_ = other.dummy_; } ThrowingValue(ThrowingValue&& other) noexcept( IsSpecified(TypeSpec::kNoThrowMove)) : TrackedObject(GetInstanceString(other.dummy_)) { if (!IsSpecified(TypeSpec::kNoThrowMove)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); } dummy_ = other.dummy_; } explicit ThrowingValue(int i) : 
TrackedObject(GetInstanceString(i)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ = i; } ThrowingValue(int i, exceptions_internal::NoThrowTag) noexcept : TrackedObject(GetInstanceString(i)), dummy_(i) {} // absl expects nothrow destructors ~ThrowingValue() noexcept = default; ThrowingValue& operator=(const ThrowingValue& other) noexcept( IsSpecified(TypeSpec::kNoThrowCopy)) { dummy_ = kBadValue; if (!IsSpecified(TypeSpec::kNoThrowCopy)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); } dummy_ = other.dummy_; return *this; } ThrowingValue& operator=(ThrowingValue&& other) noexcept( IsSpecified(TypeSpec::kNoThrowMove)) { dummy_ = kBadValue; if (!IsSpecified(TypeSpec::kNoThrowMove)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); } dummy_ = other.dummy_; return *this; } // Arithmetic Operators ThrowingValue operator+(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ + other.dummy_, nothrow_ctor); } ThrowingValue operator+() const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_, nothrow_ctor); } ThrowingValue operator-(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ - other.dummy_, nothrow_ctor); } ThrowingValue operator-() const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(-dummy_, nothrow_ctor); } ThrowingValue& operator++() { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); ++dummy_; return *this; } ThrowingValue operator++(int) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); auto out = ThrowingValue(dummy_, nothrow_ctor); ++dummy_; return out; } ThrowingValue& operator--() { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); --dummy_; return *this; } ThrowingValue operator--(int) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); auto out = ThrowingValue(dummy_, nothrow_ctor); --dummy_; return out; 
} ThrowingValue operator*(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ * other.dummy_, nothrow_ctor); } ThrowingValue operator/(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ / other.dummy_, nothrow_ctor); } ThrowingValue operator%(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ % other.dummy_, nothrow_ctor); } ThrowingValue operator<<(int shift) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ << shift, nothrow_ctor); } ThrowingValue operator>>(int shift) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ >> shift, nothrow_ctor); } // Comparison Operators // NOTE: We use `ThrowingBool` instead of `bool` because most STL // types/containers requires T to be convertible to bool. friend ThrowingBool operator==(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ == b.dummy_; } friend ThrowingBool operator!=(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ != b.dummy_; } friend ThrowingBool operator<(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ < b.dummy_; } friend ThrowingBool operator<=(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ <= b.dummy_; } friend ThrowingBool operator>(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ > b.dummy_; } friend ThrowingBool operator>=(const ThrowingValue& a, const ThrowingValue& b) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return a.dummy_ >= b.dummy_; } // Logical 
Operators ThrowingBool operator!() const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return !dummy_; } ThrowingBool operator&&(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return dummy_ && other.dummy_; } ThrowingBool operator||(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return dummy_ || other.dummy_; } // Bitwise Logical Operators ThrowingValue operator~() const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(~dummy_, nothrow_ctor); } ThrowingValue operator&(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ & other.dummy_, nothrow_ctor); } ThrowingValue operator|(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ | other.dummy_, nothrow_ctor); } ThrowingValue operator^(const ThrowingValue& other) const { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return ThrowingValue(dummy_ ^ other.dummy_, nothrow_ctor); } // Compound Assignment operators ThrowingValue& operator+=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ += other.dummy_; return *this; } ThrowingValue& operator-=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ -= other.dummy_; return *this; } ThrowingValue& operator*=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ *= other.dummy_; return *this; } ThrowingValue& operator/=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ /= other.dummy_; return *this; } ThrowingValue& operator%=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ %= other.dummy_; return *this; } ThrowingValue& operator&=(const ThrowingValue& other) { 
exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ &= other.dummy_; return *this; } ThrowingValue& operator|=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ |= other.dummy_; return *this; } ThrowingValue& operator^=(const ThrowingValue& other) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ ^= other.dummy_; return *this; } ThrowingValue& operator<<=(int shift) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ <<= shift; return *this; } ThrowingValue& operator>>=(int shift) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ >>= shift; return *this; } // Pointer operators void operator&() const = delete; // NOLINT(runtime/operator) // Stream operators friend std::ostream& operator<<(std::ostream& os, const ThrowingValue& tv) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return os << GetInstanceString(tv.dummy_); } friend std::istream& operator>>(std::istream& is, const ThrowingValue&) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); return is; } // Memory management operators static void* operator new(size_t s) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { if (!IsSpecified(TypeSpec::kNoThrowNew)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); } return ::operator new(s); } static void* operator new[](size_t s) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { if (!IsSpecified(TypeSpec::kNoThrowNew)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); } return ::operator new[](s); } template static void* operator new(size_t s, Args&&... args) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { if (!IsSpecified(TypeSpec::kNoThrowNew)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); } return ::operator new(s, std::forward(args)...); } template static void* operator new[](size_t s, Args&&... 
args) noexcept( IsSpecified(TypeSpec::kNoThrowNew)) { if (!IsSpecified(TypeSpec::kNoThrowNew)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION, true); } return ::operator new[](s, std::forward(args)...); } // Abseil doesn't support throwing overloaded operator delete. These are // provided so a throwing operator-new can clean up after itself. void operator delete(void* p) noexcept { ::operator delete(p); } template void operator delete(void* p, Args&&... args) noexcept { ::operator delete(p, std::forward(args)...); } void operator delete[](void* p) noexcept { return ::operator delete[](p); } template void operator delete[](void* p, Args&&... args) noexcept { return ::operator delete[](p, std::forward(args)...); } // Non-standard access to the actual contained value. No need for this to // throw. int& Get() noexcept { return dummy_; } const int& Get() const noexcept { return dummy_; } private: static std::string GetInstanceString(int dummy) { return absl::StrCat("ThrowingValue<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); } int dummy_; }; // While not having to do with exceptions, explicitly delete comma operator, to // make sure we don't use it on user-supplied types. template void operator,(const ThrowingValue&, T&&) = delete; template void operator,(T&&, const ThrowingValue&) = delete; /* * Configuration enum for the ThrowingAllocator type that defines behavior for * the lifetime of the instance. * * kEverythingThrows: Calls to the member functions may throw * kNoThrowAllocate: Calls to the member functions will not throw */ enum class AllocSpec { kEverythingThrows = 0, kNoThrowAllocate = 1, }; /* * An allocator type which is instrumented to throw at a controlled time, or not * to throw, using AllocSpec. The supported settings are the default of every * function which is allowed to throw in a conforming allocator possibly * throwing, or nothing throws, in line with the ABSL_ALLOCATOR_THROWS * configuration macro. 
*/ template class ThrowingAllocator : private exceptions_internal::TrackedObject { static constexpr bool IsSpecified(AllocSpec spec) { return static_cast(Spec & spec); } public: using pointer = T*; using const_pointer = const T*; using reference = T&; using const_reference = const T&; using void_pointer = void*; using const_void_pointer = const void*; using value_type = T; using size_type = size_t; using difference_type = ptrdiff_t; using is_nothrow = std::integral_constant; using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; using is_always_equal = std::false_type; ThrowingAllocator() : TrackedObject(GetInstanceString(next_id_)) { exceptions_internal::MaybeThrow(ABSL_PRETTY_FUNCTION); dummy_ = std::make_shared(next_id_++); } template ThrowingAllocator(const ThrowingAllocator& other) noexcept // NOLINT : TrackedObject(GetInstanceString(*other.State())), dummy_(other.State()) {} // According to C++11 standard [17.6.3.5], Table 28, the move/copy ctors of // allocator shall not exit via an exception, thus they are marked noexcept. 
ThrowingAllocator(const ThrowingAllocator& other) noexcept : TrackedObject(GetInstanceString(*other.State())), dummy_(other.State()) {} template ThrowingAllocator(ThrowingAllocator&& other) noexcept // NOLINT : TrackedObject(GetInstanceString(*other.State())), dummy_(std::move(other.State())) {} ThrowingAllocator(ThrowingAllocator&& other) noexcept : TrackedObject(GetInstanceString(*other.State())), dummy_(std::move(other.State())) {} ~ThrowingAllocator() noexcept = default; ThrowingAllocator& operator=(const ThrowingAllocator& other) noexcept { dummy_ = other.State(); return *this; } template ThrowingAllocator& operator=( const ThrowingAllocator& other) noexcept { dummy_ = other.State(); return *this; } template ThrowingAllocator& operator=(ThrowingAllocator&& other) noexcept { dummy_ = std::move(other.State()); return *this; } template struct rebind { using other = ThrowingAllocator; }; pointer allocate(size_type n) noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); return static_cast(::operator new(n * sizeof(T))); } pointer allocate(size_type n, const_void_pointer) noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { return allocate(n); } void deallocate(pointer ptr, size_type) noexcept { ReadState(); ::operator delete(static_cast(ptr)); } template void construct(U* ptr, Args&&... 
args) noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); ::new (static_cast(ptr)) U(std::forward(args)...); } template void destroy(U* p) noexcept { ReadState(); p->~U(); } size_type max_size() const noexcept { return (std::numeric_limits::max)() / sizeof(value_type); } ThrowingAllocator select_on_container_copy_construction() noexcept( IsSpecified(AllocSpec::kNoThrowAllocate)) { ReadStateAndMaybeThrow(ABSL_PRETTY_FUNCTION); return *this; } template bool operator==(const ThrowingAllocator& other) const noexcept { return dummy_ == other.dummy_; } template bool operator!=(const ThrowingAllocator& other) const noexcept { return dummy_ != other.dummy_; } template friend class ThrowingAllocator; private: static std::string GetInstanceString(int dummy) { return absl::StrCat("ThrowingAllocator<", exceptions_internal::GetSpecString(Spec), ">(", dummy, ")"); } const std::shared_ptr& State() const { return dummy_; } std::shared_ptr& State() { return dummy_; } void ReadState() { // we know that this will never be true, but the compiler doesn't, so this // should safely force a read of the value. if (*dummy_ < 0) std::abort(); } void ReadStateAndMaybeThrow(absl::string_view msg) const { if (!IsSpecified(AllocSpec::kNoThrowAllocate)) { exceptions_internal::MaybeThrow( absl::Substitute("Allocator id $0 threw from $1", *dummy_, msg)); } } static int next_id_; std::shared_ptr dummy_; }; template int ThrowingAllocator::next_id_ = 0; // Tests for resource leaks by attempting to construct a T using args repeatedly // until successful, using the countdown method. Side effects can then be // tested for resource leaks. template void TestThrowingCtor(Args&&... 
args) { struct Cleanup { ~Cleanup() { exceptions_internal::UnsetCountdown(); } } c; for (int count = 0;; ++count) { exceptions_internal::ConstructorTracker ct(count); exceptions_internal::SetCountdown(count); try { T temp(std::forward(args)...); static_cast(temp); break; } catch (const exceptions_internal::TestException&) { } } } // Tests the nothrow guarantee of the provided nullary operation. If the an // exception is thrown, the result will be AssertionFailure(). Otherwise, it // will be AssertionSuccess(). template testing::AssertionResult TestNothrowOp(const Operation& operation) { struct Cleanup { Cleanup() { exceptions_internal::SetCountdown(); } ~Cleanup() { exceptions_internal::UnsetCountdown(); } } c; try { operation(); return testing::AssertionSuccess(); } catch (const exceptions_internal::TestException&) { return testing::AssertionFailure() << "TestException thrown during call to operation() when nothrow " "guarantee was expected."; } catch (...) { return testing::AssertionFailure() << "Unknown exception thrown during call to operation() when " "nothrow guarantee was expected."; } } namespace exceptions_internal { // Dummy struct for ExceptionSafetyTestBuilder<> partial state. struct UninitializedT {}; template class DefaultFactory { public: explicit DefaultFactory(const T& t) : t_(t) {} std::unique_ptr operator()() const { return absl::make_unique(t_); } private: T t_; }; template using EnableIfTestable = typename absl::enable_if_t< LazyContractsCount != 0 && !std::is_same::value && !std::is_same::value>; template class ExceptionSafetyTestBuilder; } // namespace exceptions_internal /* * Constructs an empty ExceptionSafetyTestBuilder. All * ExceptionSafetyTestBuilder objects are immutable and all With[thing] mutation * methods return new instances of ExceptionSafetyTestBuilder. 
* * In order to test a T for exception safety, a factory for that T, a testable * operation, and at least one contract callback returning an assertion * result must be applied using the respective methods. */ exceptions_internal::ExceptionSafetyTestBuilder<> MakeExceptionSafetyTester(); namespace exceptions_internal { template struct IsUniquePtr : std::false_type {}; template struct IsUniquePtr> : std::true_type {}; template struct FactoryPtrTypeHelper { using type = decltype(std::declval()()); static_assert(IsUniquePtr::value, "Factories must return a unique_ptr"); }; template using FactoryPtrType = typename FactoryPtrTypeHelper::type; template using FactoryElementType = typename FactoryPtrType::element_type; template class ExceptionSafetyTest { using Factory = std::function()>; using Operation = std::function; using Contract = std::function; public: template explicit ExceptionSafetyTest(const Factory& f, const Operation& op, const Contracts&... contracts) : factory_(f), operation_(op), contracts_{WrapContract(contracts)...} {} AssertionResult Test() const { for (int count = 0;; ++count) { exceptions_internal::ConstructorTracker ct(count); for (const auto& contract : contracts_) { auto t_ptr = factory_(); try { SetCountdown(count); operation_(t_ptr.get()); // Unset for the case that the operation throws no exceptions, which // would leave the countdown set and break the *next* exception safety // test after this one. 
UnsetCountdown(); return AssertionSuccess(); } catch (const exceptions_internal::TestException& e) { if (!contract(t_ptr.get())) { return AssertionFailure() << e.what() << " failed contract check"; } } } } } private: template Contract WrapContract(const ContractFn& contract) { return [contract](T* t_ptr) { return AssertionResult(contract(t_ptr)); }; } Contract WrapContract(StrongGuaranteeTagType) { return [this](T* t_ptr) { return AssertionResult(*factory_() == *t_ptr); }; } Factory factory_; Operation operation_; std::vector contracts_; }; /* * Builds a tester object that tests if performing a operation on a T follows * exception safety guarantees. Verification is done via contract assertion * callbacks applied to T instances post-throw. * * Template parameters for ExceptionSafetyTestBuilder: * * - Factory: The factory object (passed in via tester.WithFactory(...) or * tester.WithInitialValue(...)) must be invocable with the signature * `std::unique_ptr operator()() const` where T is the type being tested. * It is used for reliably creating identical T instances to test on. * * - Operation: The operation object (passsed in via tester.WithOperation(...) * or tester.Test(...)) must be invocable with the signature * `void operator()(T*) const` where T is the type being tested. It is used * for performing steps on a T instance that may throw and that need to be * checked for exception safety. Each call to the operation will receive a * fresh T instance so it's free to modify and destroy the T instances as it * pleases. * * - Contracts...: The contract assertion callback objects (passed in via * tester.WithContracts(...)) must be invocable with the signature * `testing::AssertionResult operator()(T*) const` where T is the type being * tested. Contract assertion callbacks are provided T instances post-throw. * They must return testing::AssertionSuccess when the type contracts of the * provided T instance hold. 
If the type contracts of the T instance do not * hold, they must return testing::AssertionFailure. Execution order of * Contracts... is unspecified. They will each individually get a fresh T * instance so they are free to modify and destroy the T instances as they * please. */ template class ExceptionSafetyTestBuilder { public: /* * Returns a new ExceptionSafetyTestBuilder with an included T factory based * on the provided T instance. The existing factory will not be included in * the newly created tester instance. The created factory returns a new T * instance by copy-constructing the provided const T& t. * * Preconditions for tester.WithInitialValue(const T& t): * * - The const T& t object must be copy-constructible where T is the type * being tested. For non-copy-constructible objects, use the method * tester.WithFactory(...). */ template ExceptionSafetyTestBuilder, Operation, Contracts...> WithInitialValue(const T& t) const { return WithFactory(DefaultFactory(t)); } /* * Returns a new ExceptionSafetyTestBuilder with the provided T factory * included. The existing factory will not be included in the newly-created * tester instance. This method is intended for use with types lacking a copy * constructor. Types that can be copy-constructed should instead use the * method tester.WithInitialValue(...). */ template ExceptionSafetyTestBuilder, Operation, Contracts...> WithFactory(const NewFactory& new_factory) const { return {new_factory, operation_, contracts_}; } /* * Returns a new ExceptionSafetyTestBuilder with the provided testable * operation included. The existing operation will not be included in the * newly created tester. */ template ExceptionSafetyTestBuilder, Contracts...> WithOperation(const NewOperation& new_operation) const { return {factory_, new_operation, contracts_}; } /* * Returns a new ExceptionSafetyTestBuilder with the provided MoreContracts... * combined with the Contracts... 
that were already included in the instance * on which the method was called. Contracts... cannot be removed or replaced * once added to an ExceptionSafetyTestBuilder instance. A fresh object must * be created in order to get an empty Contracts... list. * * In addition to passing in custom contract assertion callbacks, this method * accepts `testing::strong_guarantee` as an argument which checks T instances * post-throw against freshly created T instances via operator== to verify * that any state changes made during the execution of the operation were * properly rolled back. */ template ExceptionSafetyTestBuilder...> WithContracts(const MoreContracts&... more_contracts) const { return { factory_, operation_, std::tuple_cat(contracts_, std::tuple...>( more_contracts...))}; } /* * Returns a testing::AssertionResult that is the reduced result of the * exception safety algorithm. The algorithm short circuits and returns * AssertionFailure after the first contract callback returns an * AssertionFailure. Otherwise, if all contract callbacks return an * AssertionSuccess, the reduced result is AssertionSuccess. * * The passed-in testable operation will not be saved in a new tester instance * nor will it modify/replace the existing tester instance. This is useful * when each operation being tested is unique and does not need to be reused. * * Preconditions for tester.Test(const NewOperation& new_operation): * * - May only be called after at least one contract assertion callback and a * factory or initial value have been provided. */ template < typename NewOperation, typename = EnableIfTestable> testing::AssertionResult Test(const NewOperation& new_operation) const { return TestImpl(new_operation, absl::index_sequence_for()); } /* * Returns a testing::AssertionResult that is the reduced result of the * exception safety algorithm. The algorithm short circuits and returns * AssertionFailure after the first contract callback returns an * AssertionFailure. 
Otherwise, if all contract callbacks return an * AssertionSuccess, the reduced result is AssertionSuccess. * * Preconditions for tester.Test(): * * - May only be called after at least one contract assertion callback, a * factory or initial value and a testable operation have been provided. */ template < typename LazyOperation = Operation, typename = EnableIfTestable> testing::AssertionResult Test() const { return Test(operation_); } private: template friend class ExceptionSafetyTestBuilder; friend ExceptionSafetyTestBuilder<> testing::MakeExceptionSafetyTester(); ExceptionSafetyTestBuilder() {} ExceptionSafetyTestBuilder(const Factory& f, const Operation& o, const std::tuple& i) : factory_(f), operation_(o), contracts_(i) {} template testing::AssertionResult TestImpl(SelectedOperation selected_operation, absl::index_sequence) const { return ExceptionSafetyTest>( factory_, selected_operation, std::get(contracts_)...) .Test(); } Factory factory_; Operation operation_; std::tuple contracts_; }; } // namespace exceptions_internal } // namespace testing #endif // ABSL_HAVE_EXCEPTIONS #endif // ABSL_BASE_INTERNAL_EXCEPTION_SAFETY_TESTING_H_ abseil-20220623.1/absl/base/internal/exception_testing.h000066400000000000000000000027711430371345100227350ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Testing utilities for ABSL types which throw exceptions. 
#ifndef ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ #define ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ #include "gtest/gtest.h" #include "absl/base/config.h" // ABSL_BASE_INTERNAL_EXPECT_FAIL tests either for a specified thrown exception // if exceptions are enabled, or for death with a specified text in the error // message #ifdef ABSL_HAVE_EXCEPTIONS #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ EXPECT_THROW(expr, exception_t) #elif defined(__ANDROID__) // Android asserts do not log anywhere that gtest can currently inspect. // So we expect exit, but cannot match the message. #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ EXPECT_DEATH(expr, ".*") #else #define ABSL_BASE_INTERNAL_EXPECT_FAIL(expr, exception_t, text) \ EXPECT_DEATH_IF_SUPPORTED(expr, text) #endif #endif // ABSL_BASE_INTERNAL_EXCEPTION_TESTING_H_ abseil-20220623.1/absl/base/internal/fast_type_id.h000066400000000000000000000026621430371345100216530ustar00rootroot00000000000000// // Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// #ifndef ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ #define ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { template struct FastTypeTag { constexpr static char dummy_var = 0; }; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr char FastTypeTag::dummy_var; #endif // FastTypeId() evaluates at compile/link-time to a unique pointer for the // passed-in type. These are meant to be good match for keys into maps or // straight up comparisons. using FastTypeIdType = const void*; template constexpr inline FastTypeIdType FastTypeId() { return &FastTypeTag::dummy_var; } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_FAST_TYPE_ID_H_ abseil-20220623.1/absl/base/internal/fast_type_id_test.cc000066400000000000000000000063061430371345100230470ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/fast_type_id.h" #include #include #include #include "gtest/gtest.h" namespace { namespace bi = absl::base_internal; // NOLINTNEXTLINE #define PRIM_TYPES(A) \ A(bool) \ A(short) \ A(unsigned short) \ A(int) \ A(unsigned int) \ A(long) \ A(unsigned long) \ A(long long) \ A(unsigned long long) \ A(float) \ A(double) \ A(long double) TEST(FastTypeIdTest, PrimitiveTypes) { bi::FastTypeIdType type_ids[] = { #define A(T) bi::FastTypeId(), PRIM_TYPES(A) #undef A #define A(T) bi::FastTypeId(), PRIM_TYPES(A) #undef A #define A(T) bi::FastTypeId(), PRIM_TYPES(A) #undef A #define A(T) bi::FastTypeId(), PRIM_TYPES(A) #undef A }; size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); for (int i = 0; i < total_type_ids; ++i) { EXPECT_EQ(type_ids[i], type_ids[i]); for (int j = 0; j < i; ++j) { EXPECT_NE(type_ids[i], type_ids[j]); } } } #define FIXED_WIDTH_TYPES(A) \ A(int8_t) \ A(uint8_t) \ A(int16_t) \ A(uint16_t) \ A(int32_t) \ A(uint32_t) \ A(int64_t) \ A(uint64_t) TEST(FastTypeIdTest, FixedWidthTypes) { bi::FastTypeIdType type_ids[] = { #define A(T) bi::FastTypeId(), FIXED_WIDTH_TYPES(A) #undef A #define A(T) bi::FastTypeId(), FIXED_WIDTH_TYPES(A) #undef A #define A(T) bi::FastTypeId(), FIXED_WIDTH_TYPES(A) #undef A #define A(T) bi::FastTypeId(), FIXED_WIDTH_TYPES(A) #undef A }; size_t total_type_ids = sizeof(type_ids) / sizeof(bi::FastTypeIdType); for (int i = 0; i < total_type_ids; ++i) { EXPECT_EQ(type_ids[i], type_ids[i]); for (int j = 0; j < i; ++j) { EXPECT_NE(type_ids[i], type_ids[j]); } } } TEST(FastTypeIdTest, AliasTypes) { using int_alias = int; EXPECT_EQ(bi::FastTypeId(), bi::FastTypeId()); } TEST(FastTypeIdTest, TemplateSpecializations) { EXPECT_NE(bi::FastTypeId>(), bi::FastTypeId>()); EXPECT_NE((bi::FastTypeId>()), (bi::FastTypeId>())); } struct Base {}; struct Derived : Base {}; struct PDerived : private Base {}; TEST(FastTypeIdTest, Inheritance) { EXPECT_NE(bi::FastTypeId(), bi::FastTypeId()); 
EXPECT_NE(bi::FastTypeId(), bi::FastTypeId()); } } // namespace abseil-20220623.1/absl/base/internal/hide_ptr.h000066400000000000000000000032151430371345100207720ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_HIDE_PTR_H_ #define ABSL_BASE_INTERNAL_HIDE_PTR_H_ #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Arbitrary value with high bits set. Xor'ing with it is unlikely // to map one valid pointer to another valid pointer. constexpr uintptr_t HideMask() { return (uintptr_t{0xF03A5F7BU} << (sizeof(uintptr_t) - 4) * 8) | 0xF03A5F7BU; } // Hide a pointer from the leak checker. For internal use only. // Differs from absl::IgnoreLeak(ptr) in that absl::IgnoreLeak(ptr) causes ptr // and all objects reachable from ptr to be ignored by the leak checker. template inline uintptr_t HidePtr(T* ptr) { return reinterpret_cast(ptr) ^ HideMask(); } // Return a pointer that has been hidden from the leak checker. // For internal use only. template inline T* UnhidePtr(uintptr_t hidden) { return reinterpret_cast(hidden ^ HideMask()); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_HIDE_PTR_H_ abseil-20220623.1/absl/base/internal/identity.h000066400000000000000000000017641430371345100210340ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef ABSL_BASE_INTERNAL_IDENTITY_H_ #define ABSL_BASE_INTERNAL_IDENTITY_H_ #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace internal { template struct identity { typedef T type; }; template using identity_t = typename identity::type; } // namespace internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_IDENTITY_H_ abseil-20220623.1/absl/base/internal/inline_variable.h000066400000000000000000000111451430371345100223200ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ #define ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ #include #include "absl/base/internal/identity.h" // File: // This file define a macro that allows the creation of or emulation of C++17 // inline variables based on whether or not the feature is supported. 
//////////////////////////////////////////////////////////////////////////////// // Macro: ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) // // Description: // Expands to the equivalent of an inline constexpr instance of the specified // `type` and `name`, initialized to the value `init`. If the compiler being // used is detected as supporting actual inline variables as a language // feature, then the macro expands to an actual inline variable definition. // // Requires: // `type` is a type that is usable in an extern variable declaration. // // Requires: `name` is a valid identifier // // Requires: // `init` is an expression that can be used in the following definition: // constexpr type name = init; // // Usage: // // // Equivalent to: `inline constexpr size_t variant_npos = -1;` // ABSL_INTERNAL_INLINE_CONSTEXPR(size_t, variant_npos, -1); // // Differences in implementation: // For a direct, language-level inline variable, decltype(name) will be the // type that was specified along with const qualification, whereas for // emulated inline variables, decltype(name) may be different (in practice // it will likely be a reference type). //////////////////////////////////////////////////////////////////////////////// #ifdef __cpp_inline_variables // Clang's -Wmissing-variable-declarations option erroneously warned that // inline constexpr objects need to be pre-declared. This has now been fixed, // but we will need to support this workaround for people building with older // versions of clang. // // Bug: https://bugs.llvm.org/show_bug.cgi?id=35862 // // Note: // identity_t is used here so that the const and name are in the // appropriate place for pointer types, reference types, function pointer // types, etc.. #if defined(__clang__) #define ABSL_INTERNAL_EXTERN_DECL(type, name) \ extern const ::absl::internal::identity_t name; #else // Otherwise, just define the macro to do nothing. 
#define ABSL_INTERNAL_EXTERN_DECL(type, name) #endif // defined(__clang__) // See above comment at top of file for details. #define ABSL_INTERNAL_INLINE_CONSTEXPR(type, name, init) \ ABSL_INTERNAL_EXTERN_DECL(type, name) \ inline constexpr ::absl::internal::identity_t name = init #else // See above comment at top of file for details. // // Note: // identity_t is used here so that the const and name are in the // appropriate place for pointer types, reference types, function pointer // types, etc.. #define ABSL_INTERNAL_INLINE_CONSTEXPR(var_type, name, init) \ template \ struct AbslInternalInlineVariableHolder##name { \ static constexpr ::absl::internal::identity_t kInstance = init; \ }; \ \ template \ constexpr ::absl::internal::identity_t \ AbslInternalInlineVariableHolder##name::kInstance; \ \ static constexpr const ::absl::internal::identity_t& \ name = /* NOLINT */ \ AbslInternalInlineVariableHolder##name<>::kInstance; \ static_assert(sizeof(void (*)(decltype(name))) != 0, \ "Silence unused variable warnings.") #endif // __cpp_inline_variables #endif // ABSL_BASE_INTERNAL_INLINE_VARIABLE_EMULATION_H_ abseil-20220623.1/absl/base/internal/inline_variable_testing.h000066400000000000000000000026271430371345100240620ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_BASE_INLINE_VARIABLE_TESTING_H_ #define ABSL_BASE_INLINE_VARIABLE_TESTING_H_ #include "absl/base/internal/inline_variable.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace inline_variable_testing_internal { struct Foo { int value = 5; }; ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, inline_variable_foo, {}); ABSL_INTERNAL_INLINE_CONSTEXPR(Foo, other_inline_variable_foo, {}); ABSL_INTERNAL_INLINE_CONSTEXPR(int, inline_variable_int, 5); ABSL_INTERNAL_INLINE_CONSTEXPR(int, other_inline_variable_int, 5); ABSL_INTERNAL_INLINE_CONSTEXPR(void(*)(), inline_variable_fun_ptr, nullptr); const Foo& get_foo_a(); const Foo& get_foo_b(); const int& get_int_a(); const int& get_int_b(); } // namespace inline_variable_testing_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INLINE_VARIABLE_TESTING_H_ abseil-20220623.1/absl/base/internal/invoke.h000066400000000000000000000222751430371345100204760ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // absl::base_internal::invoke(f, args...) is an implementation of // INVOKE(f, args...) from section [func.require] of the C++ standard. // When compiled as C++17 and later versions, it is implemented as an alias of // std::invoke. // // [func.require] // Define INVOKE (f, t1, t2, ..., tN) as follows: // 1. 
(t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T // and t1 is an object of type T or a reference to an object of type T or a // reference to an object of a type derived from T; // 2. ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a // class T and t1 is not one of the types described in the previous item; // 3. t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is // an object of type T or a reference to an object of type T or a reference // to an object of a type derived from T; // 4. (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 // is not one of the types described in the previous item; // 5. f(t1, t2, ..., tN) in all other cases. // // The implementation is SFINAE-friendly: substitution failure within invoke() // isn't an error. #ifndef ABSL_BASE_INTERNAL_INVOKE_H_ #define ABSL_BASE_INTERNAL_INVOKE_H_ #include "absl/base/config.h" #if ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L #include namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { using std::invoke; using std::invoke_result_t; using std::is_invocable_r; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #else // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L #include #include #include #include "absl/meta/type_traits.h" // The following code is internal implementation detail. See the comment at the // top of this file for the API documentation. namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // The five classes below each implement one of the clauses from the definition // of INVOKE. The inner class template Accept checks whether the // clause is applicable; static function template Invoke(f, args...) does the // invocation. // // By separating the clause selection logic from invocation we make sure that // Invoke() does exactly what the standard says. 
template struct StrippedAccept { template struct Accept : Derived::template AcceptImpl::type>::type...> {}; }; // (t1.*f)(t2, ..., tN) when f is a pointer to a member function of a class T // and t1 is an object of type T or a reference to an object of type T or a // reference to an object of a type derived from T. struct MemFunAndRef : StrippedAccept { template struct AcceptImpl : std::false_type {}; template struct AcceptImpl : std::integral_constant::value && absl::is_function::value> { }; template static decltype((std::declval().* std::declval())(std::declval()...)) Invoke(MemFun&& mem_fun, Obj&& obj, Args&&... args) { // Ignore bogus GCC warnings on this line. // See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=101436 for similar example. #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif return (std::forward(obj).* std::forward(mem_fun))(std::forward(args)...); #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(11, 0) #pragma GCC diagnostic pop #endif } }; // ((*t1).*f)(t2, ..., tN) when f is a pointer to a member function of a // class T and t1 is not one of the types described in the previous item. struct MemFunAndPtr : StrippedAccept { template struct AcceptImpl : std::false_type {}; template struct AcceptImpl : std::integral_constant::value && absl::is_function::value> { }; template static decltype(((*std::declval()).* std::declval())(std::declval()...)) Invoke(MemFun&& mem_fun, Ptr&& ptr, Args&&... args) { return ((*std::forward(ptr)).* std::forward(mem_fun))(std::forward(args)...); } }; // t1.*f when N == 1 and f is a pointer to member data of a class T and t1 is // an object of type T or a reference to an object of type T or a reference // to an object of a type derived from T. 
struct DataMemAndRef : StrippedAccept { template struct AcceptImpl : std::false_type {}; template struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> {}; template static decltype(std::declval().*std::declval()) Invoke( DataMem&& data_mem, Ref&& ref) { return std::forward(ref).*std::forward(data_mem); } }; // (*t1).*f when N == 1 and f is a pointer to member data of a class T and t1 // is not one of the types described in the previous item. struct DataMemAndPtr : StrippedAccept { template struct AcceptImpl : std::false_type {}; template struct AcceptImpl : std::integral_constant::value && !absl::is_function::value> {}; template static decltype((*std::declval()).*std::declval()) Invoke( DataMem&& data_mem, Ptr&& ptr) { return (*std::forward(ptr)).*std::forward(data_mem); } }; // f(t1, t2, ..., tN) in all other cases. struct Callable { // Callable doesn't have Accept because it's the last clause that gets picked // when none of the previous clauses are applicable. template static decltype(std::declval()(std::declval()...)) Invoke( F&& f, Args&&... args) { return std::forward(f)(std::forward(args)...); } }; // Resolves to the first matching clause. template struct Invoker { typedef typename std::conditional< MemFunAndRef::Accept::value, MemFunAndRef, typename std::conditional< MemFunAndPtr::Accept::value, MemFunAndPtr, typename std::conditional< DataMemAndRef::Accept::value, DataMemAndRef, typename std::conditional::value, DataMemAndPtr, Callable>::type>::type>:: type>::type type; }; // The result type of Invoke. template using invoke_result_t = decltype(Invoker::type::Invoke( std::declval(), std::declval()...)); // Invoke(f, args...) is an implementation of INVOKE(f, args...) from section // [func.require] of the C++ standard. template invoke_result_t invoke(F&& f, Args&&... 
args) { return Invoker::type::Invoke(std::forward(f), std::forward(args)...); } template struct IsInvocableRImpl : std::false_type {}; template struct IsInvocableRImpl< absl::void_t >, R, F, Args...> : std::integral_constant< bool, std::is_convertible, R>::value || std::is_void::value> {}; // Type trait whose member `value` is true if invoking `F` with `Args` is valid, // and either the return type is convertible to `R`, or `R` is void. // C++11-compatible version of `std::is_invocable_r`. template using is_invocable_r = IsInvocableRImpl; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_INTERNAL_CPLUSPLUS_LANG >= 201703L #endif // ABSL_BASE_INTERNAL_INVOKE_H_ abseil-20220623.1/absl/base/internal/low_level_alloc.cc000066400000000000000000000542741430371345100225070ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A low-level allocator that can be used by other low-level // modules without introducing dependency cycles. // This allocator is slow and wasteful of memory; // it should not be used when performance is key. 
#include "absl/base/internal/low_level_alloc.h" #include #include "absl/base/call_once.h" #include "absl/base/config.h" #include "absl/base/internal/direct_mmap.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" // LowLevelAlloc requires that the platform support low-level // allocation of virtual memory. Platforms lacking this cannot use // LowLevelAlloc. #ifndef ABSL_LOW_LEVEL_ALLOC_MISSING #ifndef _WIN32 #include #include #include #include #else #include #endif #include #include #include #include #include #include // for placement-new #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" // MAP_ANONYMOUS #if defined(__APPLE__) // For mmap, Linux defines both MAP_ANONYMOUS and MAP_ANON and says MAP_ANON is // deprecated. In Darwin, MAP_ANON is all there is. #if !defined MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif // !MAP_ANONYMOUS #endif // __APPLE__ namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // A first-fit allocator with amortized logarithmic free() time. // --------------------------------------------------------------------------- static const int kMaxLevel = 30; namespace { // This struct describes one allocated block, or one free block. struct AllocList { struct Header { // Size of entire region, including this field. Must be // first. Valid in both allocated and unallocated blocks. uintptr_t size; // kMagicAllocated or kMagicUnallocated xor this. uintptr_t magic; // Pointer to parent arena. LowLevelAlloc::Arena *arena; // Aligns regions to 0 mod 2*sizeof(void*). void *dummy_for_alignment; } header; // Next two fields: in unallocated blocks: freelist skiplist data // in allocated blocks: overlaps with client data // Levels in skiplist used. int levels; // Actually has levels elements. The AllocList node may not have room // for all kMaxLevel entries. See max_fit in LLA_SkiplistLevels(). 
AllocList *next[kMaxLevel]; }; } // namespace // --------------------------------------------------------------------------- // A trivial skiplist implementation. This is used to keep the freelist // in address order while taking only logarithmic time per insert and delete. // An integer approximation of log2(size/base) // Requires size >= base. static int IntLog2(size_t size, size_t base) { int result = 0; for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result) result++; } // floor(size / 2**result) <= base < floor(size / 2**(result-1)) // => log2(size/(base+1)) <= result < 1+log2(size/base) // => result ~= log2(size/base) return result; } // Return a random integer n: p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1. static int Random(uint32_t *state) { uint32_t r = *state; int result = 1; while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) { result++; } *state = r; return result; } // Return a number of skiplist levels for a node of size bytes, where // base is the minimum node size. Compute level=log2(size / base)+n // where n is 1 if random is false and otherwise a random number generated with // the standard distribution for a skiplist: See Random() above. // Bigger nodes tend to have more skiplist levels due to the log2(size / base) // term, so first-fit searches touch fewer nodes. "level" is clipped so // level(level) > max_fit) level = static_cast(max_fit); if (level > kMaxLevel-1) level = kMaxLevel - 1; ABSL_RAW_CHECK(level >= 1, "block not big enough for even one level"); return level; } // Return "atleast", the first element of AllocList *head s.t. *atleast >= *e. // For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater // points to the last element at level i in the AllocList less than *e, or is // head if no such element exists. 
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e, AllocList **prev) { AllocList *p = head; for (int level = head->levels - 1; level >= 0; level--) { for (AllocList *n; (n = p->next[level]) != nullptr && n < e; p = n) { } prev[level] = p; } return (head->levels == 0) ? nullptr : prev[0]->next[0]; } // Insert element *e into AllocList *head. Set prev[] as LLA_SkiplistSearch. // Requires that e->levels be previously set by the caller (using // LLA_SkiplistLevels()) static void LLA_SkiplistInsert(AllocList *head, AllocList *e, AllocList **prev) { LLA_SkiplistSearch(head, e, prev); for (; head->levels < e->levels; head->levels++) { // extend prev pointers prev[head->levels] = head; // to all *e's levels } for (int i = 0; i != e->levels; i++) { // add element to list e->next[i] = prev[i]->next[i]; prev[i]->next[i] = e; } } // Remove element *e from AllocList *head. Set prev[] as LLA_SkiplistSearch(). // Requires that e->levels be previous set by the caller (using // LLA_SkiplistLevels()) static void LLA_SkiplistDelete(AllocList *head, AllocList *e, AllocList **prev) { AllocList *found = LLA_SkiplistSearch(head, e, prev); ABSL_RAW_CHECK(e == found, "element not in freelist"); for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) { prev[i]->next[i] = e->next[i]; } while (head->levels > 0 && head->next[head->levels - 1] == nullptr) { head->levels--; // reduce head->levels if level unused } } // --------------------------------------------------------------------------- // Arena implementation // Metadata for an LowLevelAlloc arena instance. struct LowLevelAlloc::Arena { // Constructs an arena with the given LowLevelAlloc flags. 
explicit Arena(uint32_t flags_value); base_internal::SpinLock mu; // Head of free list, sorted by address AllocList freelist ABSL_GUARDED_BY(mu); // Count of allocated blocks int32_t allocation_count ABSL_GUARDED_BY(mu); // flags passed to NewArena const uint32_t flags; // Result of sysconf(_SC_PAGESIZE) const size_t pagesize; // Lowest power of two >= max(16, sizeof(AllocList)) const size_t round_up; // Smallest allocation block size const size_t min_size; // PRNG state uint32_t random ABSL_GUARDED_BY(mu); }; namespace { // Static storage space for the lazily-constructed, default global arena // instances. We require this space because the whole point of LowLevelAlloc // is to avoid relying on malloc/new. alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof( LowLevelAlloc::Arena)]; alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof( LowLevelAlloc::Arena)]; #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING alignas( LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage [sizeof(LowLevelAlloc::Arena)]; #endif // We must use LowLevelCallOnce here to construct the global arenas, rather than // using function-level statics, to avoid recursively invoking the scheduler. absl::once_flag create_globals_once; void CreateGlobalArenas() { new (&default_arena_storage) LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook); new (&unhooked_arena_storage) LowLevelAlloc::Arena(0); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING new (&unhooked_async_sig_safe_arena_storage) LowLevelAlloc::Arena(LowLevelAlloc::kAsyncSignalSafe); #endif } // Returns a global arena that does not call into hooks. Used by NewArena() // when kCallMallocHook is not set. 
LowLevelAlloc::Arena* UnhookedArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast(&unhooked_arena_storage); } #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING // Returns a global arena that is async-signal safe. Used by NewArena() when // kAsyncSignalSafe is set. LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast( &unhooked_async_sig_safe_arena_storage); } #endif } // namespace // Returns the default arena, as used by LowLevelAlloc::Alloc() and friends. LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() { base_internal::LowLevelCallOnce(&create_globals_once, CreateGlobalArenas); return reinterpret_cast(&default_arena_storage); } // magic numbers to identify allocated and unallocated blocks static const uintptr_t kMagicAllocated = 0x4c833e95U; static const uintptr_t kMagicUnallocated = ~kMagicAllocated; namespace { class ABSL_SCOPED_LOCKABLE ArenaLock { public: explicit ArenaLock(LowLevelAlloc::Arena *arena) ABSL_EXCLUSIVE_LOCK_FUNCTION(arena->mu) : arena_(arena) { #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { sigset_t all; sigfillset(&all); mask_valid_ = pthread_sigmask(SIG_BLOCK, &all, &mask_) == 0; } #endif arena_->mu.Lock(); } ~ArenaLock() { ABSL_RAW_CHECK(left_, "haven't left Arena region"); } void Leave() ABSL_UNLOCK_FUNCTION() { arena_->mu.Unlock(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (mask_valid_) { const int err = pthread_sigmask(SIG_SETMASK, &mask_, nullptr); if (err != 0) { ABSL_RAW_LOG(FATAL, "pthread_sigmask failed: %d", err); } } #endif left_ = true; } private: bool left_ = false; // whether left region #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING bool mask_valid_ = false; sigset_t mask_; // old mask of blocked signals #endif LowLevelAlloc::Arena *arena_; ArenaLock(const ArenaLock &) = 
delete; ArenaLock &operator=(const ArenaLock &) = delete; }; } // namespace // create an appropriate magic number for an object at "ptr" // "magic" should be kMagicAllocated or kMagicUnallocated inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) { return magic ^ reinterpret_cast(ptr); } namespace { size_t GetPageSize() { #ifdef _WIN32 SYSTEM_INFO system_info; GetSystemInfo(&system_info); return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity); #elif defined(__wasm__) || defined(__asmjs__) return getpagesize(); #else return sysconf(_SC_PAGESIZE); #endif } size_t RoundedUpBlockSize() { // Round up block sizes to a power of two close to the header size. size_t round_up = 16; while (round_up < sizeof(AllocList::Header)) { round_up += round_up; } return round_up; } } // namespace LowLevelAlloc::Arena::Arena(uint32_t flags_value) : mu(base_internal::SCHEDULE_KERNEL_ONLY), allocation_count(0), flags(flags_value), pagesize(GetPageSize()), round_up(RoundedUpBlockSize()), min_size(2 * round_up), random(0) { freelist.header.size = 0; freelist.header.magic = Magic(kMagicUnallocated, &freelist.header); freelist.header.arena = this; freelist.levels = 0; memset(freelist.next, 0, sizeof(freelist.next)); } // L < meta_data_arena->mu LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32_t flags) { Arena *meta_data_arena = DefaultArena(); #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { meta_data_arena = UnhookedAsyncSigSafeArena(); } else // NOLINT(readability/braces) #endif if ((flags & LowLevelAlloc::kCallMallocHook) == 0) { meta_data_arena = UnhookedArena(); } Arena *result = new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(flags); return result; } // L < arena->mu, L < arena->arena->mu bool LowLevelAlloc::DeleteArena(Arena *arena) { ABSL_RAW_CHECK( arena != nullptr && arena != DefaultArena() && arena != UnhookedArena(), "may not delete default arena"); ArenaLock 
section(arena); if (arena->allocation_count != 0) { section.Leave(); return false; } while (arena->freelist.next[0] != nullptr) { AllocList *region = arena->freelist.next[0]; size_t size = region->header.size; arena->freelist.next[0] = region->next[0]; ABSL_RAW_CHECK( region->header.magic == Magic(kMagicUnallocated, ®ion->header), "bad magic number in DeleteArena()"); ABSL_RAW_CHECK(region->header.arena == arena, "bad arena pointer in DeleteArena()"); ABSL_RAW_CHECK(size % arena->pagesize == 0, "empty arena has non-page-aligned block size"); ABSL_RAW_CHECK(reinterpret_cast(region) % arena->pagesize == 0, "empty arena has non-page-aligned block"); int munmap_result; #ifdef _WIN32 munmap_result = VirtualFree(region, 0, MEM_RELEASE); ABSL_RAW_CHECK(munmap_result != 0, "LowLevelAlloc::DeleteArena: VitualFree failed"); #else #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) { munmap_result = munmap(region, size); } else { munmap_result = base_internal::DirectMunmap(region, size); } #else munmap_result = munmap(region, size); #endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (munmap_result != 0) { ABSL_RAW_LOG(FATAL, "LowLevelAlloc::DeleteArena: munmap failed: %d", errno); } #endif // _WIN32 } section.Leave(); arena->~Arena(); Free(arena); return true; } // --------------------------------------------------------------------------- // Addition, checking for overflow. The intent is to die if an external client // manages to push through a request that would cause arithmetic to fail. static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) { uintptr_t sum = a + b; ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow"); return sum; } // Return value rounded up to next multiple of align. // align must be a power of two. 
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) { return CheckedAdd(addr, align - 1) & ~(align - 1); } // Equivalent to "return prev->next[i]" but with sanity checking // that the freelist is in the correct order, that it // consists of regions marked "unallocated", and that no two regions // are adjacent in memory (they should have been coalesced). // L >= arena->mu static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) { ABSL_RAW_CHECK(i < prev->levels, "too few levels in Next()"); AllocList *next = prev->next[i]; if (next != nullptr) { ABSL_RAW_CHECK( next->header.magic == Magic(kMagicUnallocated, &next->header), "bad magic number in Next()"); ABSL_RAW_CHECK(next->header.arena == arena, "bad arena pointer in Next()"); if (prev != &arena->freelist) { ABSL_RAW_CHECK(prev < next, "unordered freelist"); ABSL_RAW_CHECK(reinterpret_cast(prev) + prev->header.size < reinterpret_cast(next), "malformed freelist"); } } return next; } // Coalesce list item "a" with its successor if they are adjacent. 
static void Coalesce(AllocList *a) { AllocList *n = a->next[0]; if (n != nullptr && reinterpret_cast(a) + a->header.size == reinterpret_cast(n)) { LowLevelAlloc::Arena *arena = a->header.arena; a->header.size += n->header.size; n->header.magic = 0; n->header.arena = nullptr; AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, n, prev); LLA_SkiplistDelete(&arena->freelist, a, prev); a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, &arena->random); LLA_SkiplistInsert(&arena->freelist, a, prev); } } // Adds block at location "v" to the free list // L >= arena->mu static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) { AllocList *f = reinterpret_cast( reinterpret_cast(v) - sizeof (f->header)); ABSL_RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header), "bad magic number in AddToFreelist()"); ABSL_RAW_CHECK(f->header.arena == arena, "bad arena pointer in AddToFreelist()"); f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, &arena->random); AllocList *prev[kMaxLevel]; LLA_SkiplistInsert(&arena->freelist, f, prev); f->header.magic = Magic(kMagicUnallocated, &f->header); Coalesce(f); // maybe coalesce with successor Coalesce(prev[0]); // maybe coalesce with predecessor } // Frees storage allocated by LowLevelAlloc::Alloc(). 
// L < arena->mu void LowLevelAlloc::Free(void *v) { if (v != nullptr) { AllocList *f = reinterpret_cast( reinterpret_cast(v) - sizeof (f->header)); LowLevelAlloc::Arena *arena = f->header.arena; ArenaLock section(arena); AddToFreelist(v, arena); ABSL_RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free"); arena->allocation_count--; section.Leave(); } } // allocates and returns a block of size bytes, to be freed with Free() // L < arena->mu static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) { void *result = nullptr; if (request != 0) { AllocList *s; // will point to region that satisfies request ArenaLock section(arena); // round up with header size_t req_rnd = RoundUp(CheckedAdd(request, sizeof (s->header)), arena->round_up); for (;;) { // loop until we find a suitable region // find the minimum levels that a block of this size must have int i = LLA_SkiplistLevels(req_rnd, arena->min_size, nullptr) - 1; if (i < arena->freelist.levels) { // potential blocks exist AllocList *before = &arena->freelist; // predecessor of s while ((s = Next(i, before, arena)) != nullptr && s->header.size < req_rnd) { before = s; } if (s != nullptr) { // we found a region break; } } // we unlock before mmap() both because mmap() may call a callback hook, // and because it may be slow. 
arena->mu.Unlock(); // mmap generous 64K chunks to decrease // the chances/impact of fragmentation: size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16); void *new_pages; #ifdef _WIN32 new_pages = VirtualAlloc(0, new_pages_size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); ABSL_RAW_CHECK(new_pages != nullptr, "VirtualAlloc failed"); #else #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) { new_pages = base_internal::DirectMmap(nullptr, new_pages_size, PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); } else { new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); } #else new_pages = mmap(nullptr, new_pages_size, PROT_WRITE | PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); #endif // ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING if (new_pages == MAP_FAILED) { ABSL_RAW_LOG(FATAL, "mmap error: %d", errno); } #endif // _WIN32 arena->mu.Lock(); s = reinterpret_cast(new_pages); s->header.size = new_pages_size; // Pretend the block is allocated; call AddToFreelist() to free it. 
s->header.magic = Magic(kMagicAllocated, &s->header); s->header.arena = arena; AddToFreelist(&s->levels, arena); // insert new region into free list } AllocList *prev[kMaxLevel]; LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list // s points to the first free region that's big enough if (CheckedAdd(req_rnd, arena->min_size) <= s->header.size) { // big enough to split AllocList *n = reinterpret_cast (req_rnd + reinterpret_cast(s)); n->header.size = s->header.size - req_rnd; n->header.magic = Magic(kMagicAllocated, &n->header); n->header.arena = arena; s->header.size = req_rnd; AddToFreelist(&n->levels, arena); } s->header.magic = Magic(kMagicAllocated, &s->header); ABSL_RAW_CHECK(s->header.arena == arena, ""); arena->allocation_count++; section.Leave(); result = &s->levels; } ABSL_ANNOTATE_MEMORY_IS_UNINITIALIZED(result, request); return result; } void *LowLevelAlloc::Alloc(size_t request) { void *result = DoAllocWithArena(request, DefaultArena()); return result; } void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) { ABSL_RAW_CHECK(arena != nullptr, "must pass a valid arena"); void *result = DoAllocWithArena(request, arena); return result; } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_LOW_LEVEL_ALLOC_MISSING abseil-20220623.1/absl/base/internal/low_level_alloc.h000066400000000000000000000111171430371345100223360ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // #ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ #define ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ // A simple thread-safe memory allocator that does not depend on // mutexes or thread-specific data. It is intended to be used // sparingly, and only when malloc() would introduce an unwanted // dependency, such as inside the heap-checker, or the Mutex // implementation. // IWYU pragma: private, include "base/low_level_alloc.h" #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" // LowLevelAlloc requires that the platform support low-level // allocation of virtual memory. Platforms lacking this cannot use // LowLevelAlloc. #ifdef ABSL_LOW_LEVEL_ALLOC_MISSING #error ABSL_LOW_LEVEL_ALLOC_MISSING cannot be directly set #elif !defined(ABSL_HAVE_MMAP) && !defined(_WIN32) #define ABSL_LOW_LEVEL_ALLOC_MISSING 1 #endif // Using LowLevelAlloc with kAsyncSignalSafe isn't supported on Windows or // asm.js / WebAssembly. // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html // for more information. #ifdef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING #error ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING cannot be directly set #elif defined(_WIN32) || defined(__asmjs__) || defined(__wasm__) #define ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING 1 #endif #include #include "absl/base/port.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { class LowLevelAlloc { public: struct Arena; // an arena from which memory may be allocated // Returns a pointer to a block of at least "request" bytes // that have been newly allocated from the specific arena. // for Alloc() call the DefaultArena() is used. // Returns 0 if passed request==0. // Does not return 0 under other circumstances; it crashes if memory // is not available. 
static void *Alloc(size_t request) ABSL_ATTRIBUTE_SECTION(malloc_hook); static void *AllocWithArena(size_t request, Arena *arena) ABSL_ATTRIBUTE_SECTION(malloc_hook); // Deallocates a region of memory that was previously allocated with // Alloc(). Does nothing if passed 0. "s" must be either 0, // or must have been returned from a call to Alloc() and not yet passed to // Free() since that call to Alloc(). The space is returned to the arena // from which it was allocated. static void Free(void *s) ABSL_ATTRIBUTE_SECTION(malloc_hook); // ABSL_ATTRIBUTE_SECTION(malloc_hook) for Alloc* and Free // are to put all callers of MallocHook::Invoke* in this module // into special section, // so that MallocHook::GetCallerStackTrace can function accurately. // Create a new arena. // The root metadata for the new arena is allocated in the // meta_data_arena; the DefaultArena() can be passed for meta_data_arena. // These values may be ored into flags: enum { // Report calls to Alloc() and Free() via the MallocHook interface. // Set in the DefaultArena. kCallMallocHook = 0x0001, #ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING // Make calls to Alloc(), Free() be async-signal-safe. Not set in // DefaultArena(). Not supported on all platforms. kAsyncSignalSafe = 0x0002, #endif }; // Construct a new arena. The allocation of the underlying metadata honors // the provided flags. For example, the call NewArena(kAsyncSignalSafe) // is itself async-signal-safe, as well as generatating an arena that provides // async-signal-safe Alloc/Free. static Arena *NewArena(int32_t flags); // Destroys an arena allocated by NewArena and returns true, // provided no allocated blocks remain in the arena. // If allocated blocks remain in the arena, does nothing and // returns false. // It is illegal to attempt to destroy the DefaultArena(). static bool DeleteArena(Arena *arena); // The default arena that always exists. 
static Arena *DefaultArena(); private: LowLevelAlloc(); // no instances }; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_ALLOC_H_ abseil-20220623.1/absl/base/internal/low_level_alloc_test.cc000066400000000000000000000127501430371345100235370ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/low_level_alloc.h" #include #include #include #include // NOLINT(build/c++11) #include #include #ifdef __EMSCRIPTEN__ #include #endif #include "absl/container/node_hash_map.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { // This test doesn't use gtest since it needs to test that everything // works before main(). #define TEST_ASSERT(x) \ if (!(x)) { \ printf("TEST_ASSERT(%s) FAILED ON LINE %d\n", #x, __LINE__); \ abort(); \ } // a block of memory obtained from the allocator struct BlockDesc { char *ptr; // pointer to memory int len; // number of bytes int fill; // filled with data starting with this }; // Check that the pattern placed in the block d // by RandomizeBlockDesc is still there. static void CheckBlockDesc(const BlockDesc &d) { for (int i = 0; i != d.len; i++) { TEST_ASSERT((d.ptr[i] & 0xff) == ((d.fill + i) & 0xff)); } } // Fill the block "*d" with a pattern // starting with a random byte. 
static void RandomizeBlockDesc(BlockDesc *d) { d->fill = rand() & 0xff; for (int i = 0; i != d->len; i++) { d->ptr[i] = (d->fill + i) & 0xff; } } // Use to indicate to the malloc hooks that // this calls is from LowLevelAlloc. static bool using_low_level_alloc = false; // n times, toss a coin, and based on the outcome // either allocate a new block or deallocate an old block. // New blocks are placed in a std::unordered_map with a random key // and initialized with RandomizeBlockDesc(). // If keys conflict, the older block is freed. // Old blocks are always checked with CheckBlockDesc() // before being freed. At the end of the run, // all remaining allocated blocks are freed. // If use_new_arena is true, use a fresh arena, and then delete it. // If call_malloc_hook is true and user_arena is true, // allocations and deallocations are reported via the MallocHook // interface. static void Test(bool use_new_arena, bool call_malloc_hook, int n) { typedef absl::node_hash_map AllocMap; AllocMap allocated; AllocMap::iterator it; BlockDesc block_desc; int rnd; LowLevelAlloc::Arena *arena = nullptr; if (use_new_arena) { int32_t flags = call_malloc_hook ? LowLevelAlloc::kCallMallocHook : 0; arena = LowLevelAlloc::NewArena(flags); } for (int i = 0; i != n; i++) { if (i != 0 && i % 10000 == 0) { printf("."); fflush(stdout); } switch (rand() & 1) { // toss a coin case 0: // coin came up heads: add a block using_low_level_alloc = true; block_desc.len = rand() & 0x3fff; block_desc.ptr = reinterpret_cast( arena == nullptr ? 
LowLevelAlloc::Alloc(block_desc.len) : LowLevelAlloc::AllocWithArena(block_desc.len, arena)); using_low_level_alloc = false; RandomizeBlockDesc(&block_desc); rnd = rand(); it = allocated.find(rnd); if (it != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; it->second = block_desc; } else { allocated[rnd] = block_desc; } break; case 1: // coin came up tails: remove a block it = allocated.begin(); if (it != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; allocated.erase(it); } break; } } // remove all remaining blocks while ((it = allocated.begin()) != allocated.end()) { CheckBlockDesc(it->second); using_low_level_alloc = true; LowLevelAlloc::Free(it->second.ptr); using_low_level_alloc = false; allocated.erase(it); } if (use_new_arena) { TEST_ASSERT(LowLevelAlloc::DeleteArena(arena)); } } // LowLevelAlloc is designed to be safe to call before main(). static struct BeforeMain { BeforeMain() { Test(false, false, 50000); Test(true, false, 50000); Test(true, true, 50000); } } before_main; } // namespace } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl int main(int argc, char *argv[]) { // The actual test runs in the global constructor of `before_main`. printf("PASS\n"); #ifdef __EMSCRIPTEN__ // clang-format off // This is JS here. Don't try to format it. MAIN_THREAD_EM_ASM({ if (ENVIRONMENT_IS_WEB) { if (typeof TEST_FINISH === 'function') { TEST_FINISH($0); } else { console.error('Attempted to exit with status ' + $0); console.error('But TEST_FINSIHED is not a function.'); } } }, 0); // clang-format on #endif return 0; } abseil-20220623.1/absl/base/internal/low_level_scheduling.h000066400000000000000000000113331430371345100233710ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Core interfaces and definitions used by by low-level interfaces such as // SpinLock. #ifndef ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ #define ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/macros.h" // The following two declarations exist so SchedulingGuard may friend them with // the appropriate language linkage. These callbacks allow libc internals, such // as function level statics, to schedule cooperatively when locking. extern "C" bool __google_disable_rescheduling(void); extern "C" void __google_enable_rescheduling(bool disable_result); namespace absl { ABSL_NAMESPACE_BEGIN class CondVar; class Mutex; namespace synchronization_internal { int MutexDelay(int32_t c, int mode); } // namespace synchronization_internal namespace base_internal { class SchedulingHelper; // To allow use of SchedulingGuard. class SpinLock; // To allow use of SchedulingGuard. // SchedulingGuard // Provides guard semantics that may be used to disable cooperative rescheduling // of the calling thread within specific program blocks. This is used to // protect resources (e.g. low-level SpinLocks or Domain code) that cooperative // scheduling depends on. 
// // Domain implementations capable of rescheduling in reaction to involuntary // kernel thread actions (e.g blocking due to a pagefault or syscall) must // guarantee that an annotated thread is not allowed to (cooperatively) // reschedule until the annotated region is complete. // // It is an error to attempt to use a cooperatively scheduled resource (e.g. // Mutex) within a rescheduling-disabled region. // // All methods are async-signal safe. class SchedulingGuard { public: // Returns true iff the calling thread may be cooperatively rescheduled. static bool ReschedulingIsAllowed(); SchedulingGuard(const SchedulingGuard&) = delete; SchedulingGuard& operator=(const SchedulingGuard&) = delete; private: // Disable cooperative rescheduling of the calling thread. It may still // initiate scheduling operations (e.g. wake-ups), however, it may not itself // reschedule. Nestable. The returned result is opaque, clients should not // attempt to interpret it. // REQUIRES: Result must be passed to a pairing EnableScheduling(). static bool DisableRescheduling(); // Marks the end of a rescheduling disabled region, previously started by // DisableRescheduling(). // REQUIRES: Pairs with innermost call (and result) of DisableRescheduling(). static void EnableRescheduling(bool disable_result); // A scoped helper for {Disable, Enable}Rescheduling(). // REQUIRES: destructor must run in same thread as constructor. struct ScopedDisable { ScopedDisable() { disabled = SchedulingGuard::DisableRescheduling(); } ~ScopedDisable() { SchedulingGuard::EnableRescheduling(disabled); } bool disabled; }; // A scoped helper to enable rescheduling temporarily. // REQUIRES: destructor must run in same thread as constructor. class ScopedEnable { public: ScopedEnable(); ~ScopedEnable(); private: int scheduling_disabled_depth_; }; // Access to SchedulingGuard is explicitly permitted. 
friend class absl::CondVar; friend class absl::Mutex; friend class SchedulingHelper; friend class SpinLock; friend int absl::synchronization_internal::MutexDelay(int32_t c, int mode); }; //------------------------------------------------------------------------------ // End of public interfaces. //------------------------------------------------------------------------------ inline bool SchedulingGuard::ReschedulingIsAllowed() { return false; } inline bool SchedulingGuard::DisableRescheduling() { return false; } inline void SchedulingGuard::EnableRescheduling(bool /* disable_result */) { return; } inline SchedulingGuard::ScopedEnable::ScopedEnable() : scheduling_disabled_depth_(0) {} inline SchedulingGuard::ScopedEnable::~ScopedEnable() { ABSL_RAW_CHECK(scheduling_disabled_depth_ == 0, "disable unused warning"); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_LOW_LEVEL_SCHEDULING_H_ abseil-20220623.1/absl/base/internal/per_thread_tls.h000066400000000000000000000033361430371345100221770ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ #define ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ // This header defines two macros: // // If the platform supports thread-local storage: // // * ABSL_PER_THREAD_TLS_KEYWORD is the C keyword needed to declare a // thread-local variable // * ABSL_PER_THREAD_TLS is 1 // // Otherwise: // // * ABSL_PER_THREAD_TLS_KEYWORD is empty // * ABSL_PER_THREAD_TLS is 0 // // Microsoft C supports thread-local storage. // GCC supports it if the appropriate version of glibc is available, // which the programmer can indicate by defining ABSL_HAVE_TLS #include "absl/base/port.h" // For ABSL_HAVE_TLS #if defined(ABSL_PER_THREAD_TLS) #error ABSL_PER_THREAD_TLS cannot be directly set #elif defined(ABSL_PER_THREAD_TLS_KEYWORD) #error ABSL_PER_THREAD_TLS_KEYWORD cannot be directly set #elif defined(ABSL_HAVE_TLS) #define ABSL_PER_THREAD_TLS_KEYWORD __thread #define ABSL_PER_THREAD_TLS 1 #elif defined(_MSC_VER) #define ABSL_PER_THREAD_TLS_KEYWORD __declspec(thread) #define ABSL_PER_THREAD_TLS 1 #else #define ABSL_PER_THREAD_TLS_KEYWORD #define ABSL_PER_THREAD_TLS 0 #endif #endif // ABSL_BASE_INTERNAL_PER_THREAD_TLS_H_ abseil-20220623.1/absl/base/internal/prefetch.h000066400000000000000000000104061430371345100207740ustar00rootroot00000000000000// Copyright 2022 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_BASE_INTERNAL_PREFETCH_H_ #define ABSL_BASE_INTERNAL_PREFETCH_H_ #include "absl/base/config.h" #ifdef __SSE__ #include #endif #if defined(_MSC_VER) && defined(ABSL_INTERNAL_HAVE_SSE) #include #pragma intrinsic(_mm_prefetch) #endif // Compatibility wrappers around __builtin_prefetch, to prefetch data // for read if supported by the toolchain. // Move data into the cache before it is read, or "prefetch" it. // // The value of `addr` is the address of the memory to prefetch. If // the target and compiler support it, data prefetch instructions are // generated. If the prefetch is done some time before the memory is // read, it may be in the cache by the time the read occurs. // // The function names specify the temporal locality heuristic applied, // using the names of Intel prefetch instructions: // // T0 - high degree of temporal locality; data should be left in as // many levels of the cache possible // T1 - moderate degree of temporal locality // T2 - low degree of temporal locality // Nta - no temporal locality, data need not be left in the cache // after the read // // Incorrect or gratuitous use of these functions can degrade // performance, so use them only when representative benchmarks show // an improvement. // // Example usage: // // absl::base_internal::PrefetchT0(addr); // // Currently, the different prefetch calls behave on some Intel // architectures as follows: // // SNB..SKL SKX // PrefetchT0() L1/L2/L3 L1/L2 // PrefetchT1() L2/L3 L2 // PrefetchT2() L2/L3 L2 // PrefetchNta() L1/--/L3 L1* // // * On SKX PrefetchNta() will bring the line into L1 but will evict // from L3 cache. This might result in surprising behavior. // // SNB = Sandy Bridge, SKL = Skylake, SKX = Skylake Xeon. // namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { void PrefetchT0(const void* addr); void PrefetchT1(const void* addr); void PrefetchT2(const void* addr); void PrefetchNta(const void* addr); // Implementation details follow. 
#if ABSL_HAVE_BUILTIN(__builtin_prefetch) || defined(__GNUC__) #define ABSL_INTERNAL_HAVE_PREFETCH 1 // See __builtin_prefetch: // https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html. // // These functions speculatively load for read only. This is // safe for all currently supported platforms. However, prefetch for // store may have problems depending on the target platform. // inline void PrefetchT0(const void* addr) { // Note: this uses prefetcht0 on Intel. __builtin_prefetch(addr, 0, 3); } inline void PrefetchT1(const void* addr) { // Note: this uses prefetcht1 on Intel. __builtin_prefetch(addr, 0, 2); } inline void PrefetchT2(const void* addr) { // Note: this uses prefetcht2 on Intel. __builtin_prefetch(addr, 0, 1); } inline void PrefetchNta(const void* addr) { // Note: this uses prefetchtnta on Intel. __builtin_prefetch(addr, 0, 0); } #elif defined(ABSL_INTERNAL_HAVE_SSE) #define ABSL_INTERNAL_HAVE_PREFETCH 1 inline void PrefetchT0(const void* addr) { _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T0); } inline void PrefetchT1(const void* addr) { _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T1); } inline void PrefetchT2(const void* addr) { _mm_prefetch(reinterpret_cast(addr), _MM_HINT_T2); } inline void PrefetchNta(const void* addr) { _mm_prefetch(reinterpret_cast(addr), _MM_HINT_NTA); } #else inline void PrefetchT0(const void*) {} inline void PrefetchT1(const void*) {} inline void PrefetchT2(const void*) {} inline void PrefetchNta(const void*) {} #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_PREFETCH_H_ abseil-20220623.1/absl/base/internal/prefetch_test.cc000066400000000000000000000022101430371345100221630ustar00rootroot00000000000000// Copyright 2022 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/prefetch.h" #include "gtest/gtest.h" namespace { int number = 42; TEST(Prefetch, TemporalLocalityNone) { absl::base_internal::PrefetchNta(&number); EXPECT_EQ(number, 42); } TEST(Prefetch, TemporalLocalityLow) { absl::base_internal::PrefetchT2(&number); EXPECT_EQ(number, 42); } TEST(Prefetch, TemporalLocalityMedium) { absl::base_internal::PrefetchT1(&number); EXPECT_EQ(number, 42); } TEST(Prefetch, TemporalLocalityHigh) { absl::base_internal::PrefetchT0(&number); EXPECT_EQ(number, 42); } } // namespace abseil-20220623.1/absl/base/internal/pretty_function.h000066400000000000000000000024661430371345100224370ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ #define ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ // ABSL_PRETTY_FUNCTION // // In C++11, __func__ gives the undecorated name of the current function. That // is, "main", not "int main()". 
Various compilers give extra macros to get the // decorated function name, including return type and arguments, to // differentiate between overload sets. ABSL_PRETTY_FUNCTION is a portable // version of these macros which forwards to the correct macro on each compiler. #if defined(_MSC_VER) #define ABSL_PRETTY_FUNCTION __FUNCSIG__ #elif defined(__GNUC__) #define ABSL_PRETTY_FUNCTION __PRETTY_FUNCTION__ #else #error "Unsupported compiler" #endif #endif // ABSL_BASE_INTERNAL_PRETTY_FUNCTION_H_ abseil-20220623.1/absl/base/internal/raw_logging.cc000066400000000000000000000175461430371345100216450ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/raw_logging.h" #include #include #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/errno_saver.h" #include "absl/base/log_severity.h" // We know how to perform low-level writes to stderr in POSIX and Windows. For // these platforms, we define the token ABSL_LOW_LEVEL_WRITE_SUPPORTED. // Much of raw_logging.cc becomes a no-op when we can't output messages, // although a FATAL ABSL_RAW_LOG message will still abort the process. // ABSL_HAVE_POSIX_WRITE is defined when the platform provides posix write() // (as from unistd.h) // // This preprocessor token is also defined in raw_io.cc. 
If you need to copy // this, consider moving both to config.h instead. #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \ defined(__Fuchsia__) || defined(__native_client__) || \ defined(__OpenBSD__) || defined(__EMSCRIPTEN__) || defined(__ASYLO__) #include #define ABSL_HAVE_POSIX_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_POSIX_WRITE #endif // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall // syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len); // for low level operations that want to avoid libc. #if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \ !defined(__ANDROID__) #include #define ABSL_HAVE_SYSCALL_WRITE 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_SYSCALL_WRITE #endif #ifdef _WIN32 #include #define ABSL_HAVE_RAW_IO 1 #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1 #else #undef ABSL_HAVE_RAW_IO #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace raw_logging_internal { namespace { // TODO(gfalcon): We want raw-logging to work on as many platforms as possible. // Explicitly `#error` out when not `ABSL_LOW_LEVEL_WRITE_SUPPORTED`, except for // a selected set of platforms for which we expect not to be able to raw log. #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED constexpr char kTruncated[] = " ... (message truncated)\n"; // sprintf the format to the buffer, adjusting *buf and *size to reflect the // consumed bytes, and return whether the message fit without truncation. If // truncation occurred, if possible leave room in the buffer for the message // kTruncated[]. 
bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(3, 0); bool VADoRawLog(char** buf, int* size, const char* format, va_list ap) { int n = vsnprintf(*buf, *size, format, ap); bool result = true; if (n < 0 || n > *size) { result = false; if (static_cast(*size) > sizeof(kTruncated)) { n = *size - sizeof(kTruncated); // room for truncation message } else { n = 0; // no room for truncation message } } *size -= n; *buf += n; return result; } #endif // ABSL_LOW_LEVEL_WRITE_SUPPORTED constexpr int kLogBufSize = 3000; // CAVEAT: vsnprintf called from *DoRawLog below has some (exotic) code paths // that invoke malloc() and getenv() that might acquire some locks. // Helper for RawLog below. // *DoRawLog writes to *buf of *size and move them past the written portion. // It returns true iff there was no overflow or error. bool DoRawLog(char** buf, int* size, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(3, 4); bool DoRawLog(char** buf, int* size, const char* format, ...) 
{ va_list ap; va_start(ap, format); int n = vsnprintf(*buf, *size, format, ap); va_end(ap); if (n < 0 || n > *size) return false; *size -= n; *buf += n; return true; } bool DefaultLogFilterAndPrefix(absl::LogSeverity, const char* file, int line, char** buf, int* buf_size) { DoRawLog(buf, buf_size, "[%s : %d] RAW: ", file, line); return true; } ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook log_filter_and_prefix_hook(DefaultLogFilterAndPrefix); ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES absl::base_internal::AtomicHook abort_hook; void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) ABSL_PRINTF_ATTRIBUTE(4, 0); void RawLogVA(absl::LogSeverity severity, const char* file, int line, const char* format, va_list ap) { char buffer[kLogBufSize]; char* buf = buffer; int size = sizeof(buffer); #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED bool enabled = true; #else bool enabled = false; #endif #ifdef ABSL_MIN_LOG_LEVEL if (severity < static_cast(ABSL_MIN_LOG_LEVEL) && severity < absl::LogSeverity::kFatal) { enabled = false; } #endif enabled = log_filter_and_prefix_hook(severity, file, line, &buf, &size); const char* const prefix_end = buf; #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED if (enabled) { bool no_chop = VADoRawLog(&buf, &size, format, ap); if (no_chop) { DoRawLog(&buf, &size, "\n"); } else { DoRawLog(&buf, &size, "%s", kTruncated); } AsyncSignalSafeWriteToStderr(buffer, strlen(buffer)); } #else static_cast(format); static_cast(ap); static_cast(enabled); #endif // Abort the process after logging a FATAL message, even if the output itself // was suppressed. if (severity == absl::LogSeverity::kFatal) { abort_hook(file, line, buffer, prefix_end, buffer + kLogBufSize); abort(); } } // Non-formatting version of RawLog(). // // TODO(gfalcon): When string_view no longer depends on base, change this // interface to take its message as a string_view instead. 
void DefaultInternalLog(absl::LogSeverity severity, const char* file, int line, const std::string& message) { RawLog(severity, file, line, "%.*s", static_cast(message.size()), message.data()); } } // namespace void AsyncSignalSafeWriteToStderr(const char* s, size_t len) { absl::base_internal::ErrnoSaver errno_saver; #if defined(ABSL_HAVE_SYSCALL_WRITE) // We prefer calling write via `syscall` to minimize the risk of libc doing // something "helpful". syscall(SYS_write, STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_POSIX_WRITE) write(STDERR_FILENO, s, len); #elif defined(ABSL_HAVE_RAW_IO) _write(/* stderr */ 2, s, len); #else // stderr logging unsupported on this platform (void) s; (void) len; #endif } void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) { va_list ap; va_start(ap, format); RawLogVA(severity, file, line, format, ap); va_end(ap); } bool RawLoggingFullySupported() { #ifdef ABSL_LOW_LEVEL_WRITE_SUPPORTED return true; #else // !ABSL_LOW_LEVEL_WRITE_SUPPORTED return false; #endif // !ABSL_LOW_LEVEL_WRITE_SUPPORTED } ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL absl::base_internal::AtomicHook internal_log_function(DefaultInternalLog); void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func) { log_filter_and_prefix_hook.Store(func); } void RegisterAbortHook(AbortHook func) { abort_hook.Store(func); } void RegisterInternalLogFunction(InternalLogFunction func) { internal_log_function.Store(func); } } // namespace raw_logging_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/raw_logging.h000066400000000000000000000225201430371345100214730ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Thread-safe logging routines that do not allocate any memory or // acquire any locks, and can therefore be used by low-level memory // allocation, synchronization, and signal-handling code. #ifndef ABSL_BASE_INTERNAL_RAW_LOGGING_H_ #define ABSL_BASE_INTERNAL_RAW_LOGGING_H_ #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/log_severity.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" #include "absl/base/port.h" // This is similar to LOG(severity) << format..., but // * it is to be used ONLY by low-level modules that can't use normal LOG() // * it is designed to be a low-level logger that does not allocate any // memory and does not need any locks, hence: // * it logs straight and ONLY to STDERR w/o buffering // * it uses an explicit printf-format and arguments list // * it will silently chop off really long message strings // Usage example: // ABSL_RAW_LOG(ERROR, "Failed foo with %i: %s", status, error); // This will print an almost standard log line like this to stderr only: // E0821 211317 file.cc:123] RAW: Failed foo with 22: bad_file #define ABSL_RAW_LOG(severity, ...) 
\ do { \ constexpr const char* absl_raw_logging_internal_basename = \ ::absl::raw_logging_internal::Basename(__FILE__, \ sizeof(__FILE__) - 1); \ ::absl::raw_logging_internal::RawLog(ABSL_RAW_LOGGING_INTERNAL_##severity, \ absl_raw_logging_internal_basename, \ __LINE__, __VA_ARGS__); \ } while (0) // Similar to CHECK(condition) << message, but for low-level modules: // we use only ABSL_RAW_LOG that does not allocate memory. // We do not want to provide args list here to encourage this usage: // if (!cond) ABSL_RAW_LOG(FATAL, "foo ...", hard_to_compute_args); // so that the args are not computed when not needed. #define ABSL_RAW_CHECK(condition, message) \ do { \ if (ABSL_PREDICT_FALSE(!(condition))) { \ ABSL_RAW_LOG(FATAL, "Check %s failed: %s", #condition, message); \ } \ } while (0) // ABSL_INTERNAL_LOG and ABSL_INTERNAL_CHECK work like the RAW variants above, // except that if the richer log library is linked into the binary, we dispatch // to that instead. This is potentially useful for internal logging and // assertions, where we are using RAW_LOG neither for its async-signal-safety // nor for its non-allocating nature, but rather because raw logging has very // few other dependencies. // // The API is a subset of the above: each macro only takes two arguments. Use // StrCat if you need to build a richer message. 
#define ABSL_INTERNAL_LOG(severity, message) \ do { \ constexpr const char* absl_raw_logging_internal_filename = __FILE__; \ ::absl::raw_logging_internal::internal_log_function( \ ABSL_RAW_LOGGING_INTERNAL_##severity, \ absl_raw_logging_internal_filename, __LINE__, message); \ if (ABSL_RAW_LOGGING_INTERNAL_##severity == ::absl::LogSeverity::kFatal) \ ABSL_INTERNAL_UNREACHABLE; \ } while (0) #define ABSL_INTERNAL_CHECK(condition, message) \ do { \ if (ABSL_PREDICT_FALSE(!(condition))) { \ std::string death_message = "Check " #condition " failed: "; \ death_message += std::string(message); \ ABSL_INTERNAL_LOG(FATAL, death_message); \ } \ } while (0) #define ABSL_RAW_LOGGING_INTERNAL_INFO ::absl::LogSeverity::kInfo #define ABSL_RAW_LOGGING_INTERNAL_WARNING ::absl::LogSeverity::kWarning #define ABSL_RAW_LOGGING_INTERNAL_ERROR ::absl::LogSeverity::kError #define ABSL_RAW_LOGGING_INTERNAL_FATAL ::absl::LogSeverity::kFatal #define ABSL_RAW_LOGGING_INTERNAL_LEVEL(severity) \ ::absl::NormalizeLogSeverity(severity) namespace absl { ABSL_NAMESPACE_BEGIN namespace raw_logging_internal { // Helper function to implement ABSL_RAW_LOG // Logs format... at "severity" level, reporting it // as called from file:line. // This does not allocate memory or acquire locks. void RawLog(absl::LogSeverity severity, const char* file, int line, const char* format, ...) ABSL_PRINTF_ATTRIBUTE(4, 5); // Writes the provided buffer directly to stderr, in a signal-safe, low-level // manner. void AsyncSignalSafeWriteToStderr(const char* s, size_t len); // compile-time function to get the "base" filename, that is, the part of // a filename after the last "/" or "\" path separator. The search starts at // the end of the string; the second parameter is the length of the string. constexpr const char* Basename(const char* fname, int offset) { return offset == 0 || fname[offset - 1] == '/' || fname[offset - 1] == '\\' ? fname + offset : Basename(fname, offset - 1); } // For testing only. 
// Returns true if raw logging is fully supported. When it is not // fully supported, no messages will be emitted, but a log at FATAL // severity will cause an abort. // // TODO(gfalcon): Come up with a better name for this method. bool RawLoggingFullySupported(); // Function type for a raw_logging customization hook for suppressing messages // by severity, and for writing custom prefixes on non-suppressed messages. // // The installed hook is called for every raw log invocation. The message will // be logged to stderr only if the hook returns true. FATAL errors will cause // the process to abort, even if writing to stderr is suppressed. The hook is // also provided with an output buffer, where it can write a custom log message // prefix. // // The raw_logging system does not allocate memory or grab locks. User-provided // hooks must avoid these operations, and must not throw exceptions. // // 'severity' is the severity level of the message being written. // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro // was located. // 'buf' and 'buf_size' are pointers to the buffer and buffer size. If the // hook writes a prefix, it must increment *buf and decrement *buf_size // accordingly. using LogFilterAndPrefixHook = bool (*)(absl::LogSeverity severity, const char* file, int line, char** buf, int* buf_size); // Function type for a raw_logging customization hook called to abort a process // when a FATAL message is logged. If the provided AbortHook() returns, the // logging system will call abort(). // // 'file' and 'line' are the file and line number where the ABSL_RAW_LOG macro // was located. // The NUL-terminated logged message lives in the buffer between 'buf_start' // and 'buf_end'. 'prefix_end' points to the first non-prefix character of the // buffer (as written by the LogFilterAndPrefixHook.) // // The lifetime of the filename and message buffers will not end while the // process remains alive. 
using AbortHook = void (*)(const char* file, int line, const char* buf_start, const char* prefix_end, const char* buf_end); // Internal logging function for ABSL_INTERNAL_LOG to dispatch to. // // TODO(gfalcon): When string_view no longer depends on base, change this // interface to take its message as a string_view instead. using InternalLogFunction = void (*)(absl::LogSeverity severity, const char* file, int line, const std::string& message); ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES ABSL_DLL extern base_internal::AtomicHook< InternalLogFunction> internal_log_function; // Registers hooks of the above types. Only a single hook of each type may be // registered. It is an error to call these functions multiple times with // different input arguments. // // These functions are safe to call at any point during initialization; they do // not block or malloc, and are async-signal safe. void RegisterLogFilterAndPrefixHook(LogFilterAndPrefixHook func); void RegisterAbortHook(AbortHook func); void RegisterInternalLogFunction(InternalLogFunction func); } // namespace raw_logging_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_RAW_LOGGING_H_ abseil-20220623.1/absl/base/internal/scheduling_mode.h000066400000000000000000000045521430371345100223320ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Core interfaces and definitions used by by low-level interfaces such as // SpinLock. 
#ifndef ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ #define ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Used to describe how a thread may be scheduled. Typically associated with // the declaration of a resource supporting synchronized access. // // SCHEDULE_COOPERATIVE_AND_KERNEL: // Specifies that when waiting, a cooperative thread (e.g. a Fiber) may // reschedule (using base::scheduling semantics); allowing other cooperative // threads to proceed. // // SCHEDULE_KERNEL_ONLY: (Also described as "non-cooperative") // Specifies that no cooperative scheduling semantics may be used, even if the // current thread is itself cooperatively scheduled. This means that // cooperative threads will NOT allow other cooperative threads to execute in // their place while waiting for a resource of this type. Host operating system // semantics (e.g. a futex) may still be used. // // When optional, clients should strongly prefer SCHEDULE_COOPERATIVE_AND_KERNEL // by default. SCHEDULE_KERNEL_ONLY should only be used for resources on which // base::scheduling (e.g. the implementation of a Scheduler) may depend. // // NOTE: Cooperative resources may not be nested below non-cooperative ones. // This means that it is invalid to to acquire a SCHEDULE_COOPERATIVE_AND_KERNEL // resource if a SCHEDULE_KERNEL_ONLY resource is already held. enum SchedulingMode { SCHEDULE_KERNEL_ONLY = 0, // Allow scheduling only the host OS. SCHEDULE_COOPERATIVE_AND_KERNEL, // Also allow cooperative scheduling. }; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SCHEDULING_MODE_H_ abseil-20220623.1/absl/base/internal/scoped_set_env.cc000066400000000000000000000037361430371345100223420ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/scoped_set_env.h" #ifdef _WIN32 #include #endif #include #include "absl/base/internal/raw_logging.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { #ifdef _WIN32 const int kMaxEnvVarValueSize = 1024; #endif void SetEnvVar(const char* name, const char* value) { #ifdef _WIN32 SetEnvironmentVariableA(name, value); #else if (value == nullptr) { ::unsetenv(name); } else { ::setenv(name, value, 1); } #endif } } // namespace ScopedSetEnv::ScopedSetEnv(const char* var_name, const char* new_value) : var_name_(var_name), was_unset_(false) { #ifdef _WIN32 char buf[kMaxEnvVarValueSize]; auto get_res = GetEnvironmentVariableA(var_name_.c_str(), buf, sizeof(buf)); ABSL_INTERNAL_CHECK(get_res < sizeof(buf), "value exceeds buffer size"); if (get_res == 0) { was_unset_ = (GetLastError() == ERROR_ENVVAR_NOT_FOUND); } else { old_value_.assign(buf, get_res); } SetEnvironmentVariableA(var_name_.c_str(), new_value); #else const char* val = ::getenv(var_name_.c_str()); if (val == nullptr) { was_unset_ = true; } else { old_value_ = val; } #endif SetEnvVar(var_name_.c_str(), new_value); } ScopedSetEnv::~ScopedSetEnv() { SetEnvVar(var_name_.c_str(), was_unset_ ? 
nullptr : old_value_.c_str()); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/scoped_set_env.h000066400000000000000000000022431430371345100221740ustar00rootroot00000000000000// // Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ #define ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { class ScopedSetEnv { public: ScopedSetEnv(const char* var_name, const char* new_value); ~ScopedSetEnv(); private: std::string var_name_; std::string old_value_; // True if the environment variable was initially not set. bool was_unset_; }; } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SCOPED_SET_ENV_H_ abseil-20220623.1/absl/base/internal/scoped_set_env_test.cc000066400000000000000000000047771430371345100234070ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifdef _WIN32 #include #endif #include "gtest/gtest.h" #include "absl/base/internal/scoped_set_env.h" namespace { using absl::base_internal::ScopedSetEnv; std::string GetEnvVar(const char* name) { #ifdef _WIN32 char buf[1024]; auto get_res = GetEnvironmentVariableA(name, buf, sizeof(buf)); if (get_res >= sizeof(buf)) { return "TOO_BIG"; } if (get_res == 0) { return "UNSET"; } return std::string(buf, get_res); #else const char* val = ::getenv(name); if (val == nullptr) { return "UNSET"; } return val; #endif } TEST(ScopedSetEnvTest, SetNonExistingVarToString) { EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); } EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); } TEST(ScopedSetEnvTest, SetNonExistingVarToNull) { EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr); EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); } EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); } TEST(ScopedSetEnvTest, SetExistingVarToString) { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "new_value"); EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "new_value"); } EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); } TEST(ScopedSetEnvTest, SetExistingVarToNull) { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", "value"); 
EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); { ScopedSetEnv scoped_set("SCOPED_SET_ENV_TEST_VAR", nullptr); EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "UNSET"); } EXPECT_EQ(GetEnvVar("SCOPED_SET_ENV_TEST_VAR"), "value"); } } // namespace abseil-20220623.1/absl/base/internal/spinlock.cc000066400000000000000000000225131430371345100211560ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/spinlock.h" #include #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/atomic_hook.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/spinlock_wait.h" #include "absl/base/internal/sysinfo.h" /* For NumCPUs() */ #include "absl/base/call_once.h" // Description of lock-word: // 31..00: [............................3][2][1][0] // // [0]: kSpinLockHeld // [1]: kSpinLockCooperative // [2]: kSpinLockDisabledScheduling // [31..3]: ONLY kSpinLockSleeper OR // Wait time in cycles >> PROFILE_TIMESTAMP_SHIFT // // Detailed descriptions: // // Bit [0]: The lock is considered held iff kSpinLockHeld is set. // // Bit [1]: Eligible waiters (e.g. Fibers) may co-operatively reschedule when // contended iff kSpinLockCooperative is set. // // Bit [2]: This bit is exclusive from bit [1]. It is used only by a // non-cooperative lock. 
When set, indicates that scheduling was // successfully disabled when the lock was acquired. May be unset, // even if non-cooperative, if a ThreadIdentity did not yet exist at // time of acquisition. // // Bit [3]: If this is the only upper bit ([31..3]) set then this lock was // acquired without contention, however, at least one waiter exists. // // Otherwise, bits [31..3] represent the time spent by the current lock // holder to acquire the lock. There may be outstanding waiter(s). namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { ABSL_INTERNAL_ATOMIC_HOOK_ATTRIBUTES static base_internal::AtomicHook submit_profile_data; void RegisterSpinLockProfiler(void (*fn)(const void *contendedlock, int64_t wait_cycles)) { submit_profile_data.Store(fn); } #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL // Static member variable definitions. constexpr uint32_t SpinLock::kSpinLockHeld; constexpr uint32_t SpinLock::kSpinLockCooperative; constexpr uint32_t SpinLock::kSpinLockDisabledScheduling; constexpr uint32_t SpinLock::kSpinLockSleeper; constexpr uint32_t SpinLock::kWaitTimeMask; #endif // Uncommon constructors. SpinLock::SpinLock(base_internal::SchedulingMode mode) : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); } // Monitor the lock to see if its value changes within some time period // (adaptive_spin_count loop iterations). The last value read from the lock // is returned from the method. uint32_t SpinLock::SpinLoop() { // We are already in the slow path of SpinLock, initialize the // adaptive_spin_count here. ABSL_CONST_INIT static absl::once_flag init_adaptive_spin_count; ABSL_CONST_INIT static int adaptive_spin_count = 0; base_internal::LowLevelCallOnce(&init_adaptive_spin_count, []() { adaptive_spin_count = base_internal::NumCPUs() > 1 ? 
1000 : 1; }); int c = adaptive_spin_count; uint32_t lock_value; do { lock_value = lockword_.load(std::memory_order_relaxed); } while ((lock_value & kSpinLockHeld) != 0 && --c > 0); return lock_value; } void SpinLock::SlowLock() { uint32_t lock_value = SpinLoop(); lock_value = TryLockInternal(lock_value, 0); if ((lock_value & kSpinLockHeld) == 0) { return; } base_internal::SchedulingMode scheduling_mode; if ((lock_value & kSpinLockCooperative) != 0) { scheduling_mode = base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; } else { scheduling_mode = base_internal::SCHEDULE_KERNEL_ONLY; } // The lock was not obtained initially, so this thread needs to wait for // it. Record the current timestamp in the local variable wait_start_time // so the total wait time can be stored in the lockword once this thread // obtains the lock. int64_t wait_start_time = CycleClock::Now(); uint32_t wait_cycles = 0; int lock_wait_call_count = 0; while ((lock_value & kSpinLockHeld) != 0) { // If the lock is currently held, but not marked as having a sleeper, mark // it as having a sleeper. if ((lock_value & kWaitTimeMask) == 0) { // Here, just "mark" that the thread is going to sleep. Don't store the // lock wait time in the lock -- the lock word stores the amount of time // that the current holder waited before acquiring the lock, not the wait // time of any thread currently waiting to acquire it. if (lockword_.compare_exchange_strong( lock_value, lock_value | kSpinLockSleeper, std::memory_order_relaxed, std::memory_order_relaxed)) { // Successfully transitioned to kSpinLockSleeper. Pass // kSpinLockSleeper to the SpinLockWait routine to properly indicate // the last lock_value observed. lock_value |= kSpinLockSleeper; } else if ((lock_value & kSpinLockHeld) == 0) { // Lock is free again, so try and acquire it before sleeping. The // new lock state will be the number of cycles this thread waited if // this thread obtains the lock. 
lock_value = TryLockInternal(lock_value, wait_cycles); continue; // Skip the delay at the end of the loop. } else if ((lock_value & kWaitTimeMask) == 0) { // The lock is still held, without a waiter being marked, but something // else about the lock word changed, causing our CAS to fail. For // example, a new lock holder may have acquired the lock with // kSpinLockDisabledScheduling set, whereas the previous holder had not // set that flag. In this case, attempt again to mark ourselves as a // waiter. continue; } } // SpinLockDelay() calls into fiber scheduler, we need to see // synchronization there to avoid false positives. ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); // Wait for an OS specific delay. base_internal::SpinLockDelay(&lockword_, lock_value, ++lock_wait_call_count, scheduling_mode); ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); // Spin again after returning from the wait routine to give this thread // some chance of obtaining the lock. lock_value = SpinLoop(); wait_cycles = EncodeWaitCycles(wait_start_time, CycleClock::Now()); lock_value = TryLockInternal(lock_value, wait_cycles); } } void SpinLock::SlowUnlock(uint32_t lock_value) { base_internal::SpinLockWake(&lockword_, false); // wake waiter if necessary // If our acquisition was contended, collect contentionz profile info. We // reserve a unitary wait time to represent that a waiter exists without our // own acquisition having been contended. if ((lock_value & kWaitTimeMask) != kSpinLockSleeper) { const uint64_t wait_cycles = DecodeWaitCycles(lock_value); ABSL_TSAN_MUTEX_PRE_DIVERT(this, 0); submit_profile_data(this, wait_cycles); ABSL_TSAN_MUTEX_POST_DIVERT(this, 0); } } // We use the upper 29 bits of the lock word to store the time spent waiting to // acquire this lock. This is reported by contentionz profiling. Since the // lower bits of the cycle counter wrap very quickly on high-frequency // processors we divide to reduce the granularity to 2^kProfileTimestampShift // sized units. 
On a 4Ghz machine this will lose track of wait times greater // than (2^29/4 Ghz)*128 =~ 17.2 seconds. Such waits should be extremely rare. static constexpr int kProfileTimestampShift = 7; // We currently reserve the lower 3 bits. static constexpr int kLockwordReservedShift = 3; uint32_t SpinLock::EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time) { static const int64_t kMaxWaitTime = std::numeric_limits::max() >> kLockwordReservedShift; int64_t scaled_wait_time = (wait_end_time - wait_start_time) >> kProfileTimestampShift; // Return a representation of the time spent waiting that can be stored in // the lock word's upper bits. uint32_t clamped = static_cast( std::min(scaled_wait_time, kMaxWaitTime) << kLockwordReservedShift); if (clamped == 0) { return kSpinLockSleeper; // Just wake waiters, but don't record contention. } // Bump up value if necessary to avoid returning kSpinLockSleeper. const uint32_t kMinWaitTime = kSpinLockSleeper + (1 << kLockwordReservedShift); if (clamped == kSpinLockSleeper) { return kMinWaitTime; } return clamped; } uint64_t SpinLock::DecodeWaitCycles(uint32_t lock_value) { // Cast to uint32_t first to ensure bits [63:32] are cleared. const uint64_t scaled_wait_time = static_cast(lock_value & kWaitTimeMask); return scaled_wait_time << (kProfileTimestampShift - kLockwordReservedShift); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/spinlock.h000066400000000000000000000231151430371345100210170ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Most users requiring mutual exclusion should use Mutex. // SpinLock is provided for use in two situations: // - for use by Abseil internal code that Mutex itself depends on // - for async signal safety (see below) // SpinLock is async signal safe. If a spinlock is used within a signal // handler, all code that acquires the lock must ensure that the signal cannot // arrive while they are holding the lock. Typically, this is done by blocking // the signal. // // Threads waiting on a SpinLock may be woken in an arbitrary order. #ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_ #define ABSL_BASE_INTERNAL_SPINLOCK_H_ #include #include #include #include "absl/base/attributes.h" #include "absl/base/const_init.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/tsan_mutex_interface.h" #include "absl/base/macros.h" #include "absl/base/port.h" #include "absl/base/thread_annotations.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { class ABSL_LOCKABLE SpinLock { public: SpinLock() : lockword_(kSpinLockCooperative) { ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static); } // Constructors that allow non-cooperative spinlocks to be created for use // inside thread schedulers. Normal clients should not use these. explicit SpinLock(base_internal::SchedulingMode mode); // Constructor for global SpinLock instances. See absl/base/const_init.h. 
constexpr SpinLock(absl::ConstInitType, base_internal::SchedulingMode mode) : lockword_(IsCooperative(mode) ? kSpinLockCooperative : 0) {} // For global SpinLock instances prefer trivial destructor when possible. // Default but non-trivial destructor in some build configurations causes an // extra static initializer. #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); } #else ~SpinLock() = default; #endif // Acquire this SpinLock. inline void Lock() ABSL_EXCLUSIVE_LOCK_FUNCTION() { ABSL_TSAN_MUTEX_PRE_LOCK(this, 0); if (!TryLockImpl()) { SlowLock(); } ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0); } // Try to acquire this SpinLock without blocking and return true if the // acquisition was successful. If the lock was not acquired, false is // returned. If this SpinLock is free at the time of the call, TryLock // will return true with high probability. inline bool TryLock() ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(true) { ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock); bool res = TryLockImpl(); ABSL_TSAN_MUTEX_POST_LOCK( this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed), 0); return res; } // Release this SpinLock, which must be held by the calling thread. inline void Unlock() ABSL_UNLOCK_FUNCTION() { ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0); uint32_t lock_value = lockword_.load(std::memory_order_relaxed); lock_value = lockword_.exchange(lock_value & kSpinLockCooperative, std::memory_order_release); if ((lock_value & kSpinLockDisabledScheduling) != 0) { base_internal::SchedulingGuard::EnableRescheduling(true); } if ((lock_value & kWaitTimeMask) != 0) { // Collect contentionz profile info, and speed the wakeup of any waiter. // The wait_cycles value indicates how long this thread spent waiting // for the lock. SlowUnlock(lock_value); } ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0); } // Determine if the lock is held. When the lock is held by the invoking // thread, true will always be returned. 
Intended to be used as // CHECK(lock.IsHeld()). inline bool IsHeld() const { return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0; } // Return immediately if this thread holds the SpinLock exclusively. // Otherwise, report an error by crashing with a diagnostic. inline void AssertHeld() const ABSL_ASSERT_EXCLUSIVE_LOCK() { if (!IsHeld()) { ABSL_RAW_LOG(FATAL, "thread should hold the lock on SpinLock"); } } protected: // These should not be exported except for testing. // Store number of cycles between wait_start_time and wait_end_time in a // lock value. static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time); // Extract number of wait cycles in a lock value. static uint64_t DecodeWaitCycles(uint32_t lock_value); // Provide access to protected method above. Use for testing only. friend struct SpinLockTest; private: // lockword_ is used to store the following: // // bit[0] encodes whether a lock is being held. // bit[1] encodes whether a lock uses cooperative scheduling. // bit[2] encodes whether the current lock holder disabled scheduling when // acquiring the lock. Only set when kSpinLockHeld is also set. // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int. // This is set by the lock holder to indicate how long it waited on // the lock before eventually acquiring it. The number of cycles is // encoded as a 29-bit unsigned int, or in the case that the current // holder did not wait but another waiter is queued, the LSB // (kSpinLockSleeper) is set. The implementation does not explicitly // track the number of queued waiters beyond this. It must always be // assumed that waiters may exist if the current holder was required to // queue. // // Invariant: if the lock is not held, the value is either 0 or // kSpinLockCooperative. 
static constexpr uint32_t kSpinLockHeld = 1; static constexpr uint32_t kSpinLockCooperative = 2; static constexpr uint32_t kSpinLockDisabledScheduling = 4; static constexpr uint32_t kSpinLockSleeper = 8; // Includes kSpinLockSleeper. static constexpr uint32_t kWaitTimeMask = ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling); // Returns true if the provided scheduling mode is cooperative. static constexpr bool IsCooperative( base_internal::SchedulingMode scheduling_mode) { return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL; } uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles); void SlowLock() ABSL_ATTRIBUTE_COLD; void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD; uint32_t SpinLoop(); inline bool TryLockImpl() { uint32_t lock_value = lockword_.load(std::memory_order_relaxed); return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0; } std::atomic lockword_; SpinLock(const SpinLock&) = delete; SpinLock& operator=(const SpinLock&) = delete; }; // Corresponding locker object that arranges to acquire a spinlock for // the duration of a C++ scope. class ABSL_SCOPED_LOCKABLE SpinLockHolder { public: inline explicit SpinLockHolder(SpinLock* l) ABSL_EXCLUSIVE_LOCK_FUNCTION(l) : lock_(l) { l->Lock(); } inline ~SpinLockHolder() ABSL_UNLOCK_FUNCTION() { lock_->Unlock(); } SpinLockHolder(const SpinLockHolder&) = delete; SpinLockHolder& operator=(const SpinLockHolder&) = delete; private: SpinLock* lock_; }; // Register a hook for profiling support. // // The function pointer registered here will be called whenever a spinlock is // contended. The callback is given an opaque handle to the contended spinlock // and the number of wait cycles. This is thread-safe, but only a single // profiler can be registered. It is an error to call this function multiple // times with different arguments. 
void RegisterSpinLockProfiler(void (*fn)(const void* lock, int64_t wait_cycles)); //------------------------------------------------------------------------------ // Public interface ends here. //------------------------------------------------------------------------------ // If (result & kSpinLockHeld) == 0, then *this was successfully locked. // Otherwise, returns last observed value for lockword_. inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value, uint32_t wait_cycles) { if ((lock_value & kSpinLockHeld) != 0) { return lock_value; } uint32_t sched_disabled_bit = 0; if ((lock_value & kSpinLockCooperative) == 0) { // For non-cooperative locks we must make sure we mark ourselves as // non-reschedulable before we attempt to CompareAndSwap. if (base_internal::SchedulingGuard::DisableRescheduling()) { sched_disabled_bit = kSpinLockDisabledScheduling; } } if (!lockword_.compare_exchange_strong( lock_value, kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit, std::memory_order_acquire, std::memory_order_relaxed)) { base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0); } return lock_value; } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SPINLOCK_H_ abseil-20220623.1/absl/base/internal/spinlock_akaros.inc000066400000000000000000000025171430371345100227040ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // This file is an Akaros-specific part of spinlock_wait.cc #include #include "absl/base/internal/scheduling_mode.h" extern "C" { ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int /* loop */, absl::base_internal::SchedulingMode /* mode */) { // In Akaros, one must take care not to call anything that could cause a // malloc(), a blocking system call, or a uthread_yield() while holding a // spinlock. Our callers assume will not call into libraries or other // arbitrary code. } ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" abseil-20220623.1/absl/base/internal/spinlock_benchmark.cc000066400000000000000000000034321430371345100231670ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // See also //absl/synchronization:mutex_benchmark for a comparison of SpinLock // and Mutex performance under varying levels of contention. #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/spinlock.h" #include "absl/synchronization/internal/create_thread_identity.h" #include "benchmark/benchmark.h" namespace { template static void BM_SpinLock(benchmark::State& state) { // Ensure a ThreadIdentity is installed. 
ABSL_INTERNAL_CHECK( absl::synchronization_internal::GetOrCreateCurrentThreadIdentity() != nullptr, "GetOrCreateCurrentThreadIdentity() failed"); static auto* spinlock = new absl::base_internal::SpinLock(scheduling_mode); for (auto _ : state) { absl::base_internal::SpinLockHolder holder(spinlock); } } BENCHMARK_TEMPLATE(BM_SpinLock, absl::base_internal::SCHEDULE_KERNEL_ONLY) ->UseRealTime() ->Threads(1) ->ThreadPerCpu(); BENCHMARK_TEMPLATE(BM_SpinLock, absl::base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL) ->UseRealTime() ->Threads(1) ->ThreadPerCpu(); } // namespace abseil-20220623.1/absl/base/internal/spinlock_linux.inc000066400000000000000000000045231430371345100225620ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This file is a Linux-specific part of spinlock_wait.cc #include #include #include #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/internal/errno_saver.h" // The SpinLock lockword is `std::atomic`. Here we assert that // `std::atomic` is bitwise equivalent of the `int` expected // by SYS_futex. We also assume that reads/writes done to the lockword // by SYS_futex have rational semantics with regard to the // std::atomic<> API. C++ provides no guarantees of these assumptions, // but they are believed to hold in practice. 
static_assert(sizeof(std::atomic) == sizeof(int), "SpinLock lockword has the wrong size for a futex"); // Some Android headers are missing these definitions even though they // support these futex operations. #ifdef __BIONIC__ #ifndef SYS_futex #define SYS_futex __NR_futex #endif #ifndef FUTEX_PRIVATE_FLAG #define FUTEX_PRIVATE_FLAG 128 #endif #endif #if defined(__NR_futex_time64) && !defined(SYS_futex_time64) #define SYS_futex_time64 __NR_futex_time64 #endif #if defined(SYS_futex_time64) && !defined(SYS_futex) #define SYS_futex SYS_futex_time64 #endif extern "C" { ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int, absl::base_internal::SchedulingMode) { absl::base_internal::ErrnoSaver errno_saver; syscall(SYS_futex, w, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, value, nullptr); } ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic *w, bool all) { syscall(SYS_futex, w, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, all ? INT_MAX : 1, 0); } } // extern "C" abseil-20220623.1/absl/base/internal/spinlock_posix.inc000066400000000000000000000027141430371345100225650ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // This file is a Posix-specific part of spinlock_wait.cc #include #include #include #include "absl/base/internal/errno_saver.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/port.h" extern "C" { ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int loop, absl::base_internal::SchedulingMode /* mode */) { absl::base_internal::ErrnoSaver errno_saver; if (loop == 0) { } else if (loop == 1) { sched_yield(); } else { struct timespec tm; tm.tv_sec = 0; tm.tv_nsec = absl::base_internal::SpinLockSuggestedDelayNS(loop); nanosleep(&tm, nullptr); } } ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" abseil-20220623.1/absl/base/internal/spinlock_wait.cc000066400000000000000000000054121430371345100222010ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // The OS-specific header included below must provide two calls: // AbslInternalSpinLockDelay() and AbslInternalSpinLockWake(). // See spinlock_wait.h for the specs. 
#include #include #include "absl/base/internal/spinlock_wait.h" #if defined(_WIN32) #include "absl/base/internal/spinlock_win32.inc" #elif defined(__linux__) #include "absl/base/internal/spinlock_linux.inc" #elif defined(__akaros__) #include "absl/base/internal/spinlock_akaros.inc" #else #include "absl/base/internal/spinlock_posix.inc" #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // See spinlock_wait.h for spec. uint32_t SpinLockWait(std::atomic *w, int n, const SpinLockWaitTransition trans[], base_internal::SchedulingMode scheduling_mode) { int loop = 0; for (;;) { uint32_t v = w->load(std::memory_order_acquire); int i; for (i = 0; i != n && v != trans[i].from; i++) { } if (i == n) { SpinLockDelay(w, v, ++loop, scheduling_mode); // no matching transition } else if (trans[i].to == v || // null transition w->compare_exchange_strong(v, trans[i].to, std::memory_order_acquire, std::memory_order_relaxed)) { if (trans[i].done) return v; } } } static std::atomic delay_rand; // Return a suggested delay in nanoseconds for iteration number "loop" int SpinLockSuggestedDelayNS(int loop) { // Weak pseudo-random number generator to get some spread between threads // when many are spinning. uint64_t r = delay_rand.load(std::memory_order_relaxed); r = 0x5deece66dLL * r + 0xb; // numbers from nrand48() delay_rand.store(r, std::memory_order_relaxed); if (loop < 0 || loop > 32) { // limit loop to 0..32 loop = 32; } const int kMinDelay = 128 << 10; // 128us // Double delay every 8 iterations, up to 16x (2ms). int delay = kMinDelay << (loop / 8); // Randomize in delay..2*delay range, for resulting 128us..4ms range. return delay | ((delay - 1) & static_cast(r)); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/spinlock_wait.h000066400000000000000000000074121430371345100220450ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ #define ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ // Operations to make atomic transitions on a word, and to allow // waiting for those transitions to become possible. #include #include #include "absl/base/internal/scheduling_mode.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // SpinLockWait() waits until it can perform one of several transitions from // "from" to "to". It returns when it performs a transition where done==true. struct SpinLockWaitTransition { uint32_t from; uint32_t to; bool done; }; // Wait until *w can transition from trans[i].from to trans[i].to for some i // satisfying 0<=i *w, int n, const SpinLockWaitTransition trans[], SchedulingMode scheduling_mode); // If possible, wake some thread that has called SpinLockDelay(w, ...). If `all` // is true, wake all such threads. On some systems, this may be a no-op; on // those systems, threads calling SpinLockDelay() will always wake eventually // even if SpinLockWake() is never called. void SpinLockWake(std::atomic *w, bool all); // Wait for an appropriate spin delay on iteration "loop" of a // spin loop on location *w, whose previously observed value was "value". // SpinLockDelay() may do nothing, may yield the CPU, may sleep a clock tick, // or may wait for a call to SpinLockWake(w). 
void SpinLockDelay(std::atomic *w, uint32_t value, int loop, base_internal::SchedulingMode scheduling_mode); // Helper used by AbslInternalSpinLockDelay. // Returns a suggested delay in nanoseconds for iteration number "loop". int SpinLockSuggestedDelayNS(int loop); } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl // In some build configurations we pass --detect-odr-violations to the // gold linker. This causes it to flag weak symbol overrides as ODR // violations. Because ODR only applies to C++ and not C, // --detect-odr-violations ignores symbols not mangled with C++ names. // By changing our extension points to be extern "C", we dodge this // check. extern "C" { void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(std::atomic *w, bool all); void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode); } inline void absl::base_internal::SpinLockWake(std::atomic *w, bool all) { ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)(w, all); } inline void absl::base_internal::SpinLockDelay( std::atomic *w, uint32_t value, int loop, absl::base_internal::SchedulingMode scheduling_mode) { ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay) (w, value, loop, scheduling_mode); } #endif // ABSL_BASE_INTERNAL_SPINLOCK_WAIT_H_ abseil-20220623.1/absl/base/internal/spinlock_win32.inc000066400000000000000000000023231430371345100223610ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // // This file is a Win32-specific part of spinlock_wait.cc #include #include #include "absl/base/internal/scheduling_mode.h" extern "C" { void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockDelay)( std::atomic* /* lock_word */, uint32_t /* value */, int loop, absl::base_internal::SchedulingMode /* mode */) { if (loop == 0) { } else if (loop == 1) { Sleep(0); } else { Sleep(absl::base_internal::SpinLockSuggestedDelayNS(loop) / 1000000); } } void ABSL_INTERNAL_C_SYMBOL(AbslInternalSpinLockWake)( std::atomic* /* lock_word */, bool /* all */) {} } // extern "C" abseil-20220623.1/absl/base/internal/strerror.cc000066400000000000000000000050721430371345100212170ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/strerror.h" #include #include #include #include #include #include #include #include "absl/base/internal/errno_saver.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { const char* StrErrorAdaptor(int errnum, char* buf, size_t buflen) { #if defined(_WIN32) int rc = strerror_s(buf, buflen, errnum); buf[buflen - 1] = '\0'; // guarantee NUL termination if (rc == 0 && strncmp(buf, "Unknown error", buflen) == 0) *buf = '\0'; return buf; #else // The type of `ret` is platform-specific; both of these branches must compile // either way but only one will execute on any given platform: auto ret = strerror_r(errnum, buf, buflen); if (std::is_same::value) { // XSI `strerror_r`; `ret` is `int`: if (ret) *buf = '\0'; return buf; } else { // GNU `strerror_r`; `ret` is `char *`: return reinterpret_cast(ret); } #endif } std::string StrErrorInternal(int errnum) { char buf[100]; const char* str = StrErrorAdaptor(errnum, buf, sizeof buf); if (*str == '\0') { snprintf(buf, sizeof buf, "Unknown error %d", errnum); str = buf; } return str; } // kSysNerr is the number of errors from a recent glibc. `StrError()` falls back // to `StrErrorAdaptor()` if the value is larger than this. constexpr int kSysNerr = 135; std::array* NewStrErrorTable() { auto* table = new std::array; for (int i = 0; i < static_cast(table->size()); ++i) { (*table)[i] = StrErrorInternal(i); } return table; } } // namespace std::string StrError(int errnum) { absl::base_internal::ErrnoSaver errno_saver; static const auto* table = NewStrErrorTable(); if (errnum >= 0 && errnum < static_cast(table->size())) { return (*table)[errnum]; } return StrErrorInternal(errnum); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/strerror.h000066400000000000000000000025521430371345100210610ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_BASE_INTERNAL_STRERROR_H_ #define ABSL_BASE_INTERNAL_STRERROR_H_ #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // A portable and thread-safe alternative to C89's `strerror`. // // The C89 specification of `strerror` is not suitable for use in a // multi-threaded application as the returned string may be changed by calls to // `strerror` from another thread. The many non-stdlib alternatives differ // enough in their names, availability, and semantics to justify this wrapper // around them. `errno` will not be modified by a call to `absl::StrError`. std::string StrError(int errnum); } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_STRERROR_H_ abseil-20220623.1/absl/base/internal/strerror_benchmark.cc000066400000000000000000000016451430371345100232330ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include #include #include #include "absl/base/internal/strerror.h" #include "benchmark/benchmark.h" namespace { void BM_AbslStrError(benchmark::State& state) { for (auto _ : state) { benchmark::DoNotOptimize(absl::base_internal::StrError(ERANGE)); } } BENCHMARK(BM_AbslStrError); } // namespace abseil-20220623.1/absl/base/internal/strerror_test.cc000066400000000000000000000053161430371345100222570ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/strerror.h" #include #include #include #include #include #include // NOLINT(build/c++11) #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/strings/match.h" namespace { using ::testing::AnyOf; using ::testing::Eq; TEST(StrErrorTest, ValidErrorCode) { errno = ERANGE; EXPECT_THAT(absl::base_internal::StrError(EDOM), Eq(strerror(EDOM))); EXPECT_THAT(errno, Eq(ERANGE)); } TEST(StrErrorTest, InvalidErrorCode) { errno = ERANGE; EXPECT_THAT(absl::base_internal::StrError(-1), AnyOf(Eq("No error information"), Eq("Unknown error -1"))); EXPECT_THAT(errno, Eq(ERANGE)); } TEST(StrErrorTest, MultipleThreads) { // In this test, we will start up 2 threads and have each one call // StrError 1000 times, each time with a different errnum. 
We // expect that StrError(errnum) will return a string equal to the // one returned by strerror(errnum), if the code is known. Since // strerror is known to be thread-hostile, collect all the expected // strings up front. const int kNumCodes = 1000; std::vector expected_strings(kNumCodes); for (int i = 0; i < kNumCodes; ++i) { expected_strings[i] = strerror(i); } std::atomic_int counter(0); auto thread_fun = [&]() { for (int i = 0; i < kNumCodes; ++i) { ++counter; errno = ERANGE; const std::string value = absl::base_internal::StrError(i); // EXPECT_* could change errno. Stash it first. int check_err = errno; EXPECT_THAT(check_err, Eq(ERANGE)); // Only the GNU implementation is guaranteed to provide the // string "Unknown error nnn". POSIX doesn't say anything. if (!absl::StartsWith(value, "Unknown error ")) { EXPECT_THAT(value, Eq(expected_strings[i])); } } }; const int kNumThreads = 100; std::vector threads; for (int i = 0; i < kNumThreads; ++i) { threads.push_back(std::thread(thread_fun)); } for (auto& thread : threads) { thread.join(); } EXPECT_THAT(counter, Eq(kNumThreads * kNumCodes)); } } // namespace abseil-20220623.1/absl/base/internal/sysinfo.cc000066400000000000000000000370021430371345100210250ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/sysinfo.h" #include "absl/base/attributes.h" #ifdef _WIN32 #include #else #include #include #include #include #include #endif #ifdef __linux__ #include #endif #if defined(__APPLE__) || defined(__FreeBSD__) #include #endif #if defined(__myriad2__) #include #endif #include #include #include #include #include #include #include #include // NOLINT(build/c++11) #include #include #include "absl/base/call_once.h" #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" #include "absl/base/internal/unscaledcycleclock.h" #include "absl/base/thread_annotations.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { #if defined(_WIN32) // Returns number of bits set in `bitMask` DWORD Win32CountSetBits(ULONG_PTR bitMask) { for (DWORD bitSetCount = 0; ; ++bitSetCount) { if (bitMask == 0) return bitSetCount; bitMask &= bitMask - 1; } } // Returns the number of logical CPUs using GetLogicalProcessorInformation(), or // 0 if the number of processors is not available or can not be computed. 
// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation int Win32NumCPUs() { #pragma comment(lib, "kernel32.lib") using Info = SYSTEM_LOGICAL_PROCESSOR_INFORMATION; DWORD info_size = sizeof(Info); Info* info(static_cast(malloc(info_size))); if (info == nullptr) return 0; bool success = GetLogicalProcessorInformation(info, &info_size); if (!success && GetLastError() == ERROR_INSUFFICIENT_BUFFER) { free(info); info = static_cast(malloc(info_size)); if (info == nullptr) return 0; success = GetLogicalProcessorInformation(info, &info_size); } DWORD logicalProcessorCount = 0; if (success) { Info* ptr = info; DWORD byteOffset = 0; while (byteOffset + sizeof(Info) <= info_size) { switch (ptr->Relationship) { case RelationProcessorCore: logicalProcessorCount += Win32CountSetBits(ptr->ProcessorMask); break; case RelationNumaNode: case RelationCache: case RelationProcessorPackage: // Ignore other entries break; default: // Ignore unknown entries break; } byteOffset += sizeof(Info); ptr++; } } free(info); return logicalProcessorCount; } #endif } // namespace static int GetNumCPUs() { #if defined(__myriad2__) return 1; #elif defined(_WIN32) const unsigned hardware_concurrency = Win32NumCPUs(); return hardware_concurrency ? hardware_concurrency : 1; #elif defined(_AIX) return sysconf(_SC_NPROCESSORS_ONLN); #else // Other possibilities: // - Read /sys/devices/system/cpu/online and use cpumask_parse() // - sysconf(_SC_NPROCESSORS_ONLN) return std::thread::hardware_concurrency(); #endif } #if defined(_WIN32) static double GetNominalCPUFrequency() { #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) && \ !WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) // UWP apps don't have access to the registry and currently don't provide an // API informing about CPU nominal frequency. return 1.0; #else #pragma comment(lib, "advapi32.lib") // For Reg* functions. 
HKEY key; // Use the Reg* functions rather than the SH functions because shlwapi.dll // pulls in gdi32.dll which makes process destruction much more costly. if (RegOpenKeyExA(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0, KEY_READ, &key) == ERROR_SUCCESS) { DWORD type = 0; DWORD data = 0; DWORD data_size = sizeof(data); auto result = RegQueryValueExA(key, "~MHz", 0, &type, reinterpret_cast(&data), &data_size); RegCloseKey(key); if (result == ERROR_SUCCESS && type == REG_DWORD && data_size == sizeof(data)) { return data * 1e6; // Value is MHz. } } return 1.0; #endif // WINAPI_PARTITION_APP && !WINAPI_PARTITION_DESKTOP } #elif defined(CTL_HW) && defined(HW_CPU_FREQ) static double GetNominalCPUFrequency() { unsigned freq; size_t size = sizeof(freq); int mib[2] = {CTL_HW, HW_CPU_FREQ}; if (sysctl(mib, 2, &freq, &size, nullptr, 0) == 0) { return static_cast(freq); } return 1.0; } #else // Helper function for reading a long from a file. Returns true if successful // and the memory location pointed to by value is set to the value read. static bool ReadLongFromFile(const char *file, long *value) { bool ret = false; int fd = open(file, O_RDONLY); if (fd != -1) { char line[1024]; char *err; memset(line, '\0', sizeof(line)); int len = read(fd, line, sizeof(line) - 1); if (len <= 0) { ret = false; } else { const long temp_value = strtol(line, &err, 10); if (line[0] != '\0' && (*err == '\n' || *err == '\0')) { *value = temp_value; ret = true; } } close(fd); } return ret; } #if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) // Reads a monotonic time source and returns a value in // nanoseconds. The returned value uses an arbitrary epoch, not the // Unix epoch. 
static int64_t ReadMonotonicClockNanos() { struct timespec t; #ifdef CLOCK_MONOTONIC_RAW int rc = clock_gettime(CLOCK_MONOTONIC_RAW, &t); #else int rc = clock_gettime(CLOCK_MONOTONIC, &t); #endif if (rc != 0) { perror("clock_gettime() failed"); abort(); } return int64_t{t.tv_sec} * 1000000000 + t.tv_nsec; } class UnscaledCycleClockWrapperForInitializeFrequency { public: static int64_t Now() { return base_internal::UnscaledCycleClock::Now(); } }; struct TimeTscPair { int64_t time; // From ReadMonotonicClockNanos(). int64_t tsc; // From UnscaledCycleClock::Now(). }; // Returns a pair of values (monotonic kernel time, TSC ticks) that // approximately correspond to each other. This is accomplished by // doing several reads and picking the reading with the lowest // latency. This approach is used to minimize the probability that // our thread was preempted between clock reads. static TimeTscPair GetTimeTscPair() { int64_t best_latency = std::numeric_limits::max(); TimeTscPair best; for (int i = 0; i < 10; ++i) { int64_t t0 = ReadMonotonicClockNanos(); int64_t tsc = UnscaledCycleClockWrapperForInitializeFrequency::Now(); int64_t t1 = ReadMonotonicClockNanos(); int64_t latency = t1 - t0; if (latency < best_latency) { best_latency = latency; best.time = t0; best.tsc = tsc; } } return best; } // Measures and returns the TSC frequency by taking a pair of // measurements approximately `sleep_nanoseconds` apart. static double MeasureTscFrequencyWithSleep(int sleep_nanoseconds) { auto t0 = GetTimeTscPair(); struct timespec ts; ts.tv_sec = 0; ts.tv_nsec = sleep_nanoseconds; while (nanosleep(&ts, &ts) != 0 && errno == EINTR) {} auto t1 = GetTimeTscPair(); double elapsed_ticks = t1.tsc - t0.tsc; double elapsed_time = (t1.time - t0.time) * 1e-9; return elapsed_ticks / elapsed_time; } // Measures and returns the TSC frequency by calling // MeasureTscFrequencyWithSleep(), doubling the sleep interval until the // frequency measurement stabilizes. 
static double MeasureTscFrequency() { double last_measurement = -1.0; int sleep_nanoseconds = 1000000; // 1 millisecond. for (int i = 0; i < 8; ++i) { double measurement = MeasureTscFrequencyWithSleep(sleep_nanoseconds); if (measurement * 0.99 < last_measurement && last_measurement < measurement * 1.01) { // Use the current measurement if it is within 1% of the // previous measurement. return measurement; } last_measurement = measurement; sleep_nanoseconds *= 2; } return last_measurement; } #endif // ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY static double GetNominalCPUFrequency() { long freq = 0; // Google's production kernel has a patch to export the TSC // frequency through sysfs. If the kernel is exporting the TSC // frequency use that. There are issues where cpuinfo_max_freq // cannot be relied on because the BIOS may be exporting an invalid // p-state (on x86) or p-states may be used to put the processor in // a new mode (turbo mode). Essentially, those frequencies cannot // always be relied upon. The same reasons apply to /proc/cpuinfo as // well. if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)) { return freq * 1e3; // Value is kHz. } #if defined(ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY) // On these platforms, the TSC frequency is the nominal CPU // frequency. But without having the kernel export it directly // though /sys/devices/system/cpu/cpu0/tsc_freq_khz, there is no // other way to reliably get the TSC frequency, so we have to // measure it ourselves. Some CPUs abuse cpuinfo_max_freq by // exporting "fake" frequencies for implementing new features. For // example, Intel's turbo mode is enabled by exposing a p-state // value with a higher frequency than that of the real TSC // rate. Because of this, we prefer to measure the TSC rate // ourselves on i386 and x86-64. 
return MeasureTscFrequency(); #else // If CPU scaling is in effect, we want to use the *maximum* // frequency, not whatever CPU speed some random processor happens // to be using now. if (ReadLongFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq)) { return freq * 1e3; // Value is kHz. } return 1.0; #endif // !ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY } #endif ABSL_CONST_INIT static once_flag init_num_cpus_once; ABSL_CONST_INIT static int num_cpus = 0; // NumCPUs() may be called before main() and before malloc is properly // initialized, therefore this must not allocate memory. int NumCPUs() { base_internal::LowLevelCallOnce( &init_num_cpus_once, []() { num_cpus = GetNumCPUs(); }); return num_cpus; } // A default frequency of 0.0 might be dangerous if it is used in division. ABSL_CONST_INIT static once_flag init_nominal_cpu_frequency_once; ABSL_CONST_INIT static double nominal_cpu_frequency = 1.0; // NominalCPUFrequency() may be called before main() and before malloc is // properly initialized, therefore this must not allocate memory. double NominalCPUFrequency() { base_internal::LowLevelCallOnce( &init_nominal_cpu_frequency_once, []() { nominal_cpu_frequency = GetNominalCPUFrequency(); }); return nominal_cpu_frequency; } #if defined(_WIN32) pid_t GetTID() { return pid_t{GetCurrentThreadId()}; } #elif defined(__linux__) #ifndef SYS_gettid #define SYS_gettid __NR_gettid #endif pid_t GetTID() { return syscall(SYS_gettid); } #elif defined(__akaros__) pid_t GetTID() { // Akaros has a concept of "vcore context", which is the state the program // is forced into when we need to make a user-level scheduling decision, or // run a signal handler. This is analogous to the interrupt context that a // CPU might enter if it encounters some kind of exception. // // There is no current thread context in vcore context, but we need to give // a reasonable answer if asked for a thread ID (e.g., in a signal handler). 
// Thread 0 always exists, so if we are in vcore context, we return that. // // Otherwise, we know (since we are using pthreads) that the uthread struct // current_uthread is pointing to is the first element of a // struct pthread_tcb, so we extract and return the thread ID from that. // // TODO(dcross): Akaros anticipates moving the thread ID to the uthread // structure at some point. We should modify this code to remove the cast // when that happens. if (in_vcore_context()) return 0; return reinterpret_cast(current_uthread)->id; } #elif defined(__myriad2__) pid_t GetTID() { uint32_t tid; rtems_task_ident(RTEMS_SELF, 0, &tid); return tid; } #else // Fallback implementation of GetTID using pthread_getspecific. ABSL_CONST_INIT static once_flag tid_once; ABSL_CONST_INIT static pthread_key_t tid_key; ABSL_CONST_INIT static absl::base_internal::SpinLock tid_lock( absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); // We set a bit per thread in this array to indicate that an ID is in // use. ID 0 is unused because it is the default value returned by // pthread_getspecific(). ABSL_CONST_INIT static std::vector *tid_array ABSL_GUARDED_BY(tid_lock) = nullptr; static constexpr int kBitsPerWord = 32; // tid_array is uint32_t. // Returns the TID to tid_array. static void FreeTID(void *v) { intptr_t tid = reinterpret_cast(v); int word = tid / kBitsPerWord; uint32_t mask = ~(1u << (tid % kBitsPerWord)); absl::base_internal::SpinLockHolder lock(&tid_lock); assert(0 <= word && static_cast(word) < tid_array->size()); (*tid_array)[word] &= mask; } static void InitGetTID() { if (pthread_key_create(&tid_key, FreeTID) != 0) { // The logging system calls GetTID() so it can't be used here. perror("pthread_key_create failed"); abort(); } // Initialize tid_array. absl::base_internal::SpinLockHolder lock(&tid_lock); tid_array = new std::vector(1); (*tid_array)[0] = 1; // ID 0 is never-allocated. } // Return a per-thread small integer ID from pthread's thread-specific data. 
pid_t GetTID() { absl::call_once(tid_once, InitGetTID); intptr_t tid = reinterpret_cast(pthread_getspecific(tid_key)); if (tid != 0) { return tid; } int bit; // tid_array[word] = 1u << bit; size_t word; { // Search for the first unused ID. absl::base_internal::SpinLockHolder lock(&tid_lock); // First search for a word in the array that is not all ones. word = 0; while (word < tid_array->size() && ~(*tid_array)[word] == 0) { ++word; } if (word == tid_array->size()) { tid_array->push_back(0); // No space left, add kBitsPerWord more IDs. } // Search for a zero bit in the word. bit = 0; while (bit < kBitsPerWord && (((*tid_array)[word] >> bit) & 1) != 0) { ++bit; } tid = (word * kBitsPerWord) + bit; (*tid_array)[word] |= 1u << bit; // Mark the TID as allocated. } if (pthread_setspecific(tid_key, reinterpret_cast(tid)) != 0) { perror("pthread_setspecific failed"); abort(); } return static_cast(tid); } #endif // GetCachedTID() caches the thread ID in thread-local storage (which is a // userspace construct) to avoid unnecessary system calls. Without this caching, // it can take roughly 98ns, while it takes roughly 1ns with this caching. pid_t GetCachedTID() { #ifdef ABSL_HAVE_THREAD_LOCAL static thread_local pid_t thread_id = GetTID(); return thread_id; #else return GetTID(); #endif // ABSL_HAVE_THREAD_LOCAL } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/sysinfo.h000066400000000000000000000052161430371345100206710ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This file includes routines to find out characteristics // of the machine a program is running on. It is undoubtedly // system-dependent. // Functions listed here that accept a pid_t as an argument act on the // current process if the pid_t argument is 0 // All functions here are thread-hostile due to file caching unless // commented otherwise. #ifndef ABSL_BASE_INTERNAL_SYSINFO_H_ #define ABSL_BASE_INTERNAL_SYSINFO_H_ #ifndef _WIN32 #include #endif #include #include "absl/base/config.h" #include "absl/base/port.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Nominal core processor cycles per second of each processor. This is _not_ // necessarily the frequency of the CycleClock counter (see cycleclock.h) // Thread-safe. double NominalCPUFrequency(); // Number of logical processors (hyperthreads) in system. Thread-safe. int NumCPUs(); // Return the thread id of the current thread, as told by the system. // No two currently-live threads implemented by the OS shall have the same ID. // Thread ids of exited threads may be reused. Multiple user-level threads // may have the same thread ID if multiplexed on the same OS thread. // // On Linux, you may send a signal to the resulting ID with kill(). However, // it is recommended for portability that you use pthread_kill() instead. #ifdef _WIN32 // On Windows, process id and thread id are of the same type according to the // return types of GetProcessId() and GetThreadId() are both DWORD, an unsigned // 32-bit type. 
using pid_t = uint32_t; #endif pid_t GetTID(); // Like GetTID(), but caches the result in thread-local storage in order // to avoid unnecessary system calls. Note that there are some cases where // one must call through to GetTID directly, which is why this exists as a // separate function. For example, GetCachedTID() is not safe to call in // an asynchronous signal-handling context nor right after a call to fork(). pid_t GetCachedTID(); } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_SYSINFO_H_ abseil-20220623.1/absl/base/internal/sysinfo_test.cc000066400000000000000000000047721430371345100220740ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/sysinfo.h" #ifndef _WIN32 #include #include #endif #include // NOLINT(build/c++11) #include #include #include "gtest/gtest.h" #include "absl/synchronization/barrier.h" #include "absl/synchronization/mutex.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { TEST(SysinfoTest, NumCPUs) { EXPECT_NE(NumCPUs(), 0) << "NumCPUs() should not have the default value of 0"; } TEST(SysinfoTest, GetTID) { EXPECT_EQ(GetTID(), GetTID()); // Basic compile and equality test. #ifdef __native_client__ // Native Client has a race condition bug that leads to memory // exaustion when repeatedly creating and joining threads. 
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1027 return; #endif // Test that TIDs are unique to each thread. // Uses a few loops to exercise implementations that reallocate IDs. for (int i = 0; i < 10; ++i) { constexpr int kNumThreads = 10; Barrier all_threads_done(kNumThreads); std::vector threads; Mutex mutex; std::unordered_set tids; for (int j = 0; j < kNumThreads; ++j) { threads.push_back(std::thread([&]() { pid_t id = GetTID(); { MutexLock lock(&mutex); ASSERT_TRUE(tids.find(id) == tids.end()); tids.insert(id); } // We can't simply join the threads here. The threads need to // be alive otherwise the TID might have been reallocated to // another live thread. all_threads_done.Block(); })); } for (auto& thread : threads) { thread.join(); } } } #ifdef __linux__ TEST(SysinfoTest, LinuxGetTID) { // On Linux, for the main thread, GetTID()==getpid() is guaranteed by the API. EXPECT_EQ(GetTID(), getpid()); } #endif } // namespace } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/thread_annotations.h000066400000000000000000000242161430371345100230640ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // ----------------------------------------------------------------------------- // File: thread_annotations.h // ----------------------------------------------------------------------------- // // WARNING: This is a backwards compatible header and it will be removed after // the migration to prefixed thread annotations is finished; please include // "absl/base/thread_annotations.h". // // This header file contains macro definitions for thread safety annotations // that allow developers to document the locking policies of multi-threaded // code. The annotations can also help program analysis tools to identify // potential thread safety issues. // // These annotations are implemented using compiler attributes. Using the macros // defined here instead of raw attributes allow for portability and future // compatibility. // // When referring to mutexes in the arguments of the attributes, you should // use variable names or more complex expressions (e.g. my_object->mutex_) // that evaluate to a concrete mutex object whenever possible. If the mutex // you want to refer to is not in scope, you may use a member pointer // (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. #ifndef ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ #if defined(__clang__) #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op #endif // GUARDED_BY() // // Documents if a shared field or global variable needs to be protected by a // mutex. GUARDED_BY() allows the user to specify a particular mutex that // should be held when accessing the annotated variable. // // Although this annotation (and PT_GUARDED_BY, below) cannot be applied to // local variables, a local variable and its associated mutex can often be // combined into a small class or struct, thereby allowing the annotation. // // Example: // // class Foo { // Mutex mu_; // int p1_ GUARDED_BY(mu_); // ... 
// }; #define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) // PT_GUARDED_BY() // // Documents if the memory location pointed to by a pointer should be guarded // by a mutex when dereferencing the pointer. // // Example: // class Foo { // Mutex mu_; // int *p1_ PT_GUARDED_BY(mu_); // ... // }; // // Note that a pointer variable to a shared memory location could itself be a // shared variable. // // Example: // // // `q_`, guarded by `mu1_`, points to a shared memory location that is // // guarded by `mu2_`: // int *q_ GUARDED_BY(mu1_) PT_GUARDED_BY(mu2_); #define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) // ACQUIRED_AFTER() / ACQUIRED_BEFORE() // // Documents the acquisition order between locks that can be held // simultaneously by a thread. For any two locks that need to be annotated // to establish an acquisition order, only one of them needs the annotation. // (i.e. You don't have to annotate both locks with both ACQUIRED_AFTER // and ACQUIRED_BEFORE.) // // As with GUARDED_BY, this is only applicable to mutexes that are shared // fields or global variables. // // Example: // // Mutex m1_; // Mutex m2_ ACQUIRED_AFTER(m1_); #define ACQUIRED_AFTER(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #define ACQUIRED_BEFORE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) // EXCLUSIVE_LOCKS_REQUIRED() / SHARED_LOCKS_REQUIRED() // // Documents a function that expects a mutex to be held prior to entry. // The mutex is expected to be held both on entry to, and exit from, the // function. // // An exclusive lock allows read-write access to the guarded data member(s), and // only one thread can acquire a lock exclusively at any one time. A shared lock // allows read-only access, and any number of threads can acquire a shared lock // concurrently. // // Generally, non-const methods should be annotated with // EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with // SHARED_LOCKS_REQUIRED. 
// // Example: // // Mutex mu1, mu2; // int a GUARDED_BY(mu1); // int b GUARDED_BY(mu2); // // void foo() EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } #define EXCLUSIVE_LOCKS_REQUIRED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_locks_required(__VA_ARGS__)) #define SHARED_LOCKS_REQUIRED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_locks_required(__VA_ARGS__)) // LOCKS_EXCLUDED() // // Documents the locks acquired in the body of the function. These locks // cannot be held when calling this function (as Abseil's `Mutex` locks are // non-reentrant). #define LOCKS_EXCLUDED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) // LOCK_RETURNED() // // Documents a function that returns a mutex without acquiring it. For example, // a public getter method that returns a pointer to a private mutex should // be annotated with LOCK_RETURNED. #define LOCK_RETURNED(x) \ THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) // LOCKABLE // // Documents if a class/type is a lockable type (such as the `Mutex` class). #define LOCKABLE \ THREAD_ANNOTATION_ATTRIBUTE__(lockable) // SCOPED_LOCKABLE // // Documents if a class does RAII locking (such as the `MutexLock` class). // The constructor should use `LOCK_FUNCTION()` to specify the mutex that is // acquired, and the destructor should use `UNLOCK_FUNCTION()` with no // arguments; the analysis will assume that the destructor unlocks whatever the // constructor locked. #define SCOPED_LOCKABLE \ THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) // EXCLUSIVE_LOCK_FUNCTION() // // Documents functions that acquire a lock in the body of a function, and do // not release it. #define EXCLUSIVE_LOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__)) // SHARED_LOCK_FUNCTION() // // Documents functions that acquire a shared (reader) lock in the body of a // function, and do not release it. #define SHARED_LOCK_FUNCTION(...) 
\ THREAD_ANNOTATION_ATTRIBUTE__(shared_lock_function(__VA_ARGS__)) // UNLOCK_FUNCTION() // // Documents functions that expect a lock to be held on entry to the function, // and release it in the body of the function. #define UNLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__)) // EXCLUSIVE_TRYLOCK_FUNCTION() / SHARED_TRYLOCK_FUNCTION() // // Documents functions that try to acquire a lock, and return success or failure // (or a non-boolean value that can be interpreted as a boolean). // The first argument should be `true` for functions that return `true` on // success, or `false` for functions that return `false` on success. The second // argument specifies the mutex that is locked on success. If unspecified, this // mutex is assumed to be `this`. #define EXCLUSIVE_TRYLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(exclusive_trylock_function(__VA_ARGS__)) #define SHARED_TRYLOCK_FUNCTION(...) \ THREAD_ANNOTATION_ATTRIBUTE__(shared_trylock_function(__VA_ARGS__)) // ASSERT_EXCLUSIVE_LOCK() / ASSERT_SHARED_LOCK() // // Documents functions that dynamically check to see if a lock is held, and fail // if it is not held. #define ASSERT_EXCLUSIVE_LOCK(...) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_exclusive_lock(__VA_ARGS__)) #define ASSERT_SHARED_LOCK(...) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_lock(__VA_ARGS__)) // NO_THREAD_SAFETY_ANALYSIS // // Turns off thread safety checking within the body of a particular function. // This annotation is used to mark functions that are known to be correct, but // the locking behavior is more complicated than the analyzer can handle. 
#define NO_THREAD_SAFETY_ANALYSIS \ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) //------------------------------------------------------------------------------ // Tool-Supplied Annotations //------------------------------------------------------------------------------ // TS_UNCHECKED should be placed around lock expressions that are not valid // C++ syntax, but which are present for documentation purposes. These // annotations will be ignored by the analysis. #define TS_UNCHECKED(x) "" // TS_FIXME is used to mark lock expressions that are not valid C++ syntax. // It is used by automated tools to mark and disable invalid expressions. // The annotation should either be fixed, or changed to TS_UNCHECKED. #define TS_FIXME(x) "" // Like NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body of // a particular function. However, this attribute is used to mark functions // that are incorrect and need to be fixed. It is used by automated tools to // avoid breaking the build when the analysis is updated. // Code owners are expected to eventually fix the routine. #define NO_THREAD_SAFETY_ANALYSIS_FIXME NO_THREAD_SAFETY_ANALYSIS // Similar to NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a GUARDED_BY // annotation that needs to be fixed, because it is producing thread safety // warning. It disables the GUARDED_BY. #define GUARDED_BY_FIXME(x) // Disables warnings for a single read operation. This can be used to avoid // warnings when it is known that the read is not actually involved in a race, // but the compiler cannot confirm that. #define TS_UNCHECKED_READ(x) thread_safety_analysis::ts_unchecked_read(x) namespace thread_safety_analysis { // Takes a reference to a guarded data member, and returns an unguarded // reference. 
template inline const T& ts_unchecked_read(const T& v) NO_THREAD_SAFETY_ANALYSIS { return v; } template inline T& ts_unchecked_read(T& v) NO_THREAD_SAFETY_ANALYSIS { return v; } } // namespace thread_safety_analysis #endif // ABSL_BASE_INTERNAL_THREAD_ANNOTATIONS_H_ abseil-20220623.1/absl/base/internal/thread_identity.cc000066400000000000000000000143311430371345100225130ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/thread_identity.h" #if !defined(_WIN32) || defined(__MINGW32__) #include #include #endif #include #include #include #include "absl/base/attributes.h" #include "absl/base/call_once.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/internal/spinlock.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { #if ABSL_THREAD_IDENTITY_MODE != ABSL_THREAD_IDENTITY_MODE_USE_CPP11 namespace { // Used to co-ordinate one-time creation of our pthread_key absl::once_flag init_thread_identity_key_once; pthread_key_t thread_identity_pthread_key; std::atomic pthread_key_initialized(false); void AllocateThreadIdentityKey(ThreadIdentityReclaimerFunction reclaimer) { pthread_key_create(&thread_identity_pthread_key, reclaimer); pthread_key_initialized.store(true, std::memory_order_release); } } // namespace #endif #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 // The 
actual TLS storage for a thread's currently associated ThreadIdentity. // This is referenced by inline accessors in the header. // "protected" visibility ensures that if multiple instances of Abseil code // exist within a process (via dlopen() or similar), references to // thread_identity_ptr from each instance of the code will refer to // *different* instances of this ptr. // Apple platforms have the visibility attribute, but issue a compile warning // that protected visibility is unsupported. ABSL_CONST_INIT // Must come before __attribute__((visibility("protected"))) #if ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) __attribute__((visibility("protected"))) #endif // ABSL_HAVE_ATTRIBUTE(visibility) && !defined(__APPLE__) #if ABSL_PER_THREAD_TLS // Prefer __thread to thread_local as benchmarks indicate it is a bit faster. ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr = nullptr; #elif defined(ABSL_HAVE_THREAD_LOCAL) thread_local ThreadIdentity* thread_identity_ptr = nullptr; #endif // ABSL_PER_THREAD_TLS #endif // TLS or CPP11 void SetCurrentThreadIdentity( ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer) { assert(CurrentThreadIdentityIfPresent() == nullptr); // Associate our destructor. // NOTE: This call to pthread_setspecific is currently the only immovable // barrier to CurrentThreadIdentity() always being async signal safe. #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC // NOTE: Not async-safe. But can be open-coded. absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, reclaimer); #if defined(__EMSCRIPTEN__) || defined(__MINGW32__) // Emscripten and MinGW pthread implementations does not support signals. // See https://kripken.github.io/emscripten-site/docs/porting/pthreads.html // for more information. 
pthread_setspecific(thread_identity_pthread_key, reinterpret_cast(identity)); #else // We must mask signals around the call to setspecific as with current glibc, // a concurrent getspecific (needed for GetCurrentThreadIdentityIfPresent()) // may zero our value. // // While not officially async-signal safe, getspecific within a signal handler // is otherwise OK. sigset_t all_signals; sigset_t curr_signals; sigfillset(&all_signals); pthread_sigmask(SIG_SETMASK, &all_signals, &curr_signals); pthread_setspecific(thread_identity_pthread_key, reinterpret_cast(identity)); pthread_sigmask(SIG_SETMASK, &curr_signals, nullptr); #endif // !__EMSCRIPTEN__ && !__MINGW32__ #elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS // NOTE: Not async-safe. But can be open-coded. absl::call_once(init_thread_identity_key_once, AllocateThreadIdentityKey, reclaimer); pthread_setspecific(thread_identity_pthread_key, reinterpret_cast(identity)); thread_identity_ptr = identity; #elif ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 thread_local std::unique_ptr holder(identity, reclaimer); thread_identity_ptr = identity; #else #error Unimplemented ABSL_THREAD_IDENTITY_MODE #endif } #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 // Please see the comment on `CurrentThreadIdentityIfPresent` in // thread_identity.h. When we cannot expose thread_local variables in // headers, we opt for the correct-but-slower option of not inlining this // function. 
#ifndef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } #endif #endif void ClearCurrentThreadIdentity() { #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 thread_identity_ptr = nullptr; #elif ABSL_THREAD_IDENTITY_MODE == \ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC // pthread_setspecific expected to clear value on destruction assert(CurrentThreadIdentityIfPresent() == nullptr); #endif } #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC ThreadIdentity* CurrentThreadIdentityIfPresent() { bool initialized = pthread_key_initialized.load(std::memory_order_acquire); if (!initialized) { return nullptr; } return reinterpret_cast( pthread_getspecific(thread_identity_pthread_key)); } #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/thread_identity.h000066400000000000000000000252721430371345100223630ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Each active thread has an ThreadIdentity that may represent the thread in // various level interfaces. ThreadIdentity objects are never deallocated. // When a thread terminates, its ThreadIdentity object may be reused for a // thread created later. 
#ifndef ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ #define ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ #ifndef _WIN32 #include // Defines __GOOGLE_GRTE_VERSION__ (via glibc-specific features.h) when // supported. #include #endif #include #include #include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" namespace absl { ABSL_NAMESPACE_BEGIN struct SynchLocksHeld; struct SynchWaitParams; namespace base_internal { class SpinLock; struct ThreadIdentity; // Used by the implementation of absl::Mutex and absl::CondVar. struct PerThreadSynch { // The internal representation of absl::Mutex and absl::CondVar rely // on the alignment of PerThreadSynch. Both store the address of the // PerThreadSynch in the high-order bits of their internal state, // which means the low kLowZeroBits of the address of PerThreadSynch // must be zero. static constexpr int kLowZeroBits = 8; static constexpr int kAlignment = 1 << kLowZeroBits; // Returns the associated ThreadIdentity. // This can be implemented as a cast because we guarantee // PerThreadSynch is the first element of ThreadIdentity. ThreadIdentity* thread_identity() { return reinterpret_cast(this); } PerThreadSynch *next; // Circular waiter queue; initialized to 0. PerThreadSynch *skip; // If non-zero, all entries in Mutex queue // up to and including "skip" have same // condition as this, and will be woken later bool may_skip; // if false while on mutex queue, a mutex unlocker // is using this PerThreadSynch as a terminator. Its // skip field must not be filled in because the loop // might then skip over the terminator. bool wake; // This thread is to be woken from a Mutex. // If "x" is on a waiter list for a mutex, "x->cond_waiter" is true iff the // waiter is waiting on the mutex as part of a CV Wait or Mutex Await. // // The value of "x->cond_waiter" is meaningless if "x" is not on a // Mutex waiter list. 
bool cond_waiter; bool maybe_unlocking; // Valid at head of Mutex waiter queue; // true if UnlockSlow could be searching // for a waiter to wake. Used for an optimization // in Enqueue(). true is always a valid value. // Can be reset to false when the unlocker or any // writer releases the lock, or a reader fully // releases the lock. It may not be set to false // by a reader that decrements the count to // non-zero. protected by mutex spinlock bool suppress_fatal_errors; // If true, try to proceed even in the face // of broken invariants. This is used within // fatal signal handlers to improve the // chances of debug logging information being // output successfully. int priority; // Priority of thread (updated every so often). // State values: // kAvailable: This PerThreadSynch is available. // kQueued: This PerThreadSynch is unavailable, it's currently queued on a // Mutex or CondVar waistlist. // // Transitions from kQueued to kAvailable require a release // barrier. This is needed as a waiter may use "state" to // independently observe that it's no longer queued. // // Transitions from kAvailable to kQueued require no barrier, they // are externally ordered by the Mutex. enum State { kAvailable, kQueued }; std::atomic state; // The wait parameters of the current wait. waitp is null if the // thread is not waiting. Transitions from null to non-null must // occur before the enqueue commit point (state = kQueued in // Enqueue() and CondVarEnqueue()). Transitions from non-null to // null must occur after the wait is finished (state = kAvailable in // Mutex::Block() and CondVar::WaitCommon()). This field may be // changed only by the thread that describes this PerThreadSynch. A // special case is Fer(), which calls Enqueue() on another thread, // but with an identical SynchWaitParams pointer, thus leaving the // pointer unchanged. SynchWaitParams* waitp; intptr_t readers; // Number of readers in mutex. // When priority will next be read (cycles). 
int64_t next_priority_read_cycles; // Locks held; used during deadlock detection. // Allocated in Synch_GetAllLocks() and freed in ReclaimThreadIdentity(). SynchLocksHeld *all_locks; }; // The instances of this class are allocated in NewThreadIdentity() with an // alignment of PerThreadSynch::kAlignment. struct ThreadIdentity { // Must be the first member. The Mutex implementation requires that // the PerThreadSynch object associated with each thread is // PerThreadSynch::kAlignment aligned. We provide this alignment on // ThreadIdentity itself. PerThreadSynch per_thread_synch; // Private: Reserved for absl::synchronization_internal::Waiter. struct WaiterState { alignas(void*) char data[128]; } waiter_state; // Used by PerThreadSem::{Get,Set}ThreadBlockedCounter(). std::atomic* blocked_count_ptr; // The following variables are mostly read/written just by the // thread itself. The only exception is that these are read by // a ticker thread as a hint. std::atomic ticker; // Tick counter, incremented once per second. std::atomic wait_start; // Ticker value when thread started waiting. std::atomic is_idle; // Has thread become idle yet? ThreadIdentity* next; }; // Returns the ThreadIdentity object representing the calling thread; guaranteed // to be unique for its lifetime. The returned object will remain valid for the // program's lifetime; although it may be re-assigned to a subsequent thread. // If one does not exist, return nullptr instead. // // Does not malloc(*), and is async-signal safe. // [*] Technically pthread_setspecific() does malloc on first use; however this // is handled internally within tcmalloc's initialization already. // // New ThreadIdentity objects can be constructed and associated with a thread // by calling GetOrCreateCurrentThreadIdentity() in per-thread-sem.h. ThreadIdentity* CurrentThreadIdentityIfPresent(); using ThreadIdentityReclaimerFunction = void (*)(void*); // Sets the current thread identity to the given value. 
'reclaimer' is a // pointer to the global function for cleaning up instances on thread // destruction. void SetCurrentThreadIdentity(ThreadIdentity* identity, ThreadIdentityReclaimerFunction reclaimer); // Removes the currently associated ThreadIdentity from the running thread. // This must be called from inside the ThreadIdentityReclaimerFunction, and only // from that function. void ClearCurrentThreadIdentity(); // May be chosen at compile time via: -DABSL_FORCE_THREAD_IDENTITY_MODE= #ifdef ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC #error ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC 0 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_TLS #error ABSL_THREAD_IDENTITY_MODE_USE_TLS cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_TLS 1 #endif #ifdef ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #error ABSL_THREAD_IDENTITY_MODE_USE_CPP11 cannot be directly set #else #define ABSL_THREAD_IDENTITY_MODE_USE_CPP11 2 #endif #ifdef ABSL_THREAD_IDENTITY_MODE #error ABSL_THREAD_IDENTITY_MODE cannot be directly set #elif defined(ABSL_FORCE_THREAD_IDENTITY_MODE) #define ABSL_THREAD_IDENTITY_MODE ABSL_FORCE_THREAD_IDENTITY_MODE #elif defined(_WIN32) && !defined(__MINGW32__) #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #elif defined(__APPLE__) && defined(ABSL_HAVE_THREAD_LOCAL) #define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #elif ABSL_PER_THREAD_TLS && defined(__GOOGLE_GRTE_VERSION__) && \ (__GOOGLE_GRTE_VERSION__ >= 20140228L) // Support for async-safe TLS was specifically added in GRTEv4. It's not // present in the upstream eglibc. // Note: Current default for production systems. 
#define ABSL_THREAD_IDENTITY_MODE ABSL_THREAD_IDENTITY_MODE_USE_TLS #else #define ABSL_THREAD_IDENTITY_MODE \ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC #endif #if ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_TLS || \ ABSL_THREAD_IDENTITY_MODE == ABSL_THREAD_IDENTITY_MODE_USE_CPP11 #if ABSL_PER_THREAD_TLS ABSL_CONST_INIT extern ABSL_PER_THREAD_TLS_KEYWORD ThreadIdentity* thread_identity_ptr; #elif defined(ABSL_HAVE_THREAD_LOCAL) ABSL_CONST_INIT extern thread_local ThreadIdentity* thread_identity_ptr; #else #error Thread-local storage not detected on this platform #endif // thread_local variables cannot be in headers exposed by DLLs or in certain // build configurations on Apple platforms. However, it is important for // performance reasons in general that `CurrentThreadIdentityIfPresent` be // inlined. In the other cases we opt to have the function not be inlined. Note // that `CurrentThreadIdentityIfPresent` is declared above so we can exclude // this entire inline definition. #if !defined(__APPLE__) && !defined(ABSL_BUILD_DLL) && \ !defined(ABSL_CONSUME_DLL) #define ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT 1 #endif #ifdef ABSL_INTERNAL_INLINE_CURRENT_THREAD_IDENTITY_IF_PRESENT inline ThreadIdentity* CurrentThreadIdentityIfPresent() { return thread_identity_ptr; } #endif #elif ABSL_THREAD_IDENTITY_MODE != \ ABSL_THREAD_IDENTITY_MODE_USE_POSIX_SETSPECIFIC #error Unknown ABSL_THREAD_IDENTITY_MODE #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_THREAD_IDENTITY_H_ abseil-20220623.1/absl/base/internal/thread_identity_benchmark.cc000066400000000000000000000024331430371345100245250ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "benchmark/benchmark.h" #include "absl/base/internal/thread_identity.h" #include "absl/synchronization/internal/create_thread_identity.h" #include "absl/synchronization/internal/per_thread_sem.h" namespace { void BM_SafeCurrentThreadIdentity(benchmark::State& state) { for (auto _ : state) { benchmark::DoNotOptimize( absl::synchronization_internal::GetOrCreateCurrentThreadIdentity()); } } BENCHMARK(BM_SafeCurrentThreadIdentity); void BM_UnsafeCurrentThreadIdentity(benchmark::State& state) { for (auto _ : state) { benchmark::DoNotOptimize( absl::base_internal::CurrentThreadIdentityIfPresent()); } } BENCHMARK(BM_UnsafeCurrentThreadIdentity); } // namespace abseil-20220623.1/absl/base/internal/thread_identity_test.cc000066400000000000000000000107371430371345100235600ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/internal/thread_identity.h" #include // NOLINT(build/c++11) #include #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/internal/spinlock.h" #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" #include "absl/synchronization/internal/per_thread_sem.h" #include "absl/synchronization/mutex.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { ABSL_CONST_INIT static absl::base_internal::SpinLock map_lock( absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); ABSL_CONST_INIT static int num_identities_reused ABSL_GUARDED_BY(map_lock); static const void* const kCheckNoIdentity = reinterpret_cast(1); static void TestThreadIdentityCurrent(const void* assert_no_identity) { ThreadIdentity* identity; // We have to test this conditionally, because if the test framework relies // on Abseil, then some previous action may have already allocated an // identity. if (assert_no_identity == kCheckNoIdentity) { identity = CurrentThreadIdentityIfPresent(); EXPECT_TRUE(identity == nullptr); } identity = synchronization_internal::GetOrCreateCurrentThreadIdentity(); EXPECT_TRUE(identity != nullptr); ThreadIdentity* identity_no_init; identity_no_init = CurrentThreadIdentityIfPresent(); EXPECT_TRUE(identity == identity_no_init); // Check that per_thread_synch is correctly aligned. EXPECT_EQ(0, reinterpret_cast(&identity->per_thread_synch) % PerThreadSynch::kAlignment); EXPECT_EQ(identity, identity->per_thread_synch.thread_identity()); absl::base_internal::SpinLockHolder l(&map_lock); num_identities_reused++; } TEST(ThreadIdentityTest, BasicIdentityWorks) { // This tests for the main() thread. TestThreadIdentityCurrent(nullptr); } TEST(ThreadIdentityTest, BasicIdentityWorksThreaded) { // Now try the same basic test with multiple threads being created and // destroyed. This makes sure that: // - New threads are created without a ThreadIdentity. 
// - We re-allocate ThreadIdentity objects from the free-list. // - If a thread implementation chooses to recycle threads, that // correct re-initialization occurs. static const int kNumLoops = 3; static const int kNumThreads = 32; for (int iter = 0; iter < kNumLoops; iter++) { std::vector threads; for (int i = 0; i < kNumThreads; ++i) { threads.push_back( std::thread(TestThreadIdentityCurrent, kCheckNoIdentity)); } for (auto& thread : threads) { thread.join(); } } // We should have recycled ThreadIdentity objects above; while (external) // library threads allocating their own identities may preclude some // reuse, we should have sufficient repetitions to exclude this. absl::base_internal::SpinLockHolder l(&map_lock); EXPECT_LT(kNumThreads, num_identities_reused); } TEST(ThreadIdentityTest, ReusedThreadIdentityMutexTest) { // This test repeatly creates and joins a series of threads, each of // which acquires and releases shared Mutex locks. This verifies // Mutex operations work correctly under a reused // ThreadIdentity. Note that the most likely failure mode of this // test is a crash or deadlock. static const int kNumLoops = 10; static const int kNumThreads = 12; static const int kNumMutexes = 3; static const int kNumLockLoops = 5; Mutex mutexes[kNumMutexes]; for (int iter = 0; iter < kNumLoops; ++iter) { std::vector threads; for (int thread = 0; thread < kNumThreads; ++thread) { threads.push_back(std::thread([&]() { for (int l = 0; l < kNumLockLoops; ++l) { for (int m = 0; m < kNumMutexes; ++m) { MutexLock lock(&mutexes[m]); } } })); } for (auto& thread : threads) { thread.join(); } } } } // namespace } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/throw_delegate.cc000066400000000000000000000120061430371345100223250ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/throw_delegate.h" #include #include #include #include #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // NOTE: The various STL exception throwing functions are placed within the // #ifdef blocks so the symbols aren't exposed on platforms that don't support // them, such as the Android NDK. For example, ANGLE fails to link when building // within AOSP without them, since the STL functions don't exist. 
namespace { #ifdef ABSL_HAVE_EXCEPTIONS template [[noreturn]] void Throw(const T& error) { throw error; } #endif } // namespace void ThrowStdLogicError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdLogicError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::logic_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdInvalidArgument(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdInvalidArgument(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::invalid_argument(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdDomainError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdDomainError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::domain_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdLengthError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdLengthError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::length_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdOutOfRange(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdOutOfRange(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::out_of_range(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); 
#endif } void ThrowStdRuntimeError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdRuntimeError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::runtime_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdRangeError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdRangeError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::range_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdOverflowError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdOverflowError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::overflow_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdUnderflowError(const std::string& what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg.c_str()); std::abort(); #endif } void ThrowStdUnderflowError(const char* what_arg) { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::underflow_error(what_arg)); #else ABSL_RAW_LOG(FATAL, "%s", what_arg); std::abort(); #endif } void ThrowStdBadFunctionCall() { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::bad_function_call()); #else std::abort(); #endif } void ThrowStdBadAlloc() { #ifdef ABSL_HAVE_EXCEPTIONS Throw(std::bad_alloc()); #else std::abort(); #endif } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/internal/throw_delegate.h000066400000000000000000000064031430371345100221730ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ #define ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Helper functions that allow throwing exceptions consistently from anywhere. // The main use case is for header-based libraries (eg templates), as they will // be built by many different targets with their own compiler options. // In particular, this will allow a safe way to throw exceptions even if the // caller is compiled with -fno-exceptions. This is intended for implementing // things like map<>::at(), which the standard documents as throwing an // exception on error. // // Using other techniques like #if tricks could lead to ODR violations. // // You shouldn't use it unless you're writing code that you know will be built // both with and without exceptions and you need to conform to an interface // that uses exceptions. 
[[noreturn]] void ThrowStdLogicError(const std::string& what_arg); [[noreturn]] void ThrowStdLogicError(const char* what_arg); [[noreturn]] void ThrowStdInvalidArgument(const std::string& what_arg); [[noreturn]] void ThrowStdInvalidArgument(const char* what_arg); [[noreturn]] void ThrowStdDomainError(const std::string& what_arg); [[noreturn]] void ThrowStdDomainError(const char* what_arg); [[noreturn]] void ThrowStdLengthError(const std::string& what_arg); [[noreturn]] void ThrowStdLengthError(const char* what_arg); [[noreturn]] void ThrowStdOutOfRange(const std::string& what_arg); [[noreturn]] void ThrowStdOutOfRange(const char* what_arg); [[noreturn]] void ThrowStdRuntimeError(const std::string& what_arg); [[noreturn]] void ThrowStdRuntimeError(const char* what_arg); [[noreturn]] void ThrowStdRangeError(const std::string& what_arg); [[noreturn]] void ThrowStdRangeError(const char* what_arg); [[noreturn]] void ThrowStdOverflowError(const std::string& what_arg); [[noreturn]] void ThrowStdOverflowError(const char* what_arg); [[noreturn]] void ThrowStdUnderflowError(const std::string& what_arg); [[noreturn]] void ThrowStdUnderflowError(const char* what_arg); [[noreturn]] void ThrowStdBadFunctionCall(); [[noreturn]] void ThrowStdBadAlloc(); // ThrowStdBadArrayNewLength() cannot be consistently supported because // std::bad_array_new_length is missing in libstdc++ until 4.9.0. // https://gcc.gnu.org/onlinedocs/gcc-4.8.3/libstdc++/api/a01379_source.html // https://gcc.gnu.org/onlinedocs/gcc-4.9.0/libstdc++/api/a01327_source.html // libcxx (as of 3.2) and msvc (as of 2015) both have it. // [[noreturn]] void ThrowStdBadArrayNewLength(); } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_INTERNAL_THROW_DELEGATE_H_ abseil-20220623.1/absl/base/internal/tsan_mutex_interface.h000066400000000000000000000047311430371345100234070ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This file is intended solely for spinlock.h. // It provides ThreadSanitizer annotations for custom mutexes. // See for meaning of these annotations. #ifndef ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ #define ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ #include "absl/base/config.h" // ABSL_INTERNAL_HAVE_TSAN_INTERFACE // Macro intended only for internal use. // // Checks whether LLVM Thread Sanitizer interfaces are available. // First made available in LLVM 5.0 (Sep 2017). #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE #error "ABSL_INTERNAL_HAVE_TSAN_INTERFACE cannot be directly set." #endif #if defined(ABSL_HAVE_THREAD_SANITIZER) && defined(__has_include) #if __has_include() #define ABSL_INTERNAL_HAVE_TSAN_INTERFACE 1 #endif #endif #ifdef ABSL_INTERNAL_HAVE_TSAN_INTERFACE #include #define ABSL_TSAN_MUTEX_CREATE __tsan_mutex_create #define ABSL_TSAN_MUTEX_DESTROY __tsan_mutex_destroy #define ABSL_TSAN_MUTEX_PRE_LOCK __tsan_mutex_pre_lock #define ABSL_TSAN_MUTEX_POST_LOCK __tsan_mutex_post_lock #define ABSL_TSAN_MUTEX_PRE_UNLOCK __tsan_mutex_pre_unlock #define ABSL_TSAN_MUTEX_POST_UNLOCK __tsan_mutex_post_unlock #define ABSL_TSAN_MUTEX_PRE_SIGNAL __tsan_mutex_pre_signal #define ABSL_TSAN_MUTEX_POST_SIGNAL __tsan_mutex_post_signal #define ABSL_TSAN_MUTEX_PRE_DIVERT __tsan_mutex_pre_divert #define ABSL_TSAN_MUTEX_POST_DIVERT __tsan_mutex_post_divert #else #define ABSL_TSAN_MUTEX_CREATE(...) 
#define ABSL_TSAN_MUTEX_DESTROY(...) #define ABSL_TSAN_MUTEX_PRE_LOCK(...) #define ABSL_TSAN_MUTEX_POST_LOCK(...) #define ABSL_TSAN_MUTEX_PRE_UNLOCK(...) #define ABSL_TSAN_MUTEX_POST_UNLOCK(...) #define ABSL_TSAN_MUTEX_PRE_SIGNAL(...) #define ABSL_TSAN_MUTEX_POST_SIGNAL(...) #define ABSL_TSAN_MUTEX_PRE_DIVERT(...) #define ABSL_TSAN_MUTEX_POST_DIVERT(...) #endif #endif // ABSL_BASE_INTERNAL_TSAN_MUTEX_INTERFACE_H_ abseil-20220623.1/absl/base/internal/unaligned_access.h000066400000000000000000000045701430371345100224700ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ #define ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" // unaligned APIs // Portable handling of unaligned loads, stores, and copies. // The unaligned API is C++ only. The declarations use C++ features // (namespaces, inline) which are absent or incompatible in C. 
#if defined(__cplusplus) namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { inline uint16_t UnalignedLoad16(const void *p) { uint16_t t; memcpy(&t, p, sizeof t); return t; } inline uint32_t UnalignedLoad32(const void *p) { uint32_t t; memcpy(&t, p, sizeof t); return t; } inline uint64_t UnalignedLoad64(const void *p) { uint64_t t; memcpy(&t, p, sizeof t); return t; } inline void UnalignedStore16(void *p, uint16_t v) { memcpy(p, &v, sizeof v); } inline void UnalignedStore32(void *p, uint32_t v) { memcpy(p, &v, sizeof v); } inline void UnalignedStore64(void *p, uint64_t v) { memcpy(p, &v, sizeof v); } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #define ABSL_INTERNAL_UNALIGNED_LOAD16(_p) \ (absl::base_internal::UnalignedLoad16(_p)) #define ABSL_INTERNAL_UNALIGNED_LOAD32(_p) \ (absl::base_internal::UnalignedLoad32(_p)) #define ABSL_INTERNAL_UNALIGNED_LOAD64(_p) \ (absl::base_internal::UnalignedLoad64(_p)) #define ABSL_INTERNAL_UNALIGNED_STORE16(_p, _val) \ (absl::base_internal::UnalignedStore16(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE32(_p, _val) \ (absl::base_internal::UnalignedStore32(_p, _val)) #define ABSL_INTERNAL_UNALIGNED_STORE64(_p, _val) \ (absl::base_internal::UnalignedStore64(_p, _val)) #endif // defined(__cplusplus), end of unaligned API #endif // ABSL_BASE_INTERNAL_UNALIGNED_ACCESS_H_ abseil-20220623.1/absl/base/internal/unique_small_name_test.cc000066400000000000000000000043601430371345100240710ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. #include "gtest/gtest.h" #include "absl/base/optimization.h" #include "absl/strings/string_view.h" // This test by itself does not do anything fancy, but it serves as binary I can // query in shell test. namespace { template void DoNotOptimize(const T& var) { #ifdef __GNUC__ asm volatile("" : "+m"(const_cast(var))); #else std::cout << (void*)&var; #endif } int very_long_int_variable_name ABSL_INTERNAL_UNIQUE_SMALL_NAME() = 0; char very_long_str_variable_name[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = "abc"; TEST(UniqueSmallName, NonAutomaticVar) { EXPECT_EQ(very_long_int_variable_name, 0); EXPECT_EQ(absl::string_view(very_long_str_variable_name), "abc"); } int VeryLongFreeFunctionName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); TEST(UniqueSmallName, FreeFunction) { DoNotOptimize(&VeryLongFreeFunctionName); EXPECT_EQ(VeryLongFreeFunctionName(), 456); } int VeryLongFreeFunctionName() { return 456; } struct VeryLongStructName { explicit VeryLongStructName(int i); int VeryLongMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); static int VeryLongStaticMethodName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); private: int fld; }; TEST(UniqueSmallName, Struct) { VeryLongStructName var(10); DoNotOptimize(var); DoNotOptimize(&VeryLongStructName::VeryLongMethodName); DoNotOptimize(&VeryLongStructName::VeryLongStaticMethodName); EXPECT_EQ(var.VeryLongMethodName(), 10); EXPECT_EQ(VeryLongStructName::VeryLongStaticMethodName(), 123); } VeryLongStructName::VeryLongStructName(int i) : fld(i) {} int VeryLongStructName::VeryLongMethodName() { return fld; } int VeryLongStructName::VeryLongStaticMethodName() { return 123; } } // namespace abseil-20220623.1/absl/base/internal/unscaledcycleclock.cc000066400000000000000000000075521430371345100231740ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/unscaledcycleclock.h" #if ABSL_USE_UNSCALED_CYCLECLOCK #if defined(_WIN32) #include #endif #if defined(__powerpc__) || defined(__ppc__) #ifdef __GLIBC__ #include #elif defined(__FreeBSD__) // clang-format off // This order does actually matter =(. #include #include // clang-format on #include "absl/base/call_once.h" #endif #endif #include "absl/base/internal/sysinfo.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { #if defined(__i386__) int64_t UnscaledCycleClock::Now() { int64_t ret; __asm__ volatile("rdtsc" : "=A"(ret)); return ret; } double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } #elif defined(__x86_64__) double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } #elif defined(__powerpc__) || defined(__ppc__) int64_t UnscaledCycleClock::Now() { #ifdef __GLIBC__ return __ppc_get_timebase(); #else #ifdef __powerpc64__ int64_t tbr; asm volatile("mfspr %0, 268" : "=r"(tbr)); return tbr; #else int32_t tbu, tbl, tmp; asm volatile( "0:\n" "mftbu %[hi32]\n" "mftb %[lo32]\n" "mftbu %[tmp]\n" "cmpw %[tmp],%[hi32]\n" "bne 0b\n" : [ hi32 ] "=r"(tbu), [ lo32 ] "=r"(tbl), [ tmp ] "=r"(tmp)); return (static_cast(tbu) << 32) | tbl; #endif #endif } double UnscaledCycleClock::Frequency() { #ifdef __GLIBC__ return __ppc_get_timebase_freq(); #elif defined(_AIX) // This is the same constant value as returned by // 
__ppc_get_timebase_freq(). return static_cast(512000000); #elif defined(__FreeBSD__) static once_flag init_timebase_frequency_once; static double timebase_frequency = 0.0; base_internal::LowLevelCallOnce(&init_timebase_frequency_once, [&]() { size_t length = sizeof(timebase_frequency); sysctlbyname("kern.timecounter.tc.timebase.frequency", &timebase_frequency, &length, nullptr, 0); }); return timebase_frequency; #else #error Must implement UnscaledCycleClock::Frequency() #endif } #elif defined(__aarch64__) // System timer of ARMv8 runs at a different frequency than the CPU's. // The frequency is fixed, typically in the range 1-50MHz. It can be // read at CNTFRQ special register. We assume the OS has set up // the virtual timer properly. int64_t UnscaledCycleClock::Now() { int64_t virtual_timer_value; asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); return virtual_timer_value; } double UnscaledCycleClock::Frequency() { uint64_t aarch64_timer_frequency; asm volatile("mrs %0, cntfrq_el0" : "=r"(aarch64_timer_frequency)); return aarch64_timer_frequency; } #elif defined(__riscv) int64_t UnscaledCycleClock::Now() { int64_t virtual_timer_value; asm volatile("rdcycle %0" : "=r"(virtual_timer_value)); return virtual_timer_value; } double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } #elif defined(_M_IX86) || defined(_M_X64) #pragma intrinsic(__rdtsc) int64_t UnscaledCycleClock::Now() { return __rdtsc(); } double UnscaledCycleClock::Frequency() { return base_internal::NominalCPUFrequency(); } #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_USE_UNSCALED_CYCLECLOCK abseil-20220623.1/absl/base/internal/unscaledcycleclock.h000066400000000000000000000112201430371345100230210ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // UnscaledCycleClock // An UnscaledCycleClock yields the value and frequency of a cycle counter // that increments at a rate that is approximately constant. // This class is for internal use only, you should consider using CycleClock // instead. // // Notes: // The cycle counter frequency is not necessarily the core clock frequency. // That is, CycleCounter cycles are not necessarily "CPU cycles". // // An arbitrary offset may have been added to the counter at power on. // // On some platforms, the rate and offset of the counter may differ // slightly when read from different CPUs of a multiprocessor. Usually, // we try to ensure that the operating system adjusts values periodically // so that values agree approximately. If you need stronger guarantees, // consider using alternate interfaces. // // The CPU is not required to maintain the ordering of a cycle counter read // with respect to surrounding instructions. #ifndef ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ #define ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ #include #if defined(__APPLE__) #include #endif #include "absl/base/port.h" // The following platforms have an implementation of a hardware counter. 
#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__) || \ defined(__powerpc__) || defined(__ppc__) || defined(__riscv) || \ defined(_M_IX86) || (defined(_M_X64) && !defined(_M_ARM64EC)) #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 1 #else #define ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION 0 #endif // The following platforms often disable access to the hardware // counter (through a sandbox) even if the underlying hardware has a // usable counter. The CycleTimer interface also requires a *scaled* // CycleClock that runs at atleast 1 MHz. We've found some Android // ARM64 devices where this is not the case, so we disable it by // default on Android ARM64. #if defined(__native_client__) || (defined(__APPLE__)) || \ (defined(__ANDROID__) && defined(__aarch64__)) #define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 0 #else #define ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT 1 #endif // UnscaledCycleClock is an optional internal feature. // Use "#if ABSL_USE_UNSCALED_CYCLECLOCK" to test for its presence. // Can be overridden at compile-time via -DABSL_USE_UNSCALED_CYCLECLOCK=0|1 #if !defined(ABSL_USE_UNSCALED_CYCLECLOCK) #define ABSL_USE_UNSCALED_CYCLECLOCK \ (ABSL_HAVE_UNSCALED_CYCLECLOCK_IMPLEMENTATION && \ ABSL_USE_UNSCALED_CYCLECLOCK_DEFAULT) #endif #if ABSL_USE_UNSCALED_CYCLECLOCK // This macro can be used to test if UnscaledCycleClock::Frequency() // is NominalCPUFrequency() on a particular platform. 
#if (defined(__i386__) || defined(__x86_64__) || defined(__riscv) || \ defined(_M_IX86) || defined(_M_X64)) #define ABSL_INTERNAL_UNSCALED_CYCLECLOCK_FREQUENCY_IS_CPU_FREQUENCY #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace time_internal { class UnscaledCycleClockWrapperForGetCurrentTime; } // namespace time_internal namespace base_internal { class CycleClock; class UnscaledCycleClockWrapperForInitializeFrequency; class UnscaledCycleClock { private: UnscaledCycleClock() = delete; // Return the value of a cycle counter that counts at a rate that is // approximately constant. static int64_t Now(); // Return the how much UnscaledCycleClock::Now() increases per second. // This is not necessarily the core CPU clock frequency. // It may be the nominal value report by the kernel, rather than a measured // value. static double Frequency(); // Allowed users friend class base_internal::CycleClock; friend class time_internal::UnscaledCycleClockWrapperForGetCurrentTime; friend class base_internal::UnscaledCycleClockWrapperForInitializeFrequency; }; #if defined(__x86_64__) inline int64_t UnscaledCycleClock::Now() { uint64_t low, high; __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); return (high << 32) | low; } #endif } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_USE_UNSCALED_CYCLECLOCK #endif // ABSL_BASE_INTERNAL_UNSCALEDCYCLECLOCK_H_ abseil-20220623.1/absl/base/invoke_test.cc000066400000000000000000000267221430371345100200600ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/invoke.h" #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { namespace { int Function(int a, int b) { return a - b; } void VoidFunction(int& a, int& b) { a += b; b = a - b; a -= b; } int ZeroArgFunction() { return -1937; } int Sink(std::unique_ptr p) { return *p; } std::unique_ptr Factory(int n) { return make_unique(n); } void NoOp() {} struct ConstFunctor { int operator()(int a, int b) const { return a - b; } }; struct MutableFunctor { int operator()(int a, int b) { return a - b; } }; struct EphemeralFunctor { int operator()(int a, int b) && { return a - b; } }; struct OverloadedFunctor { template std::string operator()(const Args&... args) & { return StrCat("&", args...); } template std::string operator()(const Args&... args) const& { return StrCat("const&", args...); } template std::string operator()(const Args&... 
args) && { return StrCat("&&", args...); } }; struct Class { int Method(int a, int b) { return a - b; } int ConstMethod(int a, int b) const { return a - b; } int RefMethod(int a, int b) & { return a - b; } int RefRefMethod(int a, int b) && { return a - b; } int NoExceptMethod(int a, int b) noexcept { return a - b; } int VolatileMethod(int a, int b) volatile { return a - b; } int member; }; struct FlipFlop { int ConstMethod() const { return member; } FlipFlop operator*() const { return {-member}; } int member; }; // CallMaybeWithArg(f) resolves either to invoke(f) or invoke(f, 42), depending // on which one is valid. template decltype(base_internal::invoke(std::declval())) CallMaybeWithArg( const F& f) { return base_internal::invoke(f); } template decltype(base_internal::invoke(std::declval(), 42)) CallMaybeWithArg( const F& f) { return base_internal::invoke(f, 42); } TEST(InvokeTest, Function) { EXPECT_EQ(1, base_internal::invoke(Function, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Function, 3, 2)); } TEST(InvokeTest, NonCopyableArgument) { EXPECT_EQ(42, base_internal::invoke(Sink, make_unique(42))); } TEST(InvokeTest, NonCopyableResult) { EXPECT_THAT(base_internal::invoke(Factory, 42), ::testing::Pointee(42)); } TEST(InvokeTest, VoidResult) { base_internal::invoke(NoOp); } TEST(InvokeTest, ConstFunctor) { EXPECT_EQ(1, base_internal::invoke(ConstFunctor(), 3, 2)); } TEST(InvokeTest, MutableFunctor) { MutableFunctor f; EXPECT_EQ(1, base_internal::invoke(f, 3, 2)); EXPECT_EQ(1, base_internal::invoke(MutableFunctor(), 3, 2)); } TEST(InvokeTest, EphemeralFunctor) { EphemeralFunctor f; EXPECT_EQ(1, base_internal::invoke(std::move(f), 3, 2)); EXPECT_EQ(1, base_internal::invoke(EphemeralFunctor(), 3, 2)); } TEST(InvokeTest, OverloadedFunctor) { OverloadedFunctor f; const OverloadedFunctor& cf = f; EXPECT_EQ("&", base_internal::invoke(f)); EXPECT_EQ("& 42", base_internal::invoke(f, " 42")); EXPECT_EQ("const&", base_internal::invoke(cf)); EXPECT_EQ("const& 42", 
base_internal::invoke(cf, " 42")); EXPECT_EQ("&&", base_internal::invoke(std::move(f))); OverloadedFunctor f2; EXPECT_EQ("&& 42", base_internal::invoke(std::move(f2), " 42")); } TEST(InvokeTest, ReferenceWrapper) { ConstFunctor cf; MutableFunctor mf; EXPECT_EQ(1, base_internal::invoke(std::cref(cf), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::ref(cf), 3, 2)); EXPECT_EQ(1, base_internal::invoke(std::ref(mf), 3, 2)); } TEST(InvokeTest, MemberFunction) { std::unique_ptr p(new Class); std::unique_ptr cp(new Class); std::unique_ptr vp(new Class); EXPECT_EQ(1, base_internal::invoke(&Class::Method, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::RefRefMethod, std::move(*p), 3, 2)); // NOLINT EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::NoExceptMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, cp.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, *cp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, p.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *p, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, vp, 3, 2)); EXPECT_EQ(1, 
base_internal::invoke(&Class::VolatileMethod, vp.get(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::VolatileMethod, *vp, 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::Method, make_unique(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique(), 3, 2)); EXPECT_EQ(1, base_internal::invoke(&Class::ConstMethod, make_unique(), 3, 2)); } TEST(InvokeTest, DataMember) { std::unique_ptr p(new Class{42}); std::unique_ptr cp(new Class{42}); EXPECT_EQ(42, base_internal::invoke(&Class::member, p)); EXPECT_EQ(42, base_internal::invoke(&Class::member, *p)); EXPECT_EQ(42, base_internal::invoke(&Class::member, p.get())); base_internal::invoke(&Class::member, p) = 42; base_internal::invoke(&Class::member, p.get()) = 42; EXPECT_EQ(42, base_internal::invoke(&Class::member, cp)); EXPECT_EQ(42, base_internal::invoke(&Class::member, *cp)); EXPECT_EQ(42, base_internal::invoke(&Class::member, cp.get())); } TEST(InvokeTest, FlipFlop) { FlipFlop obj = {42}; // This call could resolve to (obj.*&FlipFlop::ConstMethod)() or // ((*obj).*&FlipFlop::ConstMethod)(). We verify that it's the former. 
EXPECT_EQ(42, base_internal::invoke(&FlipFlop::ConstMethod, obj)); EXPECT_EQ(42, base_internal::invoke(&FlipFlop::member, obj)); } TEST(InvokeTest, SfinaeFriendly) { CallMaybeWithArg(NoOp); EXPECT_THAT(CallMaybeWithArg(Factory), ::testing::Pointee(42)); } TEST(IsInvocableRTest, CallableExactMatch) { static_assert( base_internal::is_invocable_r::value, "Should be true for exact match of types on a free function"); } TEST(IsInvocableRTest, CallableArgumentConversionMatch) { static_assert( base_internal::is_invocable_r::value, "Should be true for convertible argument type"); } TEST(IsInvocableRTest, CallableReturnConversionMatch) { static_assert(base_internal::is_invocable_r::value, "Should be true for convertible return type"); } TEST(IsInvocableRTest, CallableReturnVoid) { static_assert(base_internal::is_invocable_r::value, "Should be true for void expected and actual return types"); static_assert( base_internal::is_invocable_r::value, "Should be true for void expected and non-void actual return types"); } TEST(IsInvocableRTest, CallableRefQualifierMismatch) { static_assert(!base_internal::is_invocable_r::value, "Should be false for reference constness mismatch"); static_assert(!base_internal::is_invocable_r::value, "Should be false for reference value category mismatch"); } TEST(IsInvocableRTest, CallableArgumentTypeMismatch) { static_assert(!base_internal::is_invocable_r::value, "Should be false for argument type mismatch"); } TEST(IsInvocableRTest, CallableReturnTypeMismatch) { static_assert(!base_internal::is_invocable_r::value, "Should be false for return type mismatch"); } TEST(IsInvocableRTest, CallableTooFewArgs) { static_assert( !base_internal::is_invocable_r::value, "Should be false for too few arguments"); } TEST(IsInvocableRTest, CallableTooManyArgs) { static_assert(!base_internal::is_invocable_r::value, "Should be false for too many arguments"); } TEST(IsInvocableRTest, MemberFunctionAndReference) { static_assert(base_internal::is_invocable_r::value, 
"Should be true for exact match of types on a member function " "and class reference"); } TEST(IsInvocableRTest, MemberFunctionAndPointer) { static_assert(base_internal::is_invocable_r::value, "Should be true for exact match of types on a member function " "and class pointer"); } TEST(IsInvocableRTest, DataMemberAndReference) { static_assert(base_internal::is_invocable_r::value, "Should be true for exact match of types on a data member and " "class reference"); } TEST(IsInvocableRTest, DataMemberAndPointer) { static_assert(base_internal::is_invocable_r::value, "Should be true for exact match of types on a data member and " "class pointer"); } TEST(IsInvocableRTest, CallableZeroArgs) { static_assert( base_internal::is_invocable_r::value, "Should be true for exact match for a zero-arg free function"); } } // namespace } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/log_severity.cc000066400000000000000000000034551430371345100202370ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/base/log_severity.h" #include #include "absl/base/attributes.h" namespace absl { ABSL_NAMESPACE_BEGIN std::ostream& operator<<(std::ostream& os, absl::LogSeverity s) { if (s == absl::NormalizeLogSeverity(s)) return os << absl::LogSeverityName(s); return os << "absl::LogSeverity(" << static_cast(s) << ")"; } std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s) { switch (s) { case absl::LogSeverityAtLeast::kInfo: case absl::LogSeverityAtLeast::kWarning: case absl::LogSeverityAtLeast::kError: case absl::LogSeverityAtLeast::kFatal: return os << ">=" << static_cast(s); case absl::LogSeverityAtLeast::kInfinity: return os << "INFINITY"; } return os; } std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s) { switch (s) { case absl::LogSeverityAtMost::kInfo: case absl::LogSeverityAtMost::kWarning: case absl::LogSeverityAtMost::kError: case absl::LogSeverityAtMost::kFatal: return os << "<=" << static_cast(s); case absl::LogSeverityAtMost::kNegativeInfinity: return os << "NEGATIVE_INFINITY"; } return os; } ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/log_severity.h000066400000000000000000000147131430371345100201000ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_BASE_LOG_SEVERITY_H_ #define ABSL_BASE_LOG_SEVERITY_H_ #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN // absl::LogSeverity // // Four severity levels are defined. Logging APIs should terminate the program // when a message is logged at severity `kFatal`; the other levels have no // special semantics. // // Values other than the four defined levels (e.g. produced by `static_cast`) // are valid, but their semantics when passed to a function, macro, or flag // depend on the function, macro, or flag. The usual behavior is to normalize // such values to a defined severity level, however in some cases values other // than the defined levels are useful for comparison. // // Example: // // // Effectively disables all logging: // SetMinLogLevel(static_cast(100)); // // Abseil flags may be defined with type `LogSeverity`. Dependency layering // constraints require that the `AbslParseFlag()` overload be declared and // defined in the flags library itself rather than here. The `AbslUnparseFlag()` // overload is defined there as well for consistency. // // absl::LogSeverity Flag String Representation // // An `absl::LogSeverity` has a string representation used for parsing // command-line flags based on the enumerator name (e.g. `kFatal`) or // its unprefixed name (without the `k`) in any case-insensitive form. (E.g. // "FATAL", "fatal" or "Fatal" are all valid.) Unparsing such flags produces an // unprefixed string representation in all caps (e.g. "FATAL") or an integer. // // Additionally, the parser accepts arbitrary integers (as if the type were // `int`). // // Examples: // // --my_log_level=kInfo // --my_log_level=INFO // --my_log_level=info // --my_log_level=0 // // Unparsing a flag produces the same result as `absl::LogSeverityName()` for // the standard levels and a base-ten integer otherwise. 
enum class LogSeverity : int { kInfo = 0, kWarning = 1, kError = 2, kFatal = 3, }; // LogSeverities() // // Returns an iterable of all standard `absl::LogSeverity` values, ordered from // least to most severe. constexpr std::array LogSeverities() { return {{absl::LogSeverity::kInfo, absl::LogSeverity::kWarning, absl::LogSeverity::kError, absl::LogSeverity::kFatal}}; } // LogSeverityName() // // Returns the all-caps string representation (e.g. "INFO") of the specified // severity level if it is one of the standard levels and "UNKNOWN" otherwise. constexpr const char* LogSeverityName(absl::LogSeverity s) { return s == absl::LogSeverity::kInfo ? "INFO" : s == absl::LogSeverity::kWarning ? "WARNING" : s == absl::LogSeverity::kError ? "ERROR" : s == absl::LogSeverity::kFatal ? "FATAL" : "UNKNOWN"; } // NormalizeLogSeverity() // // Values less than `kInfo` normalize to `kInfo`; values greater than `kFatal` // normalize to `kError` (**NOT** `kFatal`). constexpr absl::LogSeverity NormalizeLogSeverity(absl::LogSeverity s) { return s < absl::LogSeverity::kInfo ? absl::LogSeverity::kInfo : s > absl::LogSeverity::kFatal ? absl::LogSeverity::kError : s; } constexpr absl::LogSeverity NormalizeLogSeverity(int s) { return absl::NormalizeLogSeverity(static_cast(s)); } // operator<< // // The exact representation of a streamed `absl::LogSeverity` is deliberately // unspecified; do not rely on it. std::ostream& operator<<(std::ostream& os, absl::LogSeverity s); // Enums representing a lower bound for LogSeverity. APIs that only operate on // messages of at least a certain level (for example, `SetMinLogLevel()`) use // this type to specify that level. absl::LogSeverityAtLeast::kInfinity is // a level above all threshold levels and therefore no log message will // ever meet this threshold. 
enum class LogSeverityAtLeast : int { kInfo = static_cast(absl::LogSeverity::kInfo), kWarning = static_cast(absl::LogSeverity::kWarning), kError = static_cast(absl::LogSeverity::kError), kFatal = static_cast(absl::LogSeverity::kFatal), kInfinity = 1000, }; std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtLeast s); // Enums representing an upper bound for LogSeverity. APIs that only operate on // messages of at most a certain level (for example, buffer all messages at or // below a certain level) use this type to specify that level. // absl::LogSeverityAtMost::kNegativeInfinity is a level below all threshold // levels and therefore will exclude all log messages. enum class LogSeverityAtMost : int { kNegativeInfinity = -1000, kInfo = static_cast(absl::LogSeverity::kInfo), kWarning = static_cast(absl::LogSeverity::kWarning), kError = static_cast(absl::LogSeverity::kError), kFatal = static_cast(absl::LogSeverity::kFatal), }; std::ostream& operator<<(std::ostream& os, absl::LogSeverityAtMost s); #define COMPOP(op1, op2, T) \ constexpr bool operator op1(absl::T lhs, absl::LogSeverity rhs) { \ return static_cast(lhs) op1 rhs; \ } \ constexpr bool operator op2(absl::LogSeverity lhs, absl::T rhs) { \ return lhs op2 static_cast(rhs); \ } // Comparisons between `LogSeverity` and `LogSeverityAtLeast`/ // `LogSeverityAtMost` are only supported in one direction. // Valid checks are: // LogSeverity >= LogSeverityAtLeast // LogSeverity < LogSeverityAtLeast // LogSeverity <= LogSeverityAtMost // LogSeverity > LogSeverityAtMost COMPOP(>, <, LogSeverityAtLeast) COMPOP(<=, >=, LogSeverityAtLeast) COMPOP(<, >, LogSeverityAtMost) COMPOP(>=, <=, LogSeverityAtMost) #undef COMPOP ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_LOG_SEVERITY_H_ abseil-20220623.1/absl/base/log_severity_test.cc000066400000000000000000000253221430371345100212730ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/log_severity.h" #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/flags/internal/flag.h" #include "absl/flags/marshalling.h" #include "absl/strings/str_cat.h" namespace { using ::testing::Eq; using ::testing::IsFalse; using ::testing::IsTrue; using ::testing::TestWithParam; using ::testing::Values; template std::string StreamHelper(T value) { std::ostringstream stream; stream << value; return stream.str(); } TEST(StreamTest, Works) { EXPECT_THAT(StreamHelper(static_cast(-100)), Eq("absl::LogSeverity(-100)")); EXPECT_THAT(StreamHelper(absl::LogSeverity::kInfo), Eq("INFO")); EXPECT_THAT(StreamHelper(absl::LogSeverity::kWarning), Eq("WARNING")); EXPECT_THAT(StreamHelper(absl::LogSeverity::kError), Eq("ERROR")); EXPECT_THAT(StreamHelper(absl::LogSeverity::kFatal), Eq("FATAL")); EXPECT_THAT(StreamHelper(static_cast(4)), Eq("absl::LogSeverity(4)")); } static_assert(absl::flags_internal::FlagUseValueAndInitBitStorage< absl::LogSeverity>::value, "Flags of type absl::LogSeverity ought to be lock-free."); using ParseFlagFromOutOfRangeIntegerTest = TestWithParam; INSTANTIATE_TEST_SUITE_P( Instantiation, ParseFlagFromOutOfRangeIntegerTest, Values(static_cast(std::numeric_limits::min()) - 1, static_cast(std::numeric_limits::max()) + 1)); TEST_P(ParseFlagFromOutOfRangeIntegerTest, ReturnsError) { const std::string to_parse = 
absl::StrCat(GetParam()); absl::LogSeverity value; std::string error; EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; } using ParseFlagFromAlmostOutOfRangeIntegerTest = TestWithParam; INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromAlmostOutOfRangeIntegerTest, Values(std::numeric_limits::min(), std::numeric_limits::max())); TEST_P(ParseFlagFromAlmostOutOfRangeIntegerTest, YieldsExpectedValue) { const auto expected = static_cast(GetParam()); const std::string to_parse = absl::StrCat(GetParam()); absl::LogSeverity value; std::string error; ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; EXPECT_THAT(value, Eq(expected)); } using ParseFlagFromIntegerMatchingEnumeratorTest = TestWithParam>; INSTANTIATE_TEST_SUITE_P( Instantiation, ParseFlagFromIntegerMatchingEnumeratorTest, Values(std::make_tuple("0", absl::LogSeverity::kInfo), std::make_tuple(" 0", absl::LogSeverity::kInfo), std::make_tuple("-0", absl::LogSeverity::kInfo), std::make_tuple("+0", absl::LogSeverity::kInfo), std::make_tuple("00", absl::LogSeverity::kInfo), std::make_tuple("0 ", absl::LogSeverity::kInfo), std::make_tuple("0x0", absl::LogSeverity::kInfo), std::make_tuple("1", absl::LogSeverity::kWarning), std::make_tuple("+1", absl::LogSeverity::kWarning), std::make_tuple("2", absl::LogSeverity::kError), std::make_tuple("3", absl::LogSeverity::kFatal))); TEST_P(ParseFlagFromIntegerMatchingEnumeratorTest, YieldsExpectedValue) { const absl::string_view to_parse = std::get<0>(GetParam()); const absl::LogSeverity expected = std::get<1>(GetParam()); absl::LogSeverity value; std::string error; ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; EXPECT_THAT(value, Eq(expected)); } using ParseFlagFromOtherIntegerTest = TestWithParam>; INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromOtherIntegerTest, Values(std::make_tuple("-1", -1), std::make_tuple("4", 4), std::make_tuple("010", 10), std::make_tuple("0x10", 16))); 
TEST_P(ParseFlagFromOtherIntegerTest, YieldsExpectedValue) { const absl::string_view to_parse = std::get<0>(GetParam()); const auto expected = static_cast(std::get<1>(GetParam())); absl::LogSeverity value; std::string error; ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; EXPECT_THAT(value, Eq(expected)); } using ParseFlagFromEnumeratorTest = TestWithParam>; INSTANTIATE_TEST_SUITE_P( Instantiation, ParseFlagFromEnumeratorTest, Values(std::make_tuple("INFO", absl::LogSeverity::kInfo), std::make_tuple("info", absl::LogSeverity::kInfo), std::make_tuple("kInfo", absl::LogSeverity::kInfo), std::make_tuple("iNfO", absl::LogSeverity::kInfo), std::make_tuple("kInFo", absl::LogSeverity::kInfo), std::make_tuple("WARNING", absl::LogSeverity::kWarning), std::make_tuple("warning", absl::LogSeverity::kWarning), std::make_tuple("kWarning", absl::LogSeverity::kWarning), std::make_tuple("WaRnInG", absl::LogSeverity::kWarning), std::make_tuple("KwArNiNg", absl::LogSeverity::kWarning), std::make_tuple("ERROR", absl::LogSeverity::kError), std::make_tuple("error", absl::LogSeverity::kError), std::make_tuple("kError", absl::LogSeverity::kError), std::make_tuple("eRrOr", absl::LogSeverity::kError), std::make_tuple("kErRoR", absl::LogSeverity::kError), std::make_tuple("FATAL", absl::LogSeverity::kFatal), std::make_tuple("fatal", absl::LogSeverity::kFatal), std::make_tuple("kFatal", absl::LogSeverity::kFatal), std::make_tuple("FaTaL", absl::LogSeverity::kFatal), std::make_tuple("KfAtAl", absl::LogSeverity::kFatal))); TEST_P(ParseFlagFromEnumeratorTest, YieldsExpectedValue) { const absl::string_view to_parse = std::get<0>(GetParam()); const absl::LogSeverity expected = std::get<1>(GetParam()); absl::LogSeverity value; std::string error; ASSERT_THAT(absl::ParseFlag(to_parse, &value, &error), IsTrue()) << error; EXPECT_THAT(value, Eq(expected)); } using ParseFlagFromGarbageTest = TestWithParam; INSTANTIATE_TEST_SUITE_P(Instantiation, ParseFlagFromGarbageTest, 
Values("", "\0", " ", "garbage", "kkinfo", "I")); TEST_P(ParseFlagFromGarbageTest, ReturnsError) { const absl::string_view to_parse = GetParam(); absl::LogSeverity value; std::string error; EXPECT_THAT(absl::ParseFlag(to_parse, &value, &error), IsFalse()) << value; } using UnparseFlagToEnumeratorTest = TestWithParam>; INSTANTIATE_TEST_SUITE_P( Instantiation, UnparseFlagToEnumeratorTest, Values(std::make_tuple(absl::LogSeverity::kInfo, "INFO"), std::make_tuple(absl::LogSeverity::kWarning, "WARNING"), std::make_tuple(absl::LogSeverity::kError, "ERROR"), std::make_tuple(absl::LogSeverity::kFatal, "FATAL"))); TEST_P(UnparseFlagToEnumeratorTest, ReturnsExpectedValueAndRoundTrips) { const absl::LogSeverity to_unparse = std::get<0>(GetParam()); const absl::string_view expected = std::get<1>(GetParam()); const std::string stringified_value = absl::UnparseFlag(to_unparse); EXPECT_THAT(stringified_value, Eq(expected)); absl::LogSeverity reparsed_value; std::string error; EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), IsTrue()); EXPECT_THAT(reparsed_value, Eq(to_unparse)); } using UnparseFlagToOtherIntegerTest = TestWithParam; INSTANTIATE_TEST_SUITE_P(Instantiation, UnparseFlagToOtherIntegerTest, Values(std::numeric_limits::min(), -1, 4, std::numeric_limits::max())); TEST_P(UnparseFlagToOtherIntegerTest, ReturnsExpectedValueAndRoundTrips) { const absl::LogSeverity to_unparse = static_cast(GetParam()); const std::string expected = absl::StrCat(GetParam()); const std::string stringified_value = absl::UnparseFlag(to_unparse); EXPECT_THAT(stringified_value, Eq(expected)); absl::LogSeverity reparsed_value; std::string error; EXPECT_THAT(absl::ParseFlag(stringified_value, &reparsed_value, &error), IsTrue()); EXPECT_THAT(reparsed_value, Eq(to_unparse)); } TEST(LogThresholdTest, LogSeverityAtLeastTest) { EXPECT_LT(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kFatal); EXPECT_GT(absl::LogSeverityAtLeast::kError, absl::LogSeverity::kInfo); 
EXPECT_LE(absl::LogSeverityAtLeast::kInfo, absl::LogSeverity::kError); EXPECT_GE(absl::LogSeverity::kError, absl::LogSeverityAtLeast::kInfo); } TEST(LogThresholdTest, LogSeverityAtMostTest) { EXPECT_GT(absl::LogSeverity::kError, absl::LogSeverityAtMost::kWarning); EXPECT_LT(absl::LogSeverityAtMost::kError, absl::LogSeverity::kFatal); EXPECT_GE(absl::LogSeverityAtMost::kFatal, absl::LogSeverity::kError); EXPECT_LE(absl::LogSeverity::kWarning, absl::LogSeverityAtMost::kError); } TEST(LogThresholdTest, Extremes) { EXPECT_LT(absl::LogSeverity::kFatal, absl::LogSeverityAtLeast::kInfinity); EXPECT_GT(absl::LogSeverity::kInfo, absl::LogSeverityAtMost::kNegativeInfinity); } TEST(LogThresholdTest, Output) { EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfo), Eq(">=INFO")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kWarning), Eq(">=WARNING")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kError), Eq(">=ERROR")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kFatal), Eq(">=FATAL")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtLeast::kInfinity), Eq("INFINITY")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kInfo), Eq("<=INFO")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kWarning), Eq("<=WARNING")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kError), Eq("<=ERROR")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kFatal), Eq("<=FATAL")); EXPECT_THAT(StreamHelper(absl::LogSeverityAtMost::kNegativeInfinity), Eq("NEGATIVE_INFINITY")); } } // namespace abseil-20220623.1/absl/base/macros.h000066400000000000000000000132121430371345100166420ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: macros.h // ----------------------------------------------------------------------------- // // This header file defines the set of language macros used within Abseil code. // For the set of macros used to determine supported compilers and platforms, // see absl/base/config.h instead. // // This code is compiled directly on many platforms, including client // platforms like Windows, Mac, and embedded systems. Before making // any changes here, make sure that you're not breaking any platforms. #ifndef ABSL_BASE_MACROS_H_ #define ABSL_BASE_MACROS_H_ #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/optimization.h" #include "absl/base/port.h" // ABSL_ARRAYSIZE() // // Returns the number of elements in an array as a compile-time constant, which // can be used in defining new arrays. If you use this macro on a pointer by // mistake, you will get a compile-time error. #define ABSL_ARRAYSIZE(array) \ (sizeof(::absl::macros_internal::ArraySizeHelper(array))) namespace absl { ABSL_NAMESPACE_BEGIN namespace macros_internal { // Note: this internal template function declaration is used by ABSL_ARRAYSIZE. // The function doesn't need a definition, as we only use its type. 
template auto ArraySizeHelper(const T (&array)[N]) -> char (&)[N]; } // namespace macros_internal ABSL_NAMESPACE_END } // namespace absl // ABSL_BAD_CALL_IF() // // Used on a function overload to trap bad calls: any call that matches the // overload will cause a compile-time error. This macro uses a clang-specific // "enable_if" attribute, as described at // https://clang.llvm.org/docs/AttributeReference.html#enable-if // // Overloads which use this macro should be bracketed by // `#ifdef ABSL_BAD_CALL_IF`. // // Example: // // int isdigit(int c); // #ifdef ABSL_BAD_CALL_IF // int isdigit(int c) // ABSL_BAD_CALL_IF(c <= -1 || c > 255, // "'c' must have the value of an unsigned char or EOF"); // #endif // ABSL_BAD_CALL_IF #if ABSL_HAVE_ATTRIBUTE(enable_if) #define ABSL_BAD_CALL_IF(expr, msg) \ __attribute__((enable_if(expr, "Bad call trap"), unavailable(msg))) #endif // ABSL_ASSERT() // // In C++11, `assert` can't be used portably within constexpr functions. // ABSL_ASSERT functions as a runtime assert but works in C++11 constexpr // functions. Example: // // constexpr double Divide(double a, double b) { // return ABSL_ASSERT(b != 0), a / b; // } // // This macro is inspired by // https://akrzemi1.wordpress.com/2017/05/18/asserts-in-constexpr-functions/ #if defined(NDEBUG) #define ABSL_ASSERT(expr) \ (false ? static_cast(expr) : static_cast(0)) #else #define ABSL_ASSERT(expr) \ (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) \ : [] { assert(false && #expr); }()) // NOLINT #endif // `ABSL_INTERNAL_HARDENING_ABORT()` controls how `ABSL_HARDENING_ASSERT()` // aborts the program in release mode (when NDEBUG is defined). The // implementation should abort the program as quickly as possible and ideally it // should not be possible to ignore the abort request. 
#if (ABSL_HAVE_BUILTIN(__builtin_trap) && \ ABSL_HAVE_BUILTIN(__builtin_unreachable)) || \ (defined(__GNUC__) && !defined(__clang__)) #define ABSL_INTERNAL_HARDENING_ABORT() \ do { \ __builtin_trap(); \ __builtin_unreachable(); \ } while (false) #else #define ABSL_INTERNAL_HARDENING_ABORT() abort() #endif // ABSL_HARDENING_ASSERT() // // `ABSL_HARDENING_ASSERT()` is like `ABSL_ASSERT()`, but used to implement // runtime assertions that should be enabled in hardened builds even when // `NDEBUG` is defined. // // When `NDEBUG` is not defined, `ABSL_HARDENING_ASSERT()` is identical to // `ABSL_ASSERT()`. // // See `ABSL_OPTION_HARDENED` in `absl/base/options.h` for more information on // hardened mode. #if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG) #define ABSL_HARDENING_ASSERT(expr) \ (ABSL_PREDICT_TRUE((expr)) ? static_cast(0) \ : [] { ABSL_INTERNAL_HARDENING_ABORT(); }()) #else #define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr) #endif #ifdef ABSL_HAVE_EXCEPTIONS #define ABSL_INTERNAL_TRY try #define ABSL_INTERNAL_CATCH_ANY catch (...) #define ABSL_INTERNAL_RETHROW do { throw; } while (false) #else // ABSL_HAVE_EXCEPTIONS #define ABSL_INTERNAL_TRY if (true) #define ABSL_INTERNAL_CATCH_ANY else if (false) #define ABSL_INTERNAL_RETHROW do {} while (false) #endif // ABSL_HAVE_EXCEPTIONS // `ABSL_INTERNAL_UNREACHABLE` is an unreachable statement. A program which // reaches one has undefined behavior, and the compiler may optimize // accordingly. #if defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) #define ABSL_INTERNAL_UNREACHABLE __builtin_unreachable() #elif defined(_MSC_VER) #define ABSL_INTERNAL_UNREACHABLE __assume(0) #else #define ABSL_INTERNAL_UNREACHABLE #endif #endif // ABSL_BASE_MACROS_H_ abseil-20220623.1/absl/base/optimization.h000066400000000000000000000223531430371345100201120ustar00rootroot00000000000000// // Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: optimization.h // ----------------------------------------------------------------------------- // // This header file defines portable macros for performance optimization. #ifndef ABSL_BASE_OPTIMIZATION_H_ #define ABSL_BASE_OPTIMIZATION_H_ #include #include "absl/base/config.h" // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION // // Instructs the compiler to avoid optimizing tail-call recursion. This macro is // useful when you wish to preserve the existing function order within a stack // trace for logging, debugging, or profiling purposes. // // Example: // // int f() { // int result = g(); // ABSL_BLOCK_TAIL_CALL_OPTIMIZATION(); // return result; // } #if defined(__pnacl__) #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } #elif defined(__clang__) // Clang will not tail call given inline volatile assembly. #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") #elif defined(__GNUC__) // GCC will not tail call given inline volatile assembly. #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __asm__ __volatile__("") #elif defined(_MSC_VER) #include // The __nop() intrinsic blocks the optimisation. 
#define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() __nop() #else #define ABSL_BLOCK_TAIL_CALL_OPTIMIZATION() if (volatile int x = 0) { (void)x; } #endif // ABSL_CACHELINE_SIZE // // Explicitly defines the size of the L1 cache for purposes of alignment. // Setting the cacheline size allows you to specify that certain objects be // aligned on a cacheline boundary with `ABSL_CACHELINE_ALIGNED` declarations. // (See below.) // // NOTE: this macro should be replaced with the following C++17 features, when // those are generally available: // // * `std::hardware_constructive_interference_size` // * `std::hardware_destructive_interference_size` // // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html // for more information. #if defined(__GNUC__) // Cache line alignment #if defined(__i386__) || defined(__x86_64__) #define ABSL_CACHELINE_SIZE 64 #elif defined(__powerpc64__) #define ABSL_CACHELINE_SIZE 128 #elif defined(__aarch64__) // We would need to read special register ctr_el0 to find out L1 dcache size. // This value is a good estimate based on a real aarch64 machine. #define ABSL_CACHELINE_SIZE 64 #elif defined(__arm__) // Cache line sizes for ARM: These values are not strictly correct since // cache line sizes depend on implementations, not architectures. There // are even implementations with cache line sizes configurable at boot // time. #if defined(__ARM_ARCH_5T__) #define ABSL_CACHELINE_SIZE 32 #elif defined(__ARM_ARCH_7A__) #define ABSL_CACHELINE_SIZE 64 #endif #endif #ifndef ABSL_CACHELINE_SIZE // A reasonable default guess. Note that overestimates tend to waste more // space, while underestimates tend to waste more time. #define ABSL_CACHELINE_SIZE 64 #endif // ABSL_CACHELINE_ALIGNED // // Indicates that the declared object be cache aligned using // `ABSL_CACHELINE_SIZE` (see above). Cacheline aligning objects allows you to // load a set of related objects in the L1 cache for performance improvements. 
// Cacheline aligning objects properly allows constructive memory sharing and // prevents destructive (or "false") memory sharing. // // NOTE: callers should replace uses of this macro with `alignas()` using // `std::hardware_constructive_interference_size` and/or // `std::hardware_destructive_interference_size` when C++17 becomes available to // them. // // See http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0154r1.html // for more information. // // On some compilers, `ABSL_CACHELINE_ALIGNED` expands to an `__attribute__` // or `__declspec` attribute. For compilers where this is not known to work, // the macro expands to nothing. // // No further guarantees are made here. The result of applying the macro // to variables and types is always implementation-defined. // // WARNING: It is easy to use this attribute incorrectly, even to the point // of causing bugs that are difficult to diagnose, crash, etc. It does not // of itself guarantee that objects are aligned to a cache line. // // NOTE: Some compilers are picky about the locations of annotations such as // this attribute, so prefer to put it at the beginning of your declaration. // For example, // // ABSL_CACHELINE_ALIGNED static Foo* foo = ... // // class ABSL_CACHELINE_ALIGNED Bar { ... // // Recommendations: // // 1) Consult compiler documentation; this comment is not kept in sync as // toolchains evolve. // 2) Verify your use has the intended effect. This often requires inspecting // the generated machine code. // 3) Prefer applying this attribute to individual variables. Avoid // applying it to types. This tends to localize the effect. 
#define ABSL_CACHELINE_ALIGNED __attribute__((aligned(ABSL_CACHELINE_SIZE))) #elif defined(_MSC_VER) #define ABSL_CACHELINE_SIZE 64 #define ABSL_CACHELINE_ALIGNED __declspec(align(ABSL_CACHELINE_SIZE)) #else #define ABSL_CACHELINE_SIZE 64 #define ABSL_CACHELINE_ALIGNED #endif // ABSL_PREDICT_TRUE, ABSL_PREDICT_FALSE // // Enables the compiler to prioritize compilation using static analysis for // likely paths within a boolean branch. // // Example: // // if (ABSL_PREDICT_TRUE(expression)) { // return result; // Faster if more likely // } else { // return 0; // } // // Compilers can use the information that a certain branch is not likely to be // taken (for instance, a CHECK failure) to optimize for the common case in // the absence of better information (ie. compiling gcc with `-fprofile-arcs`). // // Recommendation: Modern CPUs dynamically predict branch execution paths, // typically with accuracy greater than 97%. As a result, annotating every // branch in a codebase is likely counterproductive; however, annotating // specific branches that are both hot and consistently mispredicted is likely // to yield performance improvements. #if ABSL_HAVE_BUILTIN(__builtin_expect) || \ (defined(__GNUC__) && !defined(__clang__)) #define ABSL_PREDICT_FALSE(x) (__builtin_expect(false || (x), false)) #define ABSL_PREDICT_TRUE(x) (__builtin_expect(false || (x), true)) #else #define ABSL_PREDICT_FALSE(x) (x) #define ABSL_PREDICT_TRUE(x) (x) #endif // ABSL_ASSUME(cond) // // Informs the compiler that a condition is always true and that it can assume // it to be true for optimization purposes. // // WARNING: If the condition is false, the program can produce undefined and // potentially dangerous behavior. // // In !NDEBUG mode, the condition is checked with an assert(). // // NOTE: The expression must not have side effects, as it may only be evaluated // in some compilation modes and not others. 
Some compilers may issue a warning // if the compiler cannot prove the expression has no side effects. For example, // the expression should not use a function call since the compiler cannot prove // that a function call does not have side effects. // // Example: // // int x = ...; // ABSL_ASSUME(x >= 0); // // The compiler can optimize the division to a simple right shift using the // // assumption specified above. // int y = x / 16; // #if !defined(NDEBUG) #define ABSL_ASSUME(cond) assert(cond) #elif ABSL_HAVE_BUILTIN(__builtin_assume) #define ABSL_ASSUME(cond) __builtin_assume(cond) #elif defined(__GNUC__) || ABSL_HAVE_BUILTIN(__builtin_unreachable) #define ABSL_ASSUME(cond) \ do { \ if (!(cond)) __builtin_unreachable(); \ } while (0) #elif defined(_MSC_VER) #define ABSL_ASSUME(cond) __assume(cond) #else #define ABSL_ASSUME(cond) \ do { \ static_cast(false && (cond)); \ } while (0) #endif // ABSL_INTERNAL_UNIQUE_SMALL_NAME(cond) // This macro forces small unique name on a static file level symbols like // static local variables or static functions. This is intended to be used in // macro definitions to optimize the cost of generated code. Do NOT use it on // symbols exported from translation unit since it may cause a link time // conflict. 
// // Example: // // #define MY_MACRO(txt) // namespace { // char VeryVeryLongVarName[] ABSL_INTERNAL_UNIQUE_SMALL_NAME() = txt; // const char* VeryVeryLongFuncName() ABSL_INTERNAL_UNIQUE_SMALL_NAME(); // const char* VeryVeryLongFuncName() { return txt; } // } // #if defined(__GNUC__) #define ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #x #define ABSL_INTERNAL_UNIQUE_SMALL_NAME1(x) ABSL_INTERNAL_UNIQUE_SMALL_NAME2(x) #define ABSL_INTERNAL_UNIQUE_SMALL_NAME() \ asm(ABSL_INTERNAL_UNIQUE_SMALL_NAME1(.absl.__COUNTER__)) #else #define ABSL_INTERNAL_UNIQUE_SMALL_NAME() #endif #endif // ABSL_BASE_OPTIMIZATION_H_ abseil-20220623.1/absl/base/optimization_test.cc000066400000000000000000000101331430371345100213000ustar00rootroot00000000000000// Copyright 2020 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/optimization.h" #include "gtest/gtest.h" #include "absl/types/optional.h" namespace { // Tests for the ABSL_PREDICT_TRUE and ABSL_PREDICT_FALSE macros. // The tests only verify that the macros are functionally correct - i.e. code // behaves as if they weren't used. They don't try to check their impact on // optimization. 
TEST(PredictTest, PredictTrue) { EXPECT_TRUE(ABSL_PREDICT_TRUE(true)); EXPECT_FALSE(ABSL_PREDICT_TRUE(false)); EXPECT_TRUE(ABSL_PREDICT_TRUE(1 == 1)); EXPECT_FALSE(ABSL_PREDICT_TRUE(1 == 2)); if (ABSL_PREDICT_TRUE(false)) ADD_FAILURE(); if (!ABSL_PREDICT_TRUE(true)) ADD_FAILURE(); EXPECT_TRUE(ABSL_PREDICT_TRUE(true) && true); EXPECT_TRUE(ABSL_PREDICT_TRUE(true) || false); } TEST(PredictTest, PredictFalse) { EXPECT_TRUE(ABSL_PREDICT_FALSE(true)); EXPECT_FALSE(ABSL_PREDICT_FALSE(false)); EXPECT_TRUE(ABSL_PREDICT_FALSE(1 == 1)); EXPECT_FALSE(ABSL_PREDICT_FALSE(1 == 2)); if (ABSL_PREDICT_FALSE(false)) ADD_FAILURE(); if (!ABSL_PREDICT_FALSE(true)) ADD_FAILURE(); EXPECT_TRUE(ABSL_PREDICT_FALSE(true) && true); EXPECT_TRUE(ABSL_PREDICT_FALSE(true) || false); } TEST(PredictTest, OneEvaluation) { // Verify that the expression is only evaluated once. int x = 0; if (ABSL_PREDICT_TRUE((++x) == 0)) ADD_FAILURE(); EXPECT_EQ(x, 1); if (ABSL_PREDICT_FALSE((++x) == 0)) ADD_FAILURE(); EXPECT_EQ(x, 2); } TEST(PredictTest, OperatorOrder) { // Verify that operator order inside and outside the macro behaves well. // These would fail for a naive '#define ABSL_PREDICT_TRUE(x) x' EXPECT_TRUE(ABSL_PREDICT_TRUE(1 && 2) == true); EXPECT_TRUE(ABSL_PREDICT_FALSE(1 && 2) == true); EXPECT_TRUE(!ABSL_PREDICT_TRUE(1 == 2)); EXPECT_TRUE(!ABSL_PREDICT_FALSE(1 == 2)); } TEST(PredictTest, Pointer) { const int x = 3; const int *good_intptr = &x; const int *null_intptr = nullptr; EXPECT_TRUE(ABSL_PREDICT_TRUE(good_intptr)); EXPECT_FALSE(ABSL_PREDICT_TRUE(null_intptr)); EXPECT_TRUE(ABSL_PREDICT_FALSE(good_intptr)); EXPECT_FALSE(ABSL_PREDICT_FALSE(null_intptr)); } TEST(PredictTest, Optional) { // Note: An optional's truth value is the value's existence, not its truth. 
absl::optional has_value(false); absl::optional no_value; EXPECT_TRUE(ABSL_PREDICT_TRUE(has_value)); EXPECT_FALSE(ABSL_PREDICT_TRUE(no_value)); EXPECT_TRUE(ABSL_PREDICT_FALSE(has_value)); EXPECT_FALSE(ABSL_PREDICT_FALSE(no_value)); } class ImplictlyConvertibleToBool { public: explicit ImplictlyConvertibleToBool(bool value) : value_(value) {} operator bool() const { // NOLINT(google-explicit-constructor) return value_; } private: bool value_; }; TEST(PredictTest, ImplicitBoolConversion) { const ImplictlyConvertibleToBool is_true(true); const ImplictlyConvertibleToBool is_false(false); if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE(); if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE(); if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE(); if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE(); } class ExplictlyConvertibleToBool { public: explicit ExplictlyConvertibleToBool(bool value) : value_(value) {} explicit operator bool() const { return value_; } private: bool value_; }; TEST(PredictTest, ExplicitBoolConversion) { const ExplictlyConvertibleToBool is_true(true); const ExplictlyConvertibleToBool is_false(false); if (!ABSL_PREDICT_TRUE(is_true)) ADD_FAILURE(); if (ABSL_PREDICT_TRUE(is_false)) ADD_FAILURE(); if (!ABSL_PREDICT_FALSE(is_true)) ADD_FAILURE(); if (ABSL_PREDICT_FALSE(is_false)) ADD_FAILURE(); } } // namespace abseil-20220623.1/absl/base/options.h000066400000000000000000000264441430371345100170640ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: options.h // ----------------------------------------------------------------------------- // // This file contains Abseil configuration options for setting specific // implementations instead of letting Abseil determine which implementation to // use at compile-time. Setting these options may be useful for package or build // managers who wish to guarantee ABI stability within binary builds (which are // otherwise difficult to enforce). // // *** IMPORTANT NOTICE FOR PACKAGE MANAGERS: It is important that // maintainers of package managers who wish to package Abseil read and // understand this file! *** // // Abseil contains a number of possible configuration endpoints, based on // parameters such as the detected platform, language version, or command-line // flags used to invoke the underlying binary. As is the case with all // libraries, binaries which contain Abseil code must ensure that separate // packages use the same compiled copy of Abseil to avoid a diamond dependency // problem, which can occur if two packages built with different Abseil // configuration settings are linked together. Diamond dependency problems in // C++ may manifest as violations to the One Definition Rule (ODR) (resulting in // linker errors), or undefined behavior (resulting in crashes). // // Diamond dependency problems can be avoided if all packages utilize the same // exact version of Abseil. Building from source code with the same compilation // parameters is the easiest way to avoid such dependency problems. However, for // package managers who cannot control such compilation parameters, we are // providing the file to allow you to inject ABI (Application Binary Interface) // stability across builds. 
Settings options in this file will neither change // API nor ABI, providing a stable copy of Abseil between packages. // // Care must be taken to keep options within these configurations isolated // from any other dynamic settings, such as command-line flags which could alter // these options. This file is provided specifically to help build and package // managers provide a stable copy of Abseil within their libraries and binaries; // other developers should not have need to alter the contents of this file. // // ----------------------------------------------------------------------------- // Usage // ----------------------------------------------------------------------------- // // For any particular package release, set the appropriate definitions within // this file to whatever value makes the most sense for your package(s). Note // that, by default, most of these options, at the moment, affect the // implementation of types; future options may affect other implementation // details. // // NOTE: the defaults within this file all assume that Abseil can select the // proper Abseil implementation at compile-time, which will not be sufficient // to guarantee ABI stability to package managers. #ifndef ABSL_BASE_OPTIONS_H_ #define ABSL_BASE_OPTIONS_H_ // Include a standard library header to allow configuration based on the // standard library in use. #ifdef __cplusplus #include #endif // ----------------------------------------------------------------------------- // Type Compatibility Options // ----------------------------------------------------------------------------- // // ABSL_OPTION_USE_STD_ANY // // This option controls whether absl::any is implemented as an alias to // std::any, or as an independent implementation. // // A value of 0 means to use Abseil's implementation. This requires only C++11 // support, and is expected to work on every toolchain we support. // // A value of 1 means to use an alias to std::any. 
This requires that all code // using Abseil is built in C++17 mode or later. // // A value of 2 means to detect the C++ version being used to compile Abseil, // and use an alias only if a working std::any is available. This option is // useful when you are building your entire program, including all of its // dependencies, from source. It should not be used otherwise -- for example, // if you are distributing Abseil in a binary package manager -- since in // mode 2, absl::any will name a different type, with a different mangled name // and binary layout, depending on the compiler flags passed by the end user. // For more info, see https://abseil.io/about/design/dropin-types. // // User code should not inspect this macro. To check in the preprocessor if // absl::any is a typedef of std::any, use the feature macro ABSL_USES_STD_ANY. #define ABSL_OPTION_USE_STD_ANY 2 // ABSL_OPTION_USE_STD_OPTIONAL // // This option controls whether absl::optional is implemented as an alias to // std::optional, or as an independent implementation. // // A value of 0 means to use Abseil's implementation. This requires only C++11 // support, and is expected to work on every toolchain we support. // // A value of 1 means to use an alias to std::optional. This requires that all // code using Abseil is built in C++17 mode or later. // // A value of 2 means to detect the C++ version being used to compile Abseil, // and use an alias only if a working std::optional is available. This option // is useful when you are building your program from source. It should not be // used otherwise -- for example, if you are distributing Abseil in a binary // package manager -- since in mode 2, absl::optional will name a different // type, with a different mangled name and binary layout, depending on the // compiler flags passed by the end user. For more info, see // https://abseil.io/about/design/dropin-types. // User code should not inspect this macro. 
To check in the preprocessor if // absl::optional is a typedef of std::optional, use the feature macro // ABSL_USES_STD_OPTIONAL. #define ABSL_OPTION_USE_STD_OPTIONAL 2 // ABSL_OPTION_USE_STD_STRING_VIEW // // This option controls whether absl::string_view is implemented as an alias to // std::string_view, or as an independent implementation. // // A value of 0 means to use Abseil's implementation. This requires only C++11 // support, and is expected to work on every toolchain we support. // // A value of 1 means to use an alias to std::string_view. This requires that // all code using Abseil is built in C++17 mode or later. // // A value of 2 means to detect the C++ version being used to compile Abseil, // and use an alias only if a working std::string_view is available. This // option is useful when you are building your program from source. It should // not be used otherwise -- for example, if you are distributing Abseil in a // binary package manager -- since in mode 2, absl::string_view will name a // different type, with a different mangled name and binary layout, depending on // the compiler flags passed by the end user. For more info, see // https://abseil.io/about/design/dropin-types. // // User code should not inspect this macro. To check in the preprocessor if // absl::string_view is a typedef of std::string_view, use the feature macro // ABSL_USES_STD_STRING_VIEW. #define ABSL_OPTION_USE_STD_STRING_VIEW 2 // ABSL_OPTION_USE_STD_VARIANT // // This option controls whether absl::variant is implemented as an alias to // std::variant, or as an independent implementation. // // A value of 0 means to use Abseil's implementation. This requires only C++11 // support, and is expected to work on every toolchain we support. // // A value of 1 means to use an alias to std::variant. This requires that all // code using Abseil is built in C++17 mode or later. 
// // A value of 2 means to detect the C++ version being used to compile Abseil, // and use an alias only if a working std::variant is available. This option // is useful when you are building your program from source. It should not be // used otherwise -- for example, if you are distributing Abseil in a binary // package manager -- since in mode 2, absl::variant will name a different // type, with a different mangled name and binary layout, depending on the // compiler flags passed by the end user. For more info, see // https://abseil.io/about/design/dropin-types. // // User code should not inspect this macro. To check in the preprocessor if // absl::variant is a typedef of std::variant, use the feature macro // ABSL_USES_STD_VARIANT. #define ABSL_OPTION_USE_STD_VARIANT 2 // ABSL_OPTION_USE_INLINE_NAMESPACE // ABSL_OPTION_INLINE_NAMESPACE_NAME // // These options controls whether all entities in the absl namespace are // contained within an inner inline namespace. This does not affect the // user-visible API of Abseil, but it changes the mangled names of all symbols. // // This can be useful as a version tag if you are distributing Abseil in // precompiled form. This will prevent a binary library build of Abseil with // one inline namespace being used with headers configured with a different // inline namespace name. Binary packagers are reminded that Abseil does not // guarantee any ABI stability in Abseil, so any update of Abseil or // configuration change in such a binary package should be combined with a // new, unique value for the inline namespace name. // // A value of 0 means not to use inline namespaces. // // A value of 1 means to use an inline namespace with the given name inside // namespace absl. If this is set, ABSL_OPTION_INLINE_NAMESPACE_NAME must also // be changed to a new, unique identifier name. In particular "head" is not // allowed. 
#define ABSL_OPTION_USE_INLINE_NAMESPACE 1 #define ABSL_OPTION_INLINE_NAMESPACE_NAME lts_20220623 // ABSL_OPTION_HARDENED // // This option enables a "hardened" build in release mode (in this context, // release mode is defined as a build where the `NDEBUG` macro is defined). // // A value of 0 means that "hardened" mode is not enabled. // // A value of 1 means that "hardened" mode is enabled. // // Hardened builds have additional security checks enabled when `NDEBUG` is // defined. Defining `NDEBUG` is normally used to turn `assert()` macro into a // no-op, as well as disabling other bespoke program consistency checks. By // defining ABSL_OPTION_HARDENED to 1, a select set of checks remain enabled in // release mode. These checks guard against programming errors that may lead to // security vulnerabilities. In release mode, when one of these programming // errors is encountered, the program will immediately abort, possibly without // any attempt at logging. // // The checks enabled by this option are not free; they do incur runtime cost. // // The checks enabled by this option are always active when `NDEBUG` is not // defined, even in the case when ABSL_OPTION_HARDENED is defined to 0. The // checks enabled by this option may abort the program in a different way and // log additional information when `NDEBUG` is not defined. #define ABSL_OPTION_HARDENED 0 #endif // ABSL_BASE_OPTIONS_H_ abseil-20220623.1/absl/base/policy_checks.h000066400000000000000000000103421430371345100201760ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: policy_checks.h // ----------------------------------------------------------------------------- // // This header enforces a minimum set of policies at build time, such as the // supported compiler and library versions. Unsupported configurations are // reported with `#error`. This enforcement is best effort, so successfully // compiling this header does not guarantee a supported configuration. #ifndef ABSL_BASE_POLICY_CHECKS_H_ #define ABSL_BASE_POLICY_CHECKS_H_ // Included for the __GLIBC_PREREQ macro used below. #include // Included for the _STLPORT_VERSION macro used below. #if defined(__cplusplus) #include #endif // ----------------------------------------------------------------------------- // Operating System Check // ----------------------------------------------------------------------------- #if defined(__CYGWIN__) #error "Cygwin is not supported." #endif // ----------------------------------------------------------------------------- // Toolchain Check // ----------------------------------------------------------------------------- // We support MSVC++ 14.0 update 2 and later. // This minimum will go up. #if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023918 && !defined(__clang__) #error "This package requires Visual Studio 2015 Update 2 or higher." #endif // We support gcc 4.7 and later. // This minimum will go up. 
#if defined(__GNUC__) && !defined(__clang__) #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 7) #error "This package requires gcc 4.7 or higher." #endif #endif // We support Apple Xcode clang 4.2.1 (version 421.11.65) and later. // This corresponds to Apple Xcode version 4.5. // This minimum will go up. #if defined(__apple_build_version__) && __apple_build_version__ < 4211165 #error "This package requires __apple_build_version__ of 4211165 or higher." #endif // ----------------------------------------------------------------------------- // C++ Version Check // ----------------------------------------------------------------------------- // Enforce C++11 as the minimum. Note that Visual Studio has not // advanced __cplusplus despite being good enough for our purposes, so // so we exempt it from the check. #if defined(__cplusplus) && !defined(_MSC_VER) #if __cplusplus < 201103L #error "C++ versions less than C++11 are not supported." #endif #endif // ----------------------------------------------------------------------------- // Standard Library Check // ----------------------------------------------------------------------------- #if defined(_STLPORT_VERSION) #error "STLPort is not supported." #endif // ----------------------------------------------------------------------------- // `char` Size Check // ----------------------------------------------------------------------------- // Abseil currently assumes CHAR_BIT == 8. If you would like to use Abseil on a // platform where this is not the case, please provide us with the details about // your platform so we can consider relaxing this requirement. #if CHAR_BIT != 8 #error "Abseil assumes CHAR_BIT == 8." #endif // ----------------------------------------------------------------------------- // `int` Size Check // ----------------------------------------------------------------------------- // Abseil currently assumes that an int is 4 bytes. 
If you would like to use // Abseil on a platform where this is not the case, please provide us with the // details about your platform so we can consider relaxing this requirement. #if INT_MAX < 2147483647 #error "Abseil assumes that int is at least 4 bytes. " #endif #endif // ABSL_BASE_POLICY_CHECKS_H_ abseil-20220623.1/absl/base/port.h000066400000000000000000000016001430371345100163400ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This files is a forwarding header for other headers containing various // portability macros and functions. #ifndef ABSL_BASE_PORT_H_ #define ABSL_BASE_PORT_H_ #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/optimization.h" #endif // ABSL_BASE_PORT_H_ abseil-20220623.1/absl/base/raw_logging_test.cc000066400000000000000000000052111430371345100210520ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // This test serves primarily as a compilation test for base/raw_logging.h. // Raw logging testing is covered by logging_unittest.cc, which is not as // portable as this test. #include "absl/base/internal/raw_logging.h" #include #include "gtest/gtest.h" #include "absl/strings/str_cat.h" namespace { TEST(RawLoggingCompilationTest, Log) { ABSL_RAW_LOG(INFO, "RAW INFO: %d", 1); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d", 1, 2); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d", 1, 2, 3); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d", 1, 2, 3, 4); ABSL_RAW_LOG(INFO, "RAW INFO: %d %d %d %d %d", 1, 2, 3, 4, 5); ABSL_RAW_LOG(WARNING, "RAW WARNING: %d", 1); ABSL_RAW_LOG(ERROR, "RAW ERROR: %d", 1); } TEST(RawLoggingCompilationTest, PassingCheck) { ABSL_RAW_CHECK(true, "RAW CHECK"); } // Not all platforms support output from raw log, so we don't verify any // particular output for RAW check failures (expecting the empty string // accomplishes this). This test is primarily a compilation test, but we // are verifying process death when EXPECT_DEATH works for a platform. 
const char kExpectedDeathOutput[] = ""; TEST(RawLoggingDeathTest, FailingCheck) { EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_CHECK(1 == 0, "explanation"), kExpectedDeathOutput); } TEST(RawLoggingDeathTest, LogFatal) { EXPECT_DEATH_IF_SUPPORTED(ABSL_RAW_LOG(FATAL, "my dog has fleas"), kExpectedDeathOutput); } TEST(InternalLog, CompilationTest) { ABSL_INTERNAL_LOG(INFO, "Internal Log"); std::string log_msg = "Internal Log"; ABSL_INTERNAL_LOG(INFO, log_msg); ABSL_INTERNAL_LOG(INFO, log_msg + " 2"); float d = 1.1f; ABSL_INTERNAL_LOG(INFO, absl::StrCat("Internal log ", 3, " + ", d)); } TEST(InternalLogDeathTest, FailingCheck) { EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_CHECK(1 == 0, "explanation"), kExpectedDeathOutput); } TEST(InternalLogDeathTest, LogFatal) { EXPECT_DEATH_IF_SUPPORTED(ABSL_INTERNAL_LOG(FATAL, "my dog has fleas"), kExpectedDeathOutput); } } // namespace abseil-20220623.1/absl/base/spinlock_test_common.cc000066400000000000000000000230001430371345100217410ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A bunch of threads repeatedly hash an array of ints protected by a // spinlock. If the spinlock is working properly, all elements of the // array should be equal at the end of the test. 
#include #include #include #include // NOLINT(build/c++11) #include #include #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/low_level_scheduling.h" #include "absl/base/internal/scheduling_mode.h" #include "absl/base/internal/spinlock.h" #include "absl/base/internal/sysinfo.h" #include "absl/base/macros.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/notification.h" constexpr int32_t kNumThreads = 10; constexpr int32_t kIters = 1000; namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // This is defined outside of anonymous namespace so that it can be // a friend of SpinLock to access protected methods for testing. struct SpinLockTest { static uint32_t EncodeWaitCycles(int64_t wait_start_time, int64_t wait_end_time) { return SpinLock::EncodeWaitCycles(wait_start_time, wait_end_time); } static uint64_t DecodeWaitCycles(uint32_t lock_value) { return SpinLock::DecodeWaitCycles(lock_value); } }; namespace { static constexpr int kArrayLength = 10; static uint32_t values[kArrayLength]; ABSL_CONST_INIT static SpinLock static_cooperative_spinlock( absl::kConstInit, base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); ABSL_CONST_INIT static SpinLock static_noncooperative_spinlock( absl::kConstInit, base_internal::SCHEDULE_KERNEL_ONLY); // Simple integer hash function based on the public domain lookup2 hash. // http://burtleburtle.net/bob/c/lookup2.c static uint32_t Hash32(uint32_t a, uint32_t c) { uint32_t b = 0x9e3779b9UL; // The golden ratio; an arbitrary value. 
a -= b; a -= c; a ^= (c >> 13); b -= c; b -= a; b ^= (a << 8); c -= a; c -= b; c ^= (b >> 13); a -= b; a -= c; a ^= (c >> 12); b -= c; b -= a; b ^= (a << 16); c -= a; c -= b; c ^= (b >> 5); a -= b; a -= c; a ^= (c >> 3); b -= c; b -= a; b ^= (a << 10); c -= a; c -= b; c ^= (b >> 15); return c; } static void TestFunction(int thread_salt, SpinLock* spinlock) { for (int i = 0; i < kIters; i++) { SpinLockHolder h(spinlock); for (int j = 0; j < kArrayLength; j++) { const int index = (j + thread_salt) % kArrayLength; values[index] = Hash32(values[index], thread_salt); std::this_thread::yield(); } } } static void ThreadedTest(SpinLock* spinlock) { std::vector threads; threads.reserve(kNumThreads); for (int i = 0; i < kNumThreads; ++i) { threads.push_back(std::thread(TestFunction, i, spinlock)); } for (auto& thread : threads) { thread.join(); } SpinLockHolder h(spinlock); for (int i = 1; i < kArrayLength; i++) { EXPECT_EQ(values[0], values[i]); } } #ifndef ABSL_HAVE_THREAD_SANITIZER static_assert(std::is_trivially_destructible(), ""); #endif TEST(SpinLock, StackNonCooperativeDisablesScheduling) { SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); spinlock.Lock(); EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); spinlock.Unlock(); } TEST(SpinLock, StaticNonCooperativeDisablesScheduling) { static_noncooperative_spinlock.Lock(); EXPECT_FALSE(base_internal::SchedulingGuard::ReschedulingIsAllowed()); static_noncooperative_spinlock.Unlock(); } TEST(SpinLock, WaitCyclesEncoding) { // These are implementation details not exported by SpinLock. const int kProfileTimestampShift = 7; const int kLockwordReservedShift = 3; const uint32_t kSpinLockSleeper = 8; // We should be able to encode up to (1^kMaxCycleBits - 1) without clamping // but the lower kProfileTimestampShift will be dropped. 
const int kMaxCyclesShift = 32 - kLockwordReservedShift + kProfileTimestampShift; const uint64_t kMaxCycles = (int64_t{1} << kMaxCyclesShift) - 1; // These bits should be zero after encoding. const uint32_t kLockwordReservedMask = (1 << kLockwordReservedShift) - 1; // These bits are dropped when wait cycles are encoded. const uint64_t kProfileTimestampMask = (1 << kProfileTimestampShift) - 1; // Test a bunch of random values std::default_random_engine generator; // Shift to avoid overflow below. std::uniform_int_distribution time_distribution( 0, std::numeric_limits::max() >> 4); std::uniform_int_distribution cycle_distribution(0, kMaxCycles); for (int i = 0; i < 100; i++) { int64_t start_time = time_distribution(generator); int64_t cycles = cycle_distribution(generator); int64_t end_time = start_time + cycles; uint32_t lock_value = SpinLockTest::EncodeWaitCycles(start_time, end_time); EXPECT_EQ(0, lock_value & kLockwordReservedMask); uint64_t decoded = SpinLockTest::DecodeWaitCycles(lock_value); EXPECT_EQ(0, decoded & kProfileTimestampMask); EXPECT_EQ(cycles & ~kProfileTimestampMask, decoded); } // Test corner cases int64_t start_time = time_distribution(generator); EXPECT_EQ(kSpinLockSleeper, SpinLockTest::EncodeWaitCycles(start_time, start_time)); EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(0)); EXPECT_EQ(0, SpinLockTest::DecodeWaitCycles(kLockwordReservedMask)); EXPECT_EQ(kMaxCycles & ~kProfileTimestampMask, SpinLockTest::DecodeWaitCycles(~kLockwordReservedMask)); // Check that we cannot produce kSpinLockSleeper during encoding. 
int64_t sleeper_cycles = kSpinLockSleeper << (kProfileTimestampShift - kLockwordReservedShift); uint32_t sleeper_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + sleeper_cycles); EXPECT_NE(sleeper_value, kSpinLockSleeper); // Test clamping uint32_t max_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles); uint64_t max_value_decoded = SpinLockTest::DecodeWaitCycles(max_value); uint64_t expected_max_value_decoded = kMaxCycles & ~kProfileTimestampMask; EXPECT_EQ(expected_max_value_decoded, max_value_decoded); const int64_t step = (1 << kProfileTimestampShift); uint32_t after_max_value = SpinLockTest::EncodeWaitCycles(start_time, start_time + kMaxCycles + step); uint64_t after_max_value_decoded = SpinLockTest::DecodeWaitCycles(after_max_value); EXPECT_EQ(expected_max_value_decoded, after_max_value_decoded); uint32_t before_max_value = SpinLockTest::EncodeWaitCycles( start_time, start_time + kMaxCycles - step); uint64_t before_max_value_decoded = SpinLockTest::DecodeWaitCycles(before_max_value); EXPECT_GT(expected_max_value_decoded, before_max_value_decoded); } TEST(SpinLockWithThreads, StackSpinLock) { SpinLock spinlock; ThreadedTest(&spinlock); } TEST(SpinLockWithThreads, StackCooperativeSpinLock) { SpinLock spinlock(base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); ThreadedTest(&spinlock); } TEST(SpinLockWithThreads, StackNonCooperativeSpinLock) { SpinLock spinlock(base_internal::SCHEDULE_KERNEL_ONLY); ThreadedTest(&spinlock); } TEST(SpinLockWithThreads, StaticCooperativeSpinLock) { ThreadedTest(&static_cooperative_spinlock); } TEST(SpinLockWithThreads, StaticNonCooperativeSpinLock) { ThreadedTest(&static_noncooperative_spinlock); } TEST(SpinLockWithThreads, DoesNotDeadlock) { struct Helper { static void NotifyThenLock(Notification* locked, SpinLock* spinlock, BlockingCounter* b) { locked->WaitForNotification(); // Wait for LockThenWait() to hold "s". 
b->DecrementCount(); SpinLockHolder l(spinlock); } static void LockThenWait(Notification* locked, SpinLock* spinlock, BlockingCounter* b) { SpinLockHolder l(spinlock); locked->Notify(); b->Wait(); } static void DeadlockTest(SpinLock* spinlock, int num_spinners) { Notification locked; BlockingCounter counter(num_spinners); std::vector threads; threads.push_back( std::thread(Helper::LockThenWait, &locked, spinlock, &counter)); for (int i = 0; i < num_spinners; ++i) { threads.push_back( std::thread(Helper::NotifyThenLock, &locked, spinlock, &counter)); } for (auto& thread : threads) { thread.join(); } } }; SpinLock stack_cooperative_spinlock( base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL); SpinLock stack_noncooperative_spinlock(base_internal::SCHEDULE_KERNEL_ONLY); Helper::DeadlockTest(&stack_cooperative_spinlock, base_internal::NumCPUs() * 2); Helper::DeadlockTest(&stack_noncooperative_spinlock, base_internal::NumCPUs() * 2); Helper::DeadlockTest(&static_cooperative_spinlock, base_internal::NumCPUs() * 2); Helper::DeadlockTest(&static_noncooperative_spinlock, base_internal::NumCPUs() * 2); } } // namespace } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/base/thread_annotations.h000066400000000000000000000274341430371345100212550ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // ----------------------------------------------------------------------------- // File: thread_annotations.h // ----------------------------------------------------------------------------- // // This header file contains macro definitions for thread safety annotations // that allow developers to document the locking policies of multi-threaded // code. The annotations can also help program analysis tools to identify // potential thread safety issues. // // These annotations are implemented using compiler attributes. Using the macros // defined here instead of raw attributes allow for portability and future // compatibility. // // When referring to mutexes in the arguments of the attributes, you should // use variable names or more complex expressions (e.g. my_object->mutex_) // that evaluate to a concrete mutex object whenever possible. If the mutex // you want to refer to is not in scope, you may use a member pointer // (e.g. &MyClass::mutex_) to refer to a mutex in some (unknown) object. #ifndef ABSL_BASE_THREAD_ANNOTATIONS_H_ #define ABSL_BASE_THREAD_ANNOTATIONS_H_ #include "absl/base/attributes.h" #include "absl/base/config.h" // TODO(mbonadei): Remove after the backward compatibility period. #include "absl/base/internal/thread_annotations.h" // IWYU pragma: export // ABSL_GUARDED_BY() // // Documents if a shared field or global variable needs to be protected by a // mutex. ABSL_GUARDED_BY() allows the user to specify a particular mutex that // should be held when accessing the annotated variable. // // Although this annotation (and ABSL_PT_GUARDED_BY, below) cannot be applied to // local variables, a local variable and its associated mutex can often be // combined into a small class or struct, thereby allowing the annotation. // // Example: // // class Foo { // Mutex mu_; // int p1_ ABSL_GUARDED_BY(mu_); // ... 
// }; #if ABSL_HAVE_ATTRIBUTE(guarded_by) #define ABSL_GUARDED_BY(x) __attribute__((guarded_by(x))) #else #define ABSL_GUARDED_BY(x) #endif // ABSL_PT_GUARDED_BY() // // Documents if the memory location pointed to by a pointer should be guarded // by a mutex when dereferencing the pointer. // // Example: // class Foo { // Mutex mu_; // int *p1_ ABSL_PT_GUARDED_BY(mu_); // ... // }; // // Note that a pointer variable to a shared memory location could itself be a // shared variable. // // Example: // // // `q_`, guarded by `mu1_`, points to a shared memory location that is // // guarded by `mu2_`: // int *q_ ABSL_GUARDED_BY(mu1_) ABSL_PT_GUARDED_BY(mu2_); #if ABSL_HAVE_ATTRIBUTE(pt_guarded_by) #define ABSL_PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x))) #else #define ABSL_PT_GUARDED_BY(x) #endif // ABSL_ACQUIRED_AFTER() / ABSL_ACQUIRED_BEFORE() // // Documents the acquisition order between locks that can be held // simultaneously by a thread. For any two locks that need to be annotated // to establish an acquisition order, only one of them needs the annotation. // (i.e. You don't have to annotate both locks with both ABSL_ACQUIRED_AFTER // and ABSL_ACQUIRED_BEFORE.) // // As with ABSL_GUARDED_BY, this is only applicable to mutexes that are shared // fields or global variables. // // Example: // // Mutex m1_; // Mutex m2_ ABSL_ACQUIRED_AFTER(m1_); #if ABSL_HAVE_ATTRIBUTE(acquired_after) #define ABSL_ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__))) #else #define ABSL_ACQUIRED_AFTER(...) #endif #if ABSL_HAVE_ATTRIBUTE(acquired_before) #define ABSL_ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__))) #else #define ABSL_ACQUIRED_BEFORE(...) #endif // ABSL_EXCLUSIVE_LOCKS_REQUIRED() / ABSL_SHARED_LOCKS_REQUIRED() // // Documents a function that expects a mutex to be held prior to entry. // The mutex is expected to be held both on entry to, and exit from, the // function. 
// // An exclusive lock allows read-write access to the guarded data member(s), and // only one thread can acquire a lock exclusively at any one time. A shared lock // allows read-only access, and any number of threads can acquire a shared lock // concurrently. // // Generally, non-const methods should be annotated with // ABSL_EXCLUSIVE_LOCKS_REQUIRED, while const methods should be annotated with // ABSL_SHARED_LOCKS_REQUIRED. // // Example: // // Mutex mu1, mu2; // int a ABSL_GUARDED_BY(mu1); // int b ABSL_GUARDED_BY(mu2); // // void foo() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu1, mu2) { ... } // void bar() const ABSL_SHARED_LOCKS_REQUIRED(mu1, mu2) { ... } #if ABSL_HAVE_ATTRIBUTE(exclusive_locks_required) #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) \ __attribute__((exclusive_locks_required(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_LOCKS_REQUIRED(...) #endif #if ABSL_HAVE_ATTRIBUTE(shared_locks_required) #define ABSL_SHARED_LOCKS_REQUIRED(...) \ __attribute__((shared_locks_required(__VA_ARGS__))) #else #define ABSL_SHARED_LOCKS_REQUIRED(...) #endif // ABSL_LOCKS_EXCLUDED() // // Documents the locks that cannot be held by callers of this function, as they // might be acquired by this function (Abseil's `Mutex` locks are // non-reentrant). #if ABSL_HAVE_ATTRIBUTE(locks_excluded) #define ABSL_LOCKS_EXCLUDED(...) __attribute__((locks_excluded(__VA_ARGS__))) #else #define ABSL_LOCKS_EXCLUDED(...) #endif // ABSL_LOCK_RETURNED() // // Documents a function that returns a mutex without acquiring it. For example, // a public getter method that returns a pointer to a private mutex should // be annotated with ABSL_LOCK_RETURNED. #if ABSL_HAVE_ATTRIBUTE(lock_returned) #define ABSL_LOCK_RETURNED(x) __attribute__((lock_returned(x))) #else #define ABSL_LOCK_RETURNED(x) #endif // ABSL_LOCKABLE // // Documents if a class/type is a lockable type (such as the `Mutex` class). 
#if ABSL_HAVE_ATTRIBUTE(lockable) #define ABSL_LOCKABLE __attribute__((lockable)) #else #define ABSL_LOCKABLE #endif // ABSL_SCOPED_LOCKABLE // // Documents if a class does RAII locking (such as the `MutexLock` class). // The constructor should use `LOCK_FUNCTION()` to specify the mutex that is // acquired, and the destructor should use `UNLOCK_FUNCTION()` with no // arguments; the analysis will assume that the destructor unlocks whatever the // constructor locked. #if ABSL_HAVE_ATTRIBUTE(scoped_lockable) #define ABSL_SCOPED_LOCKABLE __attribute__((scoped_lockable)) #else #define ABSL_SCOPED_LOCKABLE #endif // ABSL_EXCLUSIVE_LOCK_FUNCTION() // // Documents functions that acquire a lock in the body of a function, and do // not release it. #if ABSL_HAVE_ATTRIBUTE(exclusive_lock_function) #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) \ __attribute__((exclusive_lock_function(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_LOCK_FUNCTION(...) #endif // ABSL_SHARED_LOCK_FUNCTION() // // Documents functions that acquire a shared (reader) lock in the body of a // function, and do not release it. #if ABSL_HAVE_ATTRIBUTE(shared_lock_function) #define ABSL_SHARED_LOCK_FUNCTION(...) \ __attribute__((shared_lock_function(__VA_ARGS__))) #else #define ABSL_SHARED_LOCK_FUNCTION(...) #endif // ABSL_UNLOCK_FUNCTION() // // Documents functions that expect a lock to be held on entry to the function, // and release it in the body of the function. #if ABSL_HAVE_ATTRIBUTE(unlock_function) #define ABSL_UNLOCK_FUNCTION(...) __attribute__((unlock_function(__VA_ARGS__))) #else #define ABSL_UNLOCK_FUNCTION(...) #endif // ABSL_EXCLUSIVE_TRYLOCK_FUNCTION() / ABSL_SHARED_TRYLOCK_FUNCTION() // // Documents functions that try to acquire a lock, and return success or failure // (or a non-boolean value that can be interpreted as a boolean). // The first argument should be `true` for functions that return `true` on // success, or `false` for functions that return `false` on success. 
The second // argument specifies the mutex that is locked on success. If unspecified, this // mutex is assumed to be `this`. #if ABSL_HAVE_ATTRIBUTE(exclusive_trylock_function) #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) \ __attribute__((exclusive_trylock_function(__VA_ARGS__))) #else #define ABSL_EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #if ABSL_HAVE_ATTRIBUTE(shared_trylock_function) #define ABSL_SHARED_TRYLOCK_FUNCTION(...) \ __attribute__((shared_trylock_function(__VA_ARGS__))) #else #define ABSL_SHARED_TRYLOCK_FUNCTION(...) #endif // ABSL_ASSERT_EXCLUSIVE_LOCK() / ABSL_ASSERT_SHARED_LOCK() // // Documents functions that dynamically check to see if a lock is held, and fail // if it is not held. #if ABSL_HAVE_ATTRIBUTE(assert_exclusive_lock) #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) \ __attribute__((assert_exclusive_lock(__VA_ARGS__))) #else #define ABSL_ASSERT_EXCLUSIVE_LOCK(...) #endif #if ABSL_HAVE_ATTRIBUTE(assert_shared_lock) #define ABSL_ASSERT_SHARED_LOCK(...) \ __attribute__((assert_shared_lock(__VA_ARGS__))) #else #define ABSL_ASSERT_SHARED_LOCK(...) #endif // ABSL_NO_THREAD_SAFETY_ANALYSIS // // Turns off thread safety checking within the body of a particular function. // This annotation is used to mark functions that are known to be correct, but // the locking behavior is more complicated than the analyzer can handle. #if ABSL_HAVE_ATTRIBUTE(no_thread_safety_analysis) #define ABSL_NO_THREAD_SAFETY_ANALYSIS \ __attribute__((no_thread_safety_analysis)) #else #define ABSL_NO_THREAD_SAFETY_ANALYSIS #endif //------------------------------------------------------------------------------ // Tool-Supplied Annotations //------------------------------------------------------------------------------ // ABSL_TS_UNCHECKED should be placed around lock expressions that are not valid // C++ syntax, but which are present for documentation purposes. These // annotations will be ignored by the analysis. 
#define ABSL_TS_UNCHECKED(x) "" // ABSL_TS_FIXME is used to mark lock expressions that are not valid C++ syntax. // It is used by automated tools to mark and disable invalid expressions. // The annotation should either be fixed, or changed to ABSL_TS_UNCHECKED. #define ABSL_TS_FIXME(x) "" // Like ABSL_NO_THREAD_SAFETY_ANALYSIS, this turns off checking within the body // of a particular function. However, this attribute is used to mark functions // that are incorrect and need to be fixed. It is used by automated tools to // avoid breaking the build when the analysis is updated. // Code owners are expected to eventually fix the routine. #define ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME ABSL_NO_THREAD_SAFETY_ANALYSIS // Similar to ABSL_NO_THREAD_SAFETY_ANALYSIS_FIXME, this macro marks a // ABSL_GUARDED_BY annotation that needs to be fixed, because it is producing // thread safety warning. It disables the ABSL_GUARDED_BY. #define ABSL_GUARDED_BY_FIXME(x) // Disables warnings for a single read operation. This can be used to avoid // warnings when it is known that the read is not actually involved in a race, // but the compiler cannot confirm that. #define ABSL_TS_UNCHECKED_READ(x) absl::base_internal::ts_unchecked_read(x) namespace absl { ABSL_NAMESPACE_BEGIN namespace base_internal { // Takes a reference to a guarded data member, and returns an unguarded // reference. // Do not use this function directly, use ABSL_TS_UNCHECKED_READ instead. template inline const T& ts_unchecked_read(const T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; } template inline T& ts_unchecked_read(T& v) ABSL_NO_THREAD_SAFETY_ANALYSIS { return v; } } // namespace base_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_BASE_THREAD_ANNOTATIONS_H_ abseil-20220623.1/absl/base/throw_delegate_test.cc000066400000000000000000000067421430371345100215620ustar00rootroot00000000000000// Copyright 2017 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/internal/throw_delegate.h" #include #include #include #include "absl/base/config.h" #include "gtest/gtest.h" namespace { using absl::base_internal::ThrowStdLogicError; using absl::base_internal::ThrowStdInvalidArgument; using absl::base_internal::ThrowStdDomainError; using absl::base_internal::ThrowStdLengthError; using absl::base_internal::ThrowStdOutOfRange; using absl::base_internal::ThrowStdRuntimeError; using absl::base_internal::ThrowStdRangeError; using absl::base_internal::ThrowStdOverflowError; using absl::base_internal::ThrowStdUnderflowError; using absl::base_internal::ThrowStdBadFunctionCall; using absl::base_internal::ThrowStdBadAlloc; constexpr const char* what_arg = "The quick brown fox jumps over the lazy dog"; template void ExpectThrowChar(void (*f)(const char*)) { #ifdef ABSL_HAVE_EXCEPTIONS try { f(what_arg); FAIL() << "Didn't throw"; } catch (const E& e) { EXPECT_STREQ(e.what(), what_arg); } #else EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg); #endif } template void ExpectThrowString(void (*f)(const std::string&)) { #ifdef ABSL_HAVE_EXCEPTIONS try { f(what_arg); FAIL() << "Didn't throw"; } catch (const E& e) { EXPECT_STREQ(e.what(), what_arg); } #else EXPECT_DEATH_IF_SUPPORTED(f(what_arg), what_arg); #endif } template void ExpectThrowNoWhat(void (*f)()) { #ifdef ABSL_HAVE_EXCEPTIONS try { f(); FAIL() << "Didn't throw"; } catch (const E& e) { } #else 
EXPECT_DEATH_IF_SUPPORTED(f(), ""); #endif } TEST(ThrowHelper, Test) { // Not using EXPECT_THROW because we want to check the .what() message too. ExpectThrowChar(ThrowStdLogicError); ExpectThrowChar(ThrowStdInvalidArgument); ExpectThrowChar(ThrowStdDomainError); ExpectThrowChar(ThrowStdLengthError); ExpectThrowChar(ThrowStdOutOfRange); ExpectThrowChar(ThrowStdRuntimeError); ExpectThrowChar(ThrowStdRangeError); ExpectThrowChar(ThrowStdOverflowError); ExpectThrowChar(ThrowStdUnderflowError); ExpectThrowString(ThrowStdLogicError); ExpectThrowString(ThrowStdInvalidArgument); ExpectThrowString(ThrowStdDomainError); ExpectThrowString(ThrowStdLengthError); ExpectThrowString(ThrowStdOutOfRange); ExpectThrowString(ThrowStdRuntimeError); ExpectThrowString(ThrowStdRangeError); ExpectThrowString(ThrowStdOverflowError); ExpectThrowString(ThrowStdUnderflowError); ExpectThrowNoWhat(ThrowStdBadFunctionCall); ExpectThrowNoWhat(ThrowStdBadAlloc); } } // namespace abseil-20220623.1/absl/cleanup/000077500000000000000000000000001430371345100157235ustar00rootroot00000000000000abseil-20220623.1/absl/cleanup/BUILD.bazel000066400000000000000000000031001430371345100175730ustar00rootroot00000000000000# Copyright 2021 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", "ABSL_TEST_COPTS", ) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) cc_library( name = "cleanup_internal", hdrs = ["internal/cleanup.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:base_internal", "//absl/base:core_headers", "//absl/utility", ], ) cc_library( name = "cleanup", hdrs = [ "cleanup.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":cleanup_internal", "//absl/base:config", "//absl/base:core_headers", ], ) cc_test( name = "cleanup_test", size = "small", srcs = [ "cleanup_test.cc", ], copts = ABSL_TEST_COPTS, deps = [ ":cleanup", "//absl/base:config", "//absl/utility", "@com_google_googletest//:gtest_main", ], ) abseil-20220623.1/absl/cleanup/CMakeLists.txt000066400000000000000000000022511430371345100204630ustar00rootroot00000000000000# Copyright 2021 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Internal-only target, do not depend on directly. 
absl_cc_library( NAME cleanup_internal HDRS "internal/cleanup.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::base_internal absl::core_headers absl::utility PUBLIC ) absl_cc_library( NAME cleanup HDRS "cleanup.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::cleanup_internal absl::config absl::core_headers PUBLIC ) absl_cc_test( NAME cleanup_test SRCS "cleanup_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::cleanup absl::config absl::utility GTest::gmock_main ) abseil-20220623.1/absl/cleanup/cleanup.h000066400000000000000000000110421430371345100175210ustar00rootroot00000000000000// Copyright 2021 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: cleanup.h // ----------------------------------------------------------------------------- // // `absl::Cleanup` implements the scope guard idiom, invoking the contained // callback's `operator()() &&` on scope exit. 
// // Example: // // ``` // absl::Status CopyGoodData(const char* source_path, const char* sink_path) { // FILE* source_file = fopen(source_path, "r"); // if (source_file == nullptr) { // return absl::NotFoundError("No source file"); // No cleanups execute // } // // // C++17 style cleanup using class template argument deduction // absl::Cleanup source_closer = [source_file] { fclose(source_file); }; // // FILE* sink_file = fopen(sink_path, "w"); // if (sink_file == nullptr) { // return absl::NotFoundError("No sink file"); // First cleanup executes // } // // // C++11 style cleanup using the factory function // auto sink_closer = absl::MakeCleanup([sink_file] { fclose(sink_file); }); // // Data data; // while (ReadData(source_file, &data)) { // if (!data.IsGood()) { // absl::Status result = absl::FailedPreconditionError("Read bad data"); // return result; // Both cleanups execute // } // SaveData(sink_file, &data); // } // // return absl::OkStatus(); // Both cleanups execute // } // ``` // // Methods: // // `std::move(cleanup).Cancel()` will prevent the callback from executing. // // `std::move(cleanup).Invoke()` will execute the callback early, before // destruction, and prevent the callback from executing in the destructor. // // Usage: // // `absl::Cleanup` is not an interface type. It is only intended to be used // within the body of a function. It is not a value type and instead models a // control flow construct. Check out `defer` in Golang for something similar. 
#ifndef ABSL_CLEANUP_CLEANUP_H_ #define ABSL_CLEANUP_CLEANUP_H_ #include #include "absl/base/config.h" #include "absl/base/macros.h" #include "absl/cleanup/internal/cleanup.h" namespace absl { ABSL_NAMESPACE_BEGIN template class ABSL_MUST_USE_RESULT Cleanup final { static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); public: Cleanup(Callback callback) : storage_(std::move(callback)) {} // NOLINT Cleanup(Cleanup&& other) = default; void Cancel() && { ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); storage_.DestroyCallback(); } void Invoke() && { ABSL_HARDENING_ASSERT(storage_.IsCallbackEngaged()); storage_.InvokeCallback(); storage_.DestroyCallback(); } ~Cleanup() { if (storage_.IsCallbackEngaged()) { storage_.InvokeCallback(); storage_.DestroyCallback(); } } private: cleanup_internal::Storage storage_; }; // `absl::Cleanup c = /* callback */;` // // C++17 type deduction API for creating an instance of `absl::Cleanup` #if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) template Cleanup(Callback callback) -> Cleanup; #endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) // `auto c = absl::MakeCleanup(/* callback */);` // // C++11 type deduction API for creating an instance of `absl::Cleanup` template absl::Cleanup MakeCleanup(Callback callback) { static_assert(cleanup_internal::WasDeduced(), "Explicit template parameters are not supported."); static_assert(cleanup_internal::ReturnsVoid(), "Callbacks that return values are not supported."); return {std::move(callback)}; } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CLEANUP_CLEANUP_H_ abseil-20220623.1/absl/cleanup/cleanup_test.cc000066400000000000000000000202631430371345100207230ustar00rootroot00000000000000// Copyright 2021 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/cleanup/cleanup.h" #include #include #include #include "gtest/gtest.h" #include "absl/base/config.h" #include "absl/utility/utility.h" namespace { using Tag = absl::cleanup_internal::Tag; template constexpr bool IsSame() { return (std::is_same::value); } struct IdentityFactory { template static Callback AsCallback(Callback callback) { return Callback(std::move(callback)); } }; // `FunctorClass` is a type used for testing `absl::Cleanup`. It is intended to // represent users that make their own move-only callback types outside of // `std::function` and lambda literals. 
class FunctorClass { using Callback = std::function; public: explicit FunctorClass(Callback callback) : callback_(std::move(callback)) {} FunctorClass(FunctorClass&& other) : callback_(absl::exchange(other.callback_, Callback())) {} FunctorClass(const FunctorClass&) = delete; FunctorClass& operator=(const FunctorClass&) = delete; FunctorClass& operator=(FunctorClass&&) = delete; void operator()() const& = delete; void operator()() && { ASSERT_TRUE(callback_); callback_(); callback_ = nullptr; } private: Callback callback_; }; struct FunctorClassFactory { template static FunctorClass AsCallback(Callback callback) { return FunctorClass(std::move(callback)); } }; struct StdFunctionFactory { template static std::function AsCallback(Callback callback) { return std::function(std::move(callback)); } }; using CleanupTestParams = ::testing::Types; template struct CleanupTest : public ::testing::Test {}; TYPED_TEST_SUITE(CleanupTest, CleanupTestParams); bool fn_ptr_called = false; void FnPtrFunction() { fn_ptr_called = true; } TYPED_TEST(CleanupTest, FactoryProducesCorrectType) { { auto callback = TypeParam::AsCallback([] {}); auto cleanup = absl::MakeCleanup(std::move(callback)); static_assert( IsSame, decltype(cleanup)>(), ""); } { auto cleanup = absl::MakeCleanup(&FnPtrFunction); static_assert(IsSame, decltype(cleanup)>(), ""); } { auto cleanup = absl::MakeCleanup(FnPtrFunction); static_assert(IsSame, decltype(cleanup)>(), ""); } } #if defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) TYPED_TEST(CleanupTest, CTADProducesCorrectType) { { auto callback = TypeParam::AsCallback([] {}); absl::Cleanup cleanup = std::move(callback); static_assert( IsSame, decltype(cleanup)>(), ""); } { absl::Cleanup cleanup = &FnPtrFunction; static_assert(IsSame, decltype(cleanup)>(), ""); } { absl::Cleanup cleanup = FnPtrFunction; static_assert(IsSame, decltype(cleanup)>(), ""); } } TYPED_TEST(CleanupTest, FactoryAndCTADProduceSameType) { { auto callback = IdentityFactory::AsCallback([] {}); 
auto factory_cleanup = absl::MakeCleanup(callback); absl::Cleanup deduction_cleanup = callback; static_assert( IsSame(), ""); } { auto factory_cleanup = absl::MakeCleanup(FunctorClassFactory::AsCallback([] {})); absl::Cleanup deduction_cleanup = FunctorClassFactory::AsCallback([] {}); static_assert( IsSame(), ""); } { auto factory_cleanup = absl::MakeCleanup(StdFunctionFactory::AsCallback([] {})); absl::Cleanup deduction_cleanup = StdFunctionFactory::AsCallback([] {}); static_assert( IsSame(), ""); } { auto factory_cleanup = absl::MakeCleanup(&FnPtrFunction); absl::Cleanup deduction_cleanup = &FnPtrFunction; static_assert( IsSame(), ""); } { auto factory_cleanup = absl::MakeCleanup(FnPtrFunction); absl::Cleanup deduction_cleanup = FnPtrFunction; static_assert( IsSame(), ""); } } #endif // defined(ABSL_HAVE_CLASS_TEMPLATE_ARGUMENT_DEDUCTION) TYPED_TEST(CleanupTest, BasicUsage) { bool called = false; { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); EXPECT_FALSE(called); // Constructor shouldn't invoke the callback } EXPECT_TRUE(called); // Destructor should invoke the callback } TYPED_TEST(CleanupTest, BasicUsageWithFunctionPointer) { fn_ptr_called = false; { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(&FnPtrFunction)); EXPECT_FALSE(fn_ptr_called); // Constructor shouldn't invoke the callback } EXPECT_TRUE(fn_ptr_called); // Destructor should invoke the callback } TYPED_TEST(CleanupTest, Cancel) { bool called = false; { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); EXPECT_FALSE(called); // Constructor shouldn't invoke the callback std::move(cleanup).Cancel(); EXPECT_FALSE(called); // Cancel shouldn't invoke the callback } EXPECT_FALSE(called); // Destructor shouldn't invoke the callback } TYPED_TEST(CleanupTest, Invoke) { bool called = false; { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); EXPECT_FALSE(called); // Constructor shouldn't 
invoke the callback std::move(cleanup).Invoke(); EXPECT_TRUE(called); // Invoke should invoke the callback called = false; // Reset tracker before destructor runs } EXPECT_FALSE(called); // Destructor shouldn't invoke the callback } TYPED_TEST(CleanupTest, Move) { bool called = false; { auto moved_from_cleanup = absl::MakeCleanup(TypeParam::AsCallback([&called] { called = true; })); EXPECT_FALSE(called); // Constructor shouldn't invoke the callback { auto moved_to_cleanup = std::move(moved_from_cleanup); EXPECT_FALSE(called); // Move shouldn't invoke the callback } EXPECT_TRUE(called); // Destructor should invoke the callback called = false; // Reset tracker before destructor runs } EXPECT_FALSE(called); // Destructor shouldn't invoke the callback } int DestructionCount = 0; struct DestructionCounter { void operator()() {} ~DestructionCounter() { ++DestructionCount; } }; TYPED_TEST(CleanupTest, DestructorDestroys) { { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); DestructionCount = 0; } EXPECT_EQ(DestructionCount, 1); // Engaged cleanup destroys } TYPED_TEST(CleanupTest, CancelDestroys) { { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); DestructionCount = 0; std::move(cleanup).Cancel(); EXPECT_EQ(DestructionCount, 1); // Cancel destroys } EXPECT_EQ(DestructionCount, 1); // Canceled cleanup does not double destroy } TYPED_TEST(CleanupTest, InvokeDestroys) { { auto cleanup = absl::MakeCleanup(TypeParam::AsCallback(DestructionCounter())); DestructionCount = 0; std::move(cleanup).Invoke(); EXPECT_EQ(DestructionCount, 1); // Invoke destroys } EXPECT_EQ(DestructionCount, 1); // Invoked cleanup does not double destroy } } // namespace abseil-20220623.1/absl/cleanup/internal/000077500000000000000000000000001430371345100175375ustar00rootroot00000000000000abseil-20220623.1/absl/cleanup/internal/cleanup.h000066400000000000000000000053131430371345100213410ustar00rootroot00000000000000// Copyright 2021 The Abseil 
Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CLEANUP_INTERNAL_CLEANUP_H_ #define ABSL_CLEANUP_INTERNAL_CLEANUP_H_ #include #include #include #include "absl/base/internal/invoke.h" #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" #include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace cleanup_internal { struct Tag {}; template constexpr bool WasDeduced() { return (std::is_same::value) && (sizeof...(Args) == 0); } template constexpr bool ReturnsVoid() { return (std::is_same, void>::value); } template class Storage { public: Storage() = delete; explicit Storage(Callback callback) { // Placement-new into a character buffer is used for eager destruction when // the cleanup is invoked or cancelled. To ensure this optimizes well, the // behavior is implemented locally instead of using an absl::optional. 
::new (GetCallbackBuffer()) Callback(std::move(callback)); is_callback_engaged_ = true; } Storage(Storage&& other) { ABSL_HARDENING_ASSERT(other.IsCallbackEngaged()); ::new (GetCallbackBuffer()) Callback(std::move(other.GetCallback())); is_callback_engaged_ = true; other.DestroyCallback(); } Storage(const Storage& other) = delete; Storage& operator=(Storage&& other) = delete; Storage& operator=(const Storage& other) = delete; void* GetCallbackBuffer() { return static_cast(+callback_buffer_); } Callback& GetCallback() { return *reinterpret_cast(GetCallbackBuffer()); } bool IsCallbackEngaged() const { return is_callback_engaged_; } void DestroyCallback() { is_callback_engaged_ = false; GetCallback().~Callback(); } void InvokeCallback() ABSL_NO_THREAD_SAFETY_ANALYSIS { std::move(GetCallback())(); } private: bool is_callback_engaged_; alignas(Callback) char callback_buffer_[sizeof(Callback)]; }; } // namespace cleanup_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CLEANUP_INTERNAL_CLEANUP_H_ abseil-20220623.1/absl/container/000077500000000000000000000000001430371345100162565ustar00rootroot00000000000000abseil-20220623.1/absl/container/BUILD.bazel000066400000000000000000000606771430371345100201540ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# load( "//absl:copts/configure_copts.bzl", "ABSL_DEFAULT_COPTS", "ABSL_DEFAULT_LINKOPTS", "ABSL_TEST_COPTS", ) package(default_visibility = ["//visibility:public"]) licenses(["notice"]) cc_library( name = "compressed_tuple", hdrs = ["internal/compressed_tuple.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/utility", ], ) cc_test( name = "compressed_tuple_test", srcs = ["internal/compressed_tuple_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":compressed_tuple", ":test_instance_tracker", "//absl/memory", "//absl/types:any", "//absl/types:optional", "//absl/utility", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "fixed_array", hdrs = ["fixed_array.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":compressed_tuple", "//absl/algorithm", "//absl/base:config", "//absl/base:core_headers", "//absl/base:dynamic_annotations", "//absl/base:throw_delegate", "//absl/memory", ], ) cc_test( name = "fixed_array_test", srcs = ["fixed_array_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":counting_allocator", ":fixed_array", "//absl/base:config", "//absl/base:exception_testing", "//absl/hash:hash_testing", "//absl/memory", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "fixed_array_exception_safety_test", srcs = ["fixed_array_exception_safety_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":fixed_array", "//absl/base:config", "//absl/base:exception_safety_testing", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "fixed_array_benchmark", srcs = ["fixed_array_benchmark.cc"], copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"], linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], deps = [ ":fixed_array", "@com_github_google_benchmark//:benchmark_main", ], ) cc_library( name = "inlined_vector_internal", hdrs = ["internal/inlined_vector.h"], copts = ABSL_DEFAULT_COPTS, linkopts = 
ABSL_DEFAULT_LINKOPTS, deps = [ ":compressed_tuple", "//absl/base:core_headers", "//absl/memory", "//absl/meta:type_traits", "//absl/types:span", ], ) cc_library( name = "inlined_vector", hdrs = ["inlined_vector.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":inlined_vector_internal", "//absl/algorithm", "//absl/base:core_headers", "//absl/base:throw_delegate", "//absl/memory", ], ) cc_library( name = "counting_allocator", testonly = 1, hdrs = ["internal/counting_allocator.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], deps = ["//absl/base:config"], ) cc_test( name = "inlined_vector_test", srcs = ["inlined_vector_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":counting_allocator", ":inlined_vector", ":test_instance_tracker", "//absl/base:config", "//absl/base:core_headers", "//absl/base:exception_testing", "//absl/base:raw_logging_internal", "//absl/hash:hash_testing", "//absl/memory", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "inlined_vector_benchmark", srcs = ["inlined_vector_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], deps = [ ":inlined_vector", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "//absl/strings", "@com_github_google_benchmark//:benchmark_main", ], ) cc_test( name = "inlined_vector_exception_safety_test", srcs = ["inlined_vector_exception_safety_test.cc"], copts = ABSL_TEST_COPTS, deps = [ ":inlined_vector", "//absl/base:config", "//absl/base:exception_safety_testing", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "test_instance_tracker", testonly = 1, srcs = ["internal/test_instance_tracker.cc"], hdrs = ["internal/test_instance_tracker.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = [ "//absl:__subpackages__", ], deps = ["//absl/types:compare"], ) cc_test( name = 
"test_instance_tracker_test", srcs = ["internal/test_instance_tracker_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":test_instance_tracker", "@com_google_googletest//:gtest_main", ], ) NOTEST_TAGS_MOBILE = [ "no_test_android_arm", "no_test_android_arm64", "no_test_android_x86", "no_test_ios_x86_64", ] cc_library( name = "flat_hash_map", hdrs = ["flat_hash_map.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":container_memory", ":hash_function_defaults", ":raw_hash_map", "//absl/algorithm:container", "//absl/base:core_headers", "//absl/memory", ], ) cc_test( name = "flat_hash_map_test", srcs = ["flat_hash_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":flat_hash_map", ":hash_generator_testing", ":unordered_map_constructor_test", ":unordered_map_lookup_test", ":unordered_map_members_test", ":unordered_map_modifiers_test", "//absl/base:raw_logging_internal", "//absl/types:any", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "flat_hash_set", hdrs = ["flat_hash_set.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":container_memory", ":hash_function_defaults", ":raw_hash_set", "//absl/algorithm:container", "//absl/base:core_headers", "//absl/memory", ], ) cc_test( name = "flat_hash_set_test", srcs = ["flat_hash_set_test.cc"], copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":flat_hash_set", ":hash_generator_testing", ":unordered_set_constructor_test", ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", "//absl/base:raw_logging_internal", "//absl/memory", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "node_hash_map", hdrs = ["node_hash_map.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":container_memory", 
":hash_function_defaults", ":node_slot_policy", ":raw_hash_map", "//absl/algorithm:container", "//absl/base:core_headers", "//absl/memory", ], ) cc_test( name = "node_hash_map_test", srcs = ["node_hash_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":hash_generator_testing", ":node_hash_map", ":tracked", ":unordered_map_constructor_test", ":unordered_map_lookup_test", ":unordered_map_members_test", ":unordered_map_modifiers_test", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "node_hash_set", hdrs = ["node_hash_set.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_function_defaults", ":node_slot_policy", ":raw_hash_set", "//absl/algorithm:container", "//absl/base:core_headers", "//absl/memory", ], ) cc_test( name = "node_hash_set_test", srcs = ["node_hash_set_test.cc"], copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"], linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":node_hash_set", ":unordered_set_constructor_test", ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "container_memory", hdrs = ["internal/container_memory.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", "//absl/memory", "//absl/meta:type_traits", "//absl/utility", ], ) cc_test( name = "container_memory_test", srcs = ["internal/container_memory_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":container_memory", ":test_instance_tracker", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "hash_function_defaults", hdrs = ["internal/hash_function_defaults.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", "//absl/hash", "//absl/strings", "//absl/strings:cord", ], ) cc_test( name = 
"hash_function_defaults_test", srcs = ["internal/hash_function_defaults_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], deps = [ ":hash_function_defaults", "//absl/hash", "//absl/random", "//absl/strings", "//absl/strings:cord", "//absl/strings:cord_test_helpers", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "hash_generator_testing", testonly = 1, srcs = ["internal/hash_generator_testing.cc"], hdrs = ["internal/hash_generator_testing.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_testing", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", ], ) cc_library( name = "hash_policy_testing", testonly = 1, hdrs = ["internal/hash_policy_testing.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/hash", "//absl/strings", ], ) cc_test( name = "hash_policy_testing_test", srcs = ["internal/hash_policy_testing_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_testing", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "hash_policy_traits", hdrs = ["internal/hash_policy_traits.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = ["//absl/meta:type_traits"], ) cc_test( name = "hash_policy_traits_test", srcs = ["internal/hash_policy_traits_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_traits", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "hashtable_debug", hdrs = ["internal/hashtable_debug.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hashtable_debug_hooks", ], ) cc_library( name = "hashtable_debug_hooks", hdrs = ["internal/hashtable_debug_hooks.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", ], ) cc_library( name = "hashtablez_sampler", srcs = [ "internal/hashtablez_sampler.cc", 
"internal/hashtablez_sampler_force_weak_definition.cc", ], hdrs = ["internal/hashtablez_sampler.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base", "//absl/base:config", "//absl/base:core_headers", "//absl/debugging:stacktrace", "//absl/memory", "//absl/profiling:exponential_biased", "//absl/profiling:sample_recorder", "//absl/synchronization", "//absl/utility", ], ) cc_test( name = "hashtablez_sampler_test", srcs = ["internal/hashtablez_sampler_test.cc"], linkopts = ABSL_DEFAULT_LINKOPTS, tags = [ "no_test_wasm", ], deps = [ ":hashtablez_sampler", "//absl/base:config", "//absl/base:core_headers", "//absl/profiling:sample_recorder", "//absl/synchronization", "//absl/synchronization:thread_pool", "//absl/time", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "node_slot_policy", hdrs = ["internal/node_slot_policy.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = ["//absl/base:config"], ) cc_test( name = "node_slot_policy_test", srcs = ["internal/node_slot_policy_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_policy_traits", ":node_slot_policy", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "raw_hash_map", hdrs = ["internal/raw_hash_map.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":container_memory", ":raw_hash_set", "//absl/base:throw_delegate", ], ) cc_library( name = "common", hdrs = ["internal/common.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/meta:type_traits", "//absl/types:optional", ], ) cc_library( name = "raw_hash_set", srcs = ["internal/raw_hash_set.cc"], hdrs = ["internal/raw_hash_set.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":common", ":compressed_tuple", ":container_memory", ":hash_policy_traits", ":hashtable_debug_hooks", ":hashtablez_sampler", "//absl/base:config", "//absl/base:core_headers", "//absl/base:endian", 
"//absl/base:prefetch", "//absl/memory", "//absl/meta:type_traits", "//absl/numeric:bits", "//absl/utility", ], ) cc_test( name = "raw_hash_set_test", srcs = ["internal/raw_hash_set_test.cc"], copts = ABSL_TEST_COPTS, linkstatic = 1, tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], deps = [ ":container_memory", ":hash_function_defaults", ":hash_policy_testing", ":hashtable_debug", ":raw_hash_set", "//absl/base", "//absl/base:config", "//absl/base:core_headers", "//absl/base:prefetch", "//absl/base:raw_logging_internal", "//absl/strings", "@com_google_googletest//:gtest_main", ], ) cc_binary( name = "raw_hash_set_benchmark", testonly = 1, srcs = ["internal/raw_hash_set_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":hash_function_defaults", ":raw_hash_set", "//absl/base:raw_logging_internal", "//absl/strings:str_format", "@com_github_google_benchmark//:benchmark_main", ], ) cc_binary( name = "raw_hash_set_probe_benchmark", testonly = 1, srcs = ["internal/raw_hash_set_probe_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = select({ "//conditions:default": [], }) + ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":flat_hash_map", ":hash_function_defaults", ":hashtable_debug", ":raw_hash_set", "//absl/random", "//absl/random:distributions", "//absl/strings", "//absl/strings:str_format", ], ) cc_test( name = "raw_hash_set_allocator_test", size = "small", srcs = ["internal/raw_hash_set_allocator_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":raw_hash_set", ":tracked", "//absl/base:core_headers", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "layout", hdrs = ["internal/layout.h"], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", "//absl/base:core_headers", "//absl/meta:type_traits", "//absl/strings", "//absl/types:span", 
"//absl/utility", ], ) cc_test( name = "layout_test", size = "small", srcs = ["internal/layout_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = NOTEST_TAGS_MOBILE + ["no_test_loonix"], visibility = ["//visibility:private"], deps = [ ":layout", "//absl/base:config", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "//absl/types:span", "@com_google_googletest//:gtest_main", ], ) cc_binary( name = "layout_benchmark", testonly = 1, srcs = ["internal/layout_benchmark.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":layout", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "@com_github_google_benchmark//:benchmark_main", ], ) cc_library( name = "tracked", testonly = 1, hdrs = ["internal/tracked.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/base:config", ], ) cc_library( name = "unordered_map_constructor_test", testonly = 1, hdrs = ["internal/unordered_map_constructor_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", ":hash_policy_testing", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_map_lookup_test", testonly = 1, hdrs = ["internal/unordered_map_lookup_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", ":hash_policy_testing", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_map_modifiers_test", testonly = 1, hdrs = ["internal/unordered_map_modifiers_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", ":hash_policy_testing", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_set_constructor_test", testonly = 1, hdrs = ["internal/unordered_set_constructor_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", 
":hash_policy_testing", "//absl/meta:type_traits", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_set_members_test", testonly = 1, hdrs = ["internal/unordered_set_members_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/meta:type_traits", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_map_members_test", testonly = 1, hdrs = ["internal/unordered_map_members_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ "//absl/meta:type_traits", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_set_lookup_test", testonly = 1, hdrs = ["internal/unordered_set_lookup_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", ":hash_policy_testing", "@com_google_googletest//:gtest", ], ) cc_library( name = "unordered_set_modifiers_test", testonly = 1, hdrs = ["internal/unordered_set_modifiers_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, deps = [ ":hash_generator_testing", ":hash_policy_testing", "@com_google_googletest//:gtest", ], ) cc_test( name = "unordered_set_test", srcs = ["internal/unordered_set_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":unordered_set_constructor_test", ":unordered_set_lookup_test", ":unordered_set_members_test", ":unordered_set_modifiers_test", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "unordered_map_test", srcs = ["internal/unordered_map_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], deps = [ ":unordered_map_constructor_test", ":unordered_map_lookup_test", ":unordered_map_members_test", ":unordered_map_modifiers_test", "@com_google_googletest//:gtest_main", ], ) cc_test( name = "sample_element_size_test", srcs = ["sample_element_size_test.cc"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["no_test_loonix"], 
visibility = ["//visibility:private"], deps = [ ":flat_hash_map", ":flat_hash_set", ":node_hash_map", ":node_hash_set", "@com_google_googletest//:gtest_main", ], ) cc_library( name = "btree", srcs = [ "internal/btree.h", "internal/btree_container.h", ], hdrs = [ "btree_map.h", "btree_set.h", ], copts = ABSL_DEFAULT_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:public"], deps = [ ":common", ":compressed_tuple", ":container_memory", ":layout", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "//absl/base:throw_delegate", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", "//absl/strings:cord", "//absl/types:compare", "//absl/utility", ], ) cc_library( name = "btree_test_common", testonly = 1, hdrs = ["btree_test.h"], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, visibility = ["//visibility:private"], deps = [ ":btree", ":flat_hash_set", "//absl/strings", "//absl/strings:cord", "//absl/time", ], ) cc_test( name = "btree_test", size = "large", srcs = [ "btree_test.cc", ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, shard_count = 10, tags = [ "no_test_ios", "no_test_wasm", ], visibility = ["//visibility:private"], deps = [ ":btree", ":btree_test_common", ":counting_allocator", ":test_instance_tracker", "//absl/base:core_headers", "//absl/base:raw_logging_internal", "//absl/flags:flag", "//absl/hash:hash_testing", "//absl/memory", "//absl/meta:type_traits", "//absl/strings", "//absl/types:compare", "@com_google_googletest//:gtest_main", ], ) cc_binary( name = "btree_benchmark", testonly = 1, srcs = [ "btree_benchmark.cc", ], copts = ABSL_TEST_COPTS, linkopts = ABSL_DEFAULT_LINKOPTS, tags = ["benchmark"], visibility = ["//visibility:private"], deps = [ ":btree", ":btree_test_common", ":flat_hash_map", ":flat_hash_set", ":hashtable_debug", "//absl/base:raw_logging_internal", "//absl/flags:flag", "//absl/hash", "//absl/memory", "//absl/strings:cord", "//absl/strings:str_format", "//absl/time", 
"@com_github_google_benchmark//:benchmark_main", ], ) abseil-20220623.1/absl/container/CMakeLists.txt000066400000000000000000000406571430371345100210320ustar00rootroot00000000000000# # Copyright 2017 The Abseil Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # absl_cc_library( NAME btree HDRS "btree_map.h" "btree_set.h" "internal/btree.h" "internal/btree_container.h" COPTS ${ABSL_DEFAULT_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::container_common absl::compare absl::compressed_tuple absl::container_memory absl::cord absl::core_headers absl::layout absl::memory absl::raw_logging_internal absl::strings absl::throw_delegate absl::type_traits absl::utility ) # Internal-only target, do not depend on directly. absl_cc_library( NAME btree_test_common hdrs "btree_test.h" COPTS ${ABSL_TEST_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::btree absl::cord absl::flat_hash_set absl::strings absl::time TESTONLY ) absl_cc_test( NAME btree_test SRCS "btree_test.cc" COPTS ${ABSL_TEST_COPTS} LINKOPTS ${ABSL_DEFAULT_LINKOPTS} DEPS absl::btree absl::btree_test_common absl::compare absl::core_headers absl::counting_allocator absl::flags absl::hash_testing absl::raw_logging_internal absl::strings absl::test_instance_tracker absl::type_traits GTest::gmock_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME compressed_tuple HDRS "internal/compressed_tuple.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::utility PUBLIC ) absl_cc_test( NAME compressed_tuple_test SRCS "internal/compressed_tuple_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::any absl::compressed_tuple absl::memory absl::optional absl::test_instance_tracker absl::utility GTest::gmock_main ) absl_cc_library( NAME fixed_array HDRS "fixed_array.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::compressed_tuple absl::algorithm absl::config absl::core_headers absl::dynamic_annotations absl::throw_delegate absl::memory PUBLIC ) absl_cc_test( NAME fixed_array_test SRCS "fixed_array_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::fixed_array absl::counting_allocator absl::config absl::exception_testing absl::hash_testing absl::memory GTest::gmock_main ) absl_cc_test( NAME fixed_array_exception_safety_test SRCS "fixed_array_exception_safety_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::fixed_array absl::config absl::exception_safety_testing GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME inlined_vector_internal HDRS "internal/inlined_vector.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::compressed_tuple absl::core_headers absl::memory absl::span absl::type_traits PUBLIC ) absl_cc_library( NAME inlined_vector HDRS "inlined_vector.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::algorithm absl::core_headers absl::inlined_vector_internal absl::throw_delegate absl::memory PUBLIC ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME counting_allocator HDRS "internal/counting_allocator.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config ) absl_cc_test( NAME inlined_vector_test SRCS "inlined_vector_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::counting_allocator absl::inlined_vector absl::test_instance_tracker absl::config absl::core_headers absl::exception_testing absl::hash_testing absl::memory absl::raw_logging_internal absl::strings GTest::gmock_main ) absl_cc_test( NAME inlined_vector_exception_safety_test SRCS "inlined_vector_exception_safety_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::inlined_vector absl::config absl::exception_safety_testing GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME test_instance_tracker HDRS "internal/test_instance_tracker.h" SRCS "internal/test_instance_tracker.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::compare TESTONLY ) absl_cc_test( NAME test_instance_tracker_test SRCS "internal/test_instance_tracker_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::test_instance_tracker GTest::gmock_main ) absl_cc_library( NAME flat_hash_map HDRS "flat_hash_map.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory absl::core_headers absl::hash_function_defaults absl::raw_hash_map absl::algorithm_container absl::memory PUBLIC ) absl_cc_test( NAME flat_hash_map_test SRCS "flat_hash_map_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::flat_hash_map absl::hash_generator_testing absl::unordered_map_constructor_test absl::unordered_map_lookup_test absl::unordered_map_members_test absl::unordered_map_modifiers_test absl::any absl::raw_logging_internal GTest::gmock_main ) absl_cc_library( NAME flat_hash_set HDRS "flat_hash_set.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory absl::hash_function_defaults absl::raw_hash_set absl::algorithm_container absl::core_headers absl::memory PUBLIC ) absl_cc_test( NAME flat_hash_set_test SRCS "flat_hash_set_test.cc" COPTS ${ABSL_TEST_COPTS} "-DUNORDERED_SET_CXX17" DEPS 
absl::flat_hash_set absl::hash_generator_testing absl::unordered_set_constructor_test absl::unordered_set_lookup_test absl::unordered_set_members_test absl::unordered_set_modifiers_test absl::memory absl::raw_logging_internal absl::strings GTest::gmock_main ) absl_cc_library( NAME node_hash_map HDRS "node_hash_map.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory absl::core_headers absl::hash_function_defaults absl::node_slot_policy absl::raw_hash_map absl::algorithm_container absl::memory PUBLIC ) absl_cc_test( NAME node_hash_map_test SRCS "node_hash_map_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::node_hash_map absl::tracked absl::unordered_map_constructor_test absl::unordered_map_lookup_test absl::unordered_map_members_test absl::unordered_map_modifiers_test GTest::gmock_main ) absl_cc_library( NAME node_hash_set HDRS "node_hash_set.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::core_headers absl::hash_function_defaults absl::node_slot_policy absl::raw_hash_set absl::algorithm_container absl::memory PUBLIC ) absl_cc_test( NAME node_hash_set_test SRCS "node_hash_set_test.cc" COPTS ${ABSL_TEST_COPTS} "-DUNORDERED_SET_CXX17" DEPS absl::hash_generator_testing absl::node_hash_set absl::unordered_set_constructor_test absl::unordered_set_lookup_test absl::unordered_set_members_test absl::unordered_set_modifiers_test GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME container_memory HDRS "internal/container_memory.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::memory absl::type_traits absl::utility PUBLIC ) absl_cc_test( NAME container_memory_test SRCS "internal/container_memory_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::container_memory absl::strings absl::test_instance_tracker GTest::gmock_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME hash_function_defaults HDRS "internal/hash_function_defaults.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::cord absl::hash absl::strings PUBLIC ) absl_cc_test( NAME hash_function_defaults_test SRCS "internal/hash_function_defaults_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::cord absl::cord_test_helpers absl::hash_function_defaults absl::hash absl::random_random absl::strings GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME hash_generator_testing HDRS "internal/hash_generator_testing.h" SRCS "internal/hash_generator_testing.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_testing absl::memory absl::meta absl::strings TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_testing HDRS "internal/hash_policy_testing.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash absl::strings TESTONLY ) absl_cc_test( NAME hash_policy_testing_test SRCS "internal/hash_policy_testing_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_testing GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME hash_policy_traits HDRS "internal/hash_policy_traits.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::meta PUBLIC ) absl_cc_test( NAME hash_policy_traits_test SRCS "internal/hash_policy_traits_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_traits GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME hashtablez_sampler HDRS "internal/hashtablez_sampler.h" SRCS "internal/hashtablez_sampler.cc" "internal/hashtablez_sampler_force_weak_definition.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::base absl::config absl::exponential_biased absl::sample_recorder absl::synchronization ) absl_cc_test( NAME hashtablez_sampler_test SRCS "internal/hashtablez_sampler_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::config absl::hashtablez_sampler GTest::gmock_main ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME hashtable_debug HDRS "internal/hashtable_debug.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::hashtable_debug_hooks ) # Internal-only target, do not depend on directly. absl_cc_library( NAME hashtable_debug_hooks HDRS "internal/hashtable_debug_hooks.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config PUBLIC ) # Internal-only target, do not depend on directly. absl_cc_library( NAME node_slot_policy HDRS "internal/node_slot_policy.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config PUBLIC ) absl_cc_test( NAME node_slot_policy_test SRCS "internal/node_slot_policy_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_policy_traits absl::node_slot_policy GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME raw_hash_map HDRS "internal/raw_hash_map.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::container_memory absl::raw_hash_set absl::throw_delegate PUBLIC ) # Internal-only target, do not depend on directly. absl_cc_library( NAME container_common HDRS "internal/common.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::type_traits ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME raw_hash_set HDRS "internal/raw_hash_set.h" SRCS "internal/raw_hash_set.cc" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::bits absl::compressed_tuple absl::config absl::container_common absl::container_memory absl::core_headers absl::endian absl::hash_policy_traits absl::hashtable_debug_hooks absl::memory absl::meta absl::optional absl::prefetch absl::utility absl::hashtablez_sampler PUBLIC ) absl_cc_test( NAME raw_hash_set_test SRCS "internal/raw_hash_set_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::container_memory absl::hash_function_defaults absl::hash_policy_testing absl::hashtable_debug absl::raw_hash_set absl::base absl::config absl::core_headers absl::prefetch absl::raw_logging_internal absl::strings GTest::gmock_main ) absl_cc_test( NAME raw_hash_set_allocator_test SRCS "internal/raw_hash_set_allocator_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::raw_hash_set absl::tracked absl::core_headers GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME layout HDRS "internal/layout.h" COPTS ${ABSL_DEFAULT_COPTS} DEPS absl::config absl::core_headers absl::meta absl::strings absl::span absl::utility PUBLIC ) absl_cc_test( NAME layout_test SRCS "internal/layout_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::layout absl::config absl::core_headers absl::raw_logging_internal absl::span GTest::gmock_main ) # Internal-only target, do not depend on directly. absl_cc_library( NAME tracked HDRS "internal/tracked.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::config TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_constructor_test HDRS "internal/unordered_map_constructor_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME unordered_map_lookup_test HDRS "internal/unordered_map_lookup_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_members_test HDRS "internal/unordered_map_members_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::type_traits GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_map_modifiers_test HDRS "internal/unordered_map_modifiers_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_constructor_test HDRS "internal/unordered_set_constructor_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_lookup_test HDRS "internal/unordered_set_lookup_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. absl_cc_library( NAME unordered_set_members_test HDRS "internal/unordered_set_members_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::type_traits GTest::gmock TESTONLY ) # Internal-only target, do not depend on directly. 
absl_cc_library( NAME unordered_set_modifiers_test HDRS "internal/unordered_set_modifiers_test.h" COPTS ${ABSL_TEST_COPTS} DEPS absl::hash_generator_testing absl::hash_policy_testing GTest::gmock TESTONLY ) absl_cc_test( NAME unordered_set_test SRCS "internal/unordered_set_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::unordered_set_constructor_test absl::unordered_set_lookup_test absl::unordered_set_members_test absl::unordered_set_modifiers_test GTest::gmock_main ) absl_cc_test( NAME unordered_map_test SRCS "internal/unordered_map_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::unordered_map_constructor_test absl::unordered_map_lookup_test absl::unordered_map_members_test absl::unordered_map_modifiers_test GTest::gmock_main ) absl_cc_test( NAME sample_element_size_test SRCS "sample_element_size_test.cc" COPTS ${ABSL_TEST_COPTS} DEPS absl::flat_hash_map absl::flat_hash_set absl::node_hash_map absl::node_hash_set GTest::gmock_main ) abseil-20220623.1/absl/container/btree_benchmark.cc000066400000000000000000000653621430371345100217140ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include #include #include #include #include #include #include #include #include #include #include #include #include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/btree_test.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/internal/hashtable_debug.h" #include "absl/flags/flag.h" #include "absl/hash/hash.h" #include "absl/memory/memory.h" #include "absl/strings/cord.h" #include "absl/strings/str_format.h" #include "absl/time/time.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { constexpr size_t kBenchmarkValues = 1 << 20; // How many times we add and remove sub-batches in one batch of *AddRem // benchmarks. constexpr size_t kAddRemBatchSize = 1 << 2; // Generates n values in the range [0, 4 * n]. template std::vector GenerateValues(int n) { constexpr int kSeed = 23; return GenerateValuesWithSeed(n, 4 * n, kSeed); } // Benchmark insertion of values into a container. template void BM_InsertImpl(benchmark::State& state, bool sorted) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); if (sorted) { std::sort(values.begin(), values.end()); } T container(values.begin(), values.end()); // Remove and re-insert 10% of the keys per batch. 
const int batch_size = (kBenchmarkValues + 9) / 10; while (state.KeepRunningBatch(batch_size)) { state.PauseTiming(); const auto i = static_cast(state.iterations()); for (int j = i; j < i + batch_size; j++) { int x = j % kBenchmarkValues; container.erase(key_of_value(values[x])); } state.ResumeTiming(); for (int j = i; j < i + batch_size; j++) { int x = j % kBenchmarkValues; container.insert(values[x]); } } } template void BM_Insert(benchmark::State& state) { BM_InsertImpl(state, false); } template void BM_InsertSorted(benchmark::State& state) { BM_InsertImpl(state, true); } // Benchmark inserting the first few elements in a container. In b-tree, this is // when the root node grows. template void BM_InsertSmall(benchmark::State& state) { using V = typename remove_pair_const::type; const int kSize = 8; std::vector values = GenerateValues(kSize); T container; while (state.KeepRunningBatch(kSize)) { for (int i = 0; i < kSize; ++i) { benchmark::DoNotOptimize(container.insert(values[i])); } state.PauseTiming(); // Do not measure the time it takes to clear the container. container.clear(); state.ResumeTiming(); } } template void BM_LookupImpl(benchmark::State& state, bool sorted) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); if (sorted) { std::sort(values.begin(), values.end()); } T container(values.begin(), values.end()); while (state.KeepRunning()) { int idx = state.iterations() % kBenchmarkValues; benchmark::DoNotOptimize(container.find(key_of_value(values[idx]))); } } // Benchmark lookup of values in a container. template void BM_Lookup(benchmark::State& state) { BM_LookupImpl(state, false); } // Benchmark lookup of values in a full container, meaning that values // are inserted in-order to take advantage of biased insertion, which // yields a full tree. 
template void BM_FullLookup(benchmark::State& state) { BM_LookupImpl(state, true); } // Benchmark erasing values from a container. template void BM_Erase(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); T container(values.begin(), values.end()); // Remove and re-insert 10% of the keys per batch. const int batch_size = (kBenchmarkValues + 9) / 10; while (state.KeepRunningBatch(batch_size)) { const int i = state.iterations(); for (int j = i; j < i + batch_size; j++) { int x = j % kBenchmarkValues; container.erase(key_of_value(values[x])); } state.PauseTiming(); for (int j = i; j < i + batch_size; j++) { int x = j % kBenchmarkValues; container.insert(values[x]); } state.ResumeTiming(); } } // Benchmark erasing multiple values from a container. template void BM_EraseRange(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; std::vector values = GenerateValues(kBenchmarkValues); T container(values.begin(), values.end()); // Remove and re-insert 10% of the keys per batch. const int batch_size = (kBenchmarkValues + 9) / 10; while (state.KeepRunningBatch(batch_size)) { const int i = state.iterations(); const int start_index = i % kBenchmarkValues; state.PauseTiming(); { std::vector removed; removed.reserve(batch_size); auto itr = container.find(key_of_value(values[start_index])); auto start = itr; for (int j = 0; j < batch_size; j++) { if (itr == container.end()) { state.ResumeTiming(); container.erase(start, itr); state.PauseTiming(); itr = container.begin(); start = itr; } removed.push_back(*itr++); } state.ResumeTiming(); container.erase(start, itr); state.PauseTiming(); container.insert(removed.begin(), removed.end()); } state.ResumeTiming(); } } // Predicate that erases every other element. We can't use a lambda because // C++11 doesn't support generic lambdas. 
// TODO(b/207389011): consider adding benchmarks that remove different fractions // of keys (e.g. 10%, 90%). struct EraseIfPred { uint64_t i = 0; template bool operator()(const T&) { return ++i % 2; } }; // Benchmark erasing multiple values from a container with a predicate. template void BM_EraseIf(benchmark::State& state) { using V = typename remove_pair_const::type; std::vector values = GenerateValues(kBenchmarkValues); // Removes half of the keys per batch. const int batch_size = (kBenchmarkValues + 1) / 2; EraseIfPred pred; while (state.KeepRunningBatch(batch_size)) { state.PauseTiming(); { T container(values.begin(), values.end()); state.ResumeTiming(); erase_if(container, pred); benchmark::DoNotOptimize(container); state.PauseTiming(); } state.ResumeTiming(); } } // Benchmark steady-state insert (into first half of range) and remove (from // second half of range), treating the container approximately like a queue with // log-time access for all elements. This benchmark does not test the case where // insertion and removal happen in the same region of the tree. This benchmark // counts two value constructors. template void BM_QueueAddRem(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance"); T container; const size_t half = kBenchmarkValues / 2; std::vector remove_keys(half); std::vector add_keys(half); // We want to do the exact same work repeatedly, and the benchmark can end // after a different number of iterations depending on the speed of the // individual run so we use a large batch size here and ensure that we do // deterministic work every batch. 
while (state.KeepRunningBatch(half * kAddRemBatchSize)) { state.PauseTiming(); container.clear(); for (size_t i = 0; i < half; ++i) { remove_keys[i] = i; add_keys[i] = i; } constexpr int kSeed = 5; std::mt19937_64 rand(kSeed); std::shuffle(remove_keys.begin(), remove_keys.end(), rand); std::shuffle(add_keys.begin(), add_keys.end(), rand); // Note needs lazy generation of values. Generator g(kBenchmarkValues * kAddRemBatchSize); for (size_t i = 0; i < half; ++i) { container.insert(g(add_keys[i])); container.insert(g(half + remove_keys[i])); } // There are three parts each of size "half": // 1 is being deleted from [offset - half, offset) // 2 is standing [offset, offset + half) // 3 is being inserted into [offset + half, offset + 2 * half) size_t offset = 0; for (size_t i = 0; i < kAddRemBatchSize; ++i) { std::shuffle(remove_keys.begin(), remove_keys.end(), rand); std::shuffle(add_keys.begin(), add_keys.end(), rand); offset += half; state.ResumeTiming(); for (size_t idx = 0; idx < half; ++idx) { container.erase(key_of_value(g(offset - half + remove_keys[idx]))); container.insert(g(offset + half + add_keys[idx])); } state.PauseTiming(); } state.ResumeTiming(); } } // Mixed insertion and deletion in the same range using pre-constructed values. template void BM_MixedAddRem(benchmark::State& state) { using V = typename remove_pair_const::type; typename KeyOfValue::type key_of_value; ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance"); T container; // Create two random shuffles std::vector remove_keys(kBenchmarkValues); std::vector add_keys(kBenchmarkValues); // We want to do the exact same work repeatedly, and the benchmark can end // after a different number of iterations depending on the speed of the // individual run so we use a large batch size here and ensure that we do // deterministic work every batch. 
while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) { state.PauseTiming(); container.clear(); constexpr int kSeed = 7; std::mt19937_64 rand(kSeed); std::vector values = GenerateValues(kBenchmarkValues * 2); // Insert the first half of the values (already in random order) container.insert(values.begin(), values.begin() + kBenchmarkValues); // Insert the first half of the values (already in random order) for (size_t i = 0; i < kBenchmarkValues; ++i) { // remove_keys and add_keys will be swapped before each round, // therefore fill add_keys here w/ the keys being inserted, so // they'll be the first to be removed. remove_keys[i] = i + kBenchmarkValues; add_keys[i] = i; } for (size_t i = 0; i < kAddRemBatchSize; ++i) { remove_keys.swap(add_keys); std::shuffle(remove_keys.begin(), remove_keys.end(), rand); std::shuffle(add_keys.begin(), add_keys.end(), rand); state.ResumeTiming(); for (size_t idx = 0; idx < kBenchmarkValues; ++idx) { container.erase(key_of_value(values[remove_keys[idx]])); container.insert(values[add_keys[idx]]); } state.PauseTiming(); } state.ResumeTiming(); } } // Insertion at end, removal from the beginning. This benchmark // counts two value constructors. // TODO(ezb): we could add a GenerateNext version of generator that could reduce // noise for string-like types. template void BM_Fifo(benchmark::State& state) { using V = typename remove_pair_const::type; T container; // Need lazy generation of values as state.max_iterations is large. 
Generator g(kBenchmarkValues + state.max_iterations); for (int i = 0; i < kBenchmarkValues; i++) { container.insert(g(i)); } while (state.KeepRunning()) { container.erase(container.begin()); container.insert(container.end(), g(state.iterations() + kBenchmarkValues)); } } // Iteration (forward) through the tree template void BM_FwdIter(benchmark::State& state) { using V = typename remove_pair_const::type; using R = typename T::value_type const*; std::vector values = GenerateValues(kBenchmarkValues); T container(values.begin(), values.end()); auto iter = container.end(); R r = nullptr; while (state.KeepRunning()) { if (iter == container.end()) iter = container.begin(); r = &(*iter); ++iter; } benchmark::DoNotOptimize(r); } // Benchmark random range-construction of a container. template void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) { using V = typename remove_pair_const::type; std::vector values = GenerateValues(kBenchmarkValues); if (sorted) { std::sort(values.begin(), values.end()); } { T container(values.begin(), values.end()); } while (state.KeepRunning()) { T container(values.begin(), values.end()); benchmark::DoNotOptimize(container); } } template void BM_InsertRangeRandom(benchmark::State& state) { BM_RangeConstructionImpl(state, false); } template void BM_InsertRangeSorted(benchmark::State& state) { BM_RangeConstructionImpl(state, true); } #define STL_ORDERED_TYPES(value) \ using stl_set_##value = std::set; \ using stl_map_##value = std::map; \ using stl_multiset_##value = std::multiset; \ using stl_multimap_##value = std::multimap using StdString = std::string; STL_ORDERED_TYPES(int32_t); STL_ORDERED_TYPES(int64_t); STL_ORDERED_TYPES(StdString); STL_ORDERED_TYPES(Cord); STL_ORDERED_TYPES(Time); #define STL_UNORDERED_TYPES(value) \ using stl_unordered_set_##value = std::unordered_set; \ using stl_unordered_map_##value = std::unordered_map; \ using flat_hash_set_##value = flat_hash_set; \ using flat_hash_map_##value = flat_hash_map; \ 
using stl_unordered_multiset_##value = std::unordered_multiset; \ using stl_unordered_multimap_##value = \ std::unordered_multimap #define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash) \ using stl_unordered_set_##value = std::unordered_set; \ using stl_unordered_map_##value = std::unordered_map; \ using flat_hash_set_##value = flat_hash_set; \ using flat_hash_map_##value = flat_hash_map; \ using stl_unordered_multiset_##value = std::unordered_multiset; \ using stl_unordered_multimap_##value = \ std::unordered_multimap STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash); STL_UNORDERED_TYPES(int32_t); STL_UNORDERED_TYPES(int64_t); STL_UNORDERED_TYPES(StdString); STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash); #define BTREE_TYPES(value) \ using btree_256_set_##value = \ btree_set, std::allocator>; \ using btree_256_map_##value = \ btree_map, \ std::allocator>>; \ using btree_256_multiset_##value = \ btree_multiset, std::allocator>; \ using btree_256_multimap_##value = \ btree_multimap, \ std::allocator>> BTREE_TYPES(int32_t); BTREE_TYPES(int64_t); BTREE_TYPES(StdString); BTREE_TYPES(Cord); BTREE_TYPES(Time); #define MY_BENCHMARK4(type, func) \ void BM_##type##_##func(benchmark::State& state) { BM_##func(state); } \ BENCHMARK(BM_##type##_##func) #define MY_BENCHMARK3_STL(type) \ MY_BENCHMARK4(type, Insert); \ MY_BENCHMARK4(type, InsertSorted); \ MY_BENCHMARK4(type, InsertSmall); \ MY_BENCHMARK4(type, Lookup); \ MY_BENCHMARK4(type, FullLookup); \ MY_BENCHMARK4(type, Erase); \ MY_BENCHMARK4(type, EraseRange); \ MY_BENCHMARK4(type, QueueAddRem); \ MY_BENCHMARK4(type, MixedAddRem); \ MY_BENCHMARK4(type, Fifo); \ MY_BENCHMARK4(type, FwdIter); \ MY_BENCHMARK4(type, InsertRangeRandom); \ MY_BENCHMARK4(type, InsertRangeSorted) #define MY_BENCHMARK3(type) \ MY_BENCHMARK4(type, EraseIf); \ MY_BENCHMARK3_STL(type) #define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \ MY_BENCHMARK3_STL(stl_##type); \ MY_BENCHMARK3_STL(stl_unordered_##type); \ MY_BENCHMARK3(btree_256_##type) #define 
MY_BENCHMARK2(type) \ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \ MY_BENCHMARK3(flat_hash_##type) // Define MULTI_TESTING to see benchmarks for multi-containers also. // // You can use --copt=-DMULTI_TESTING. #ifdef MULTI_TESTING #define MY_BENCHMARK(type) \ MY_BENCHMARK2(set_##type); \ MY_BENCHMARK2(map_##type); \ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type) #else #define MY_BENCHMARK(type) \ MY_BENCHMARK2(set_##type); \ MY_BENCHMARK2(map_##type) #endif MY_BENCHMARK(int32_t); MY_BENCHMARK(int64_t); MY_BENCHMARK(StdString); MY_BENCHMARK(Cord); MY_BENCHMARK(Time); // Define a type whose size and cost of moving are independently customizable. // When sizeof(value_type) increases, we expect btree to no longer have as much // cache-locality advantage over STL. When cost of moving increases, we expect // btree to actually do more work than STL because it has to move values around // and STL doesn't have to. template struct BigType { BigType() : BigType(0) {} explicit BigType(int x) { std::iota(values.begin(), values.end(), x); } void Copy(const BigType& other) { for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i]; // If Copies > Size, do extra copies. for (int i = Size, idx = 0; i < Copies; ++i) { int64_t tmp = other.values[idx]; benchmark::DoNotOptimize(tmp); idx = idx + 1 == Size ? 0 : idx + 1; } } BigType(const BigType& other) { Copy(other); } BigType& operator=(const BigType& other) { Copy(other); return *this; } // Compare only the first Copies elements if Copies is less than Size. bool operator<(const BigType& other) const { return std::lexicographical_compare( values.begin(), values.begin() + std::min(Size, Copies), other.values.begin(), other.values.begin() + std::min(Size, Copies)); } bool operator==(const BigType& other) const { return std::equal(values.begin(), values.begin() + std::min(Size, Copies), other.values.begin()); } // Support absl::Hash. 
template friend State AbslHashValue(State h, const BigType& b) { for (int i = 0; i < Size && i < Copies; ++i) h = State::combine(std::move(h), b.values[i]); return h; } std::array values; }; #define BIG_TYPE_BENCHMARKS(SIZE, COPIES) \ using stl_set_size##SIZE##copies##COPIES = std::set>; \ using stl_map_size##SIZE##copies##COPIES = \ std::map, intptr_t>; \ using stl_multiset_size##SIZE##copies##COPIES = \ std::multiset>; \ using stl_multimap_size##SIZE##copies##COPIES = \ std::multimap, intptr_t>; \ using stl_unordered_set_size##SIZE##copies##COPIES = \ std::unordered_set, \ absl::Hash>>; \ using stl_unordered_map_size##SIZE##copies##COPIES = \ std::unordered_map, intptr_t, \ absl::Hash>>; \ using flat_hash_set_size##SIZE##copies##COPIES = \ flat_hash_set>; \ using flat_hash_map_size##SIZE##copies##COPIES = \ flat_hash_map, intptr_t>; \ using stl_unordered_multiset_size##SIZE##copies##COPIES = \ std::unordered_multiset, \ absl::Hash>>; \ using stl_unordered_multimap_size##SIZE##copies##COPIES = \ std::unordered_multimap, intptr_t, \ absl::Hash>>; \ using btree_256_set_size##SIZE##copies##COPIES = \ btree_set>; \ using btree_256_map_size##SIZE##copies##COPIES = \ btree_map, intptr_t>; \ using btree_256_multiset_size##SIZE##copies##COPIES = \ btree_multiset>; \ using btree_256_multimap_size##SIZE##copies##COPIES = \ btree_multimap, intptr_t>; \ MY_BENCHMARK(size##SIZE##copies##COPIES) // Define BIG_TYPE_TESTING to see benchmarks for more big types. // // You can use --copt=-DBIG_TYPE_TESTING. #ifndef NODESIZE_TESTING #ifdef BIG_TYPE_TESTING BIG_TYPE_BENCHMARKS(1, 4); BIG_TYPE_BENCHMARKS(4, 1); BIG_TYPE_BENCHMARKS(4, 4); BIG_TYPE_BENCHMARKS(1, 8); BIG_TYPE_BENCHMARKS(8, 1); BIG_TYPE_BENCHMARKS(8, 8); BIG_TYPE_BENCHMARKS(1, 16); BIG_TYPE_BENCHMARKS(16, 1); BIG_TYPE_BENCHMARKS(16, 16); BIG_TYPE_BENCHMARKS(1, 32); BIG_TYPE_BENCHMARKS(32, 1); BIG_TYPE_BENCHMARKS(32, 32); #else BIG_TYPE_BENCHMARKS(32, 32); #endif #endif // Benchmark using unique_ptrs to large value types. 
In order to be able to use // the same benchmark code as the other types, use a type that holds a // unique_ptr and has a copy constructor. template struct BigTypePtr { BigTypePtr() : BigTypePtr(0) {} explicit BigTypePtr(int x) { ptr = absl::make_unique>(x); } BigTypePtr(const BigTypePtr& other) { ptr = absl::make_unique>(*other.ptr); } BigTypePtr(BigTypePtr&& other) noexcept = default; BigTypePtr& operator=(const BigTypePtr& other) { ptr = absl::make_unique>(*other.ptr); } BigTypePtr& operator=(BigTypePtr&& other) noexcept = default; bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; } bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; } std::unique_ptr> ptr; }; template double ContainerInfo(const btree_set>& b) { const double bytes_used = b.bytes_used() + b.size() * sizeof(BigType); const double bytes_per_value = bytes_used / b.size(); BtreeContainerInfoLog(b, bytes_used, bytes_per_value); return bytes_per_value; } template double ContainerInfo(const btree_map>& b) { const double bytes_used = b.bytes_used() + b.size() * sizeof(BigType); const double bytes_per_value = bytes_used / b.size(); BtreeContainerInfoLog(b, bytes_used, bytes_per_value); return bytes_per_value; } #define BIG_TYPE_PTR_BENCHMARKS(SIZE) \ using stl_set_size##SIZE##copies##SIZE##ptr = std::set>; \ using stl_map_size##SIZE##copies##SIZE##ptr = \ std::map>; \ using stl_unordered_set_size##SIZE##copies##SIZE##ptr = \ std::unordered_set, \ absl::Hash>>; \ using stl_unordered_map_size##SIZE##copies##SIZE##ptr = \ std::unordered_map>; \ using flat_hash_set_size##SIZE##copies##SIZE##ptr = \ flat_hash_set>; \ using flat_hash_map_size##SIZE##copies##SIZE##ptr = \ flat_hash_map>; \ using btree_256_set_size##SIZE##copies##SIZE##ptr = \ btree_set>; \ using btree_256_map_size##SIZE##copies##SIZE##ptr = \ btree_map>; \ MY_BENCHMARK3_STL(stl_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3_STL(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \ 
MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3_STL(stl_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3_STL(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \ MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr) BIG_TYPE_PTR_BENCHMARKS(32); } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/btree_map.h000066400000000000000000000753741430371345100204050ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: btree_map.h // ----------------------------------------------------------------------------- // // This header file defines B-tree maps: sorted associative containers mapping // keys to values. // // * `absl::btree_map<>` // * `absl::btree_multimap<>` // // These B-tree types are similar to the corresponding types in the STL // (`std::map` and `std::multimap`) and generally conform to the STL interfaces // of those types. However, because they are implemented using B-trees, they // are more efficient in most situations. 
// // Unlike `std::map` and `std::multimap`, which are commonly implemented using // red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold // multiple values per node. Holding multiple values per node often makes // B-tree maps perform better than their `std::map` counterparts, because // multiple entries can be checked within the same cache hit. // // However, these types should not be considered drop-in replacements for // `std::map` and `std::multimap` as there are some API differences, which are // noted in this header file. The most consequential differences with respect to // migrating to b-tree from the STL types are listed in the next paragraph. // Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of // more than one iterator, pointer, or reference simultaneously. For this // reason, `insert()` and `erase()` return a valid iterator at the current // position. Another important difference is that key-types must be // copy-constructible. #ifndef ABSL_CONTAINER_BTREE_MAP_H_ #define ABSL_CONTAINER_BTREE_MAP_H_ #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct map_params; } // namespace container_internal // absl::btree_map<> // // An `absl::btree_map` is an ordered associative container of // unique keys and associated values designed to be a more efficient replacement // for `std::map` (in most cases). // // Keys are sorted using an (optional) comparison function, which defaults to // `std::less`. 
// // An `absl::btree_map` uses a default allocator of // `std::allocator>` to allocate (and deallocate) // nodes, and construct and destruct values within those nodes. You may // instead specify a custom allocator `A` (which in turn requires specifying a // custom comparator `C`) as in `absl::btree_map`. // template , typename Alloc = std::allocator>> class btree_map : public container_internal::btree_map_container< container_internal::btree>> { using Base = typename btree_map::btree_map_container; public: // Constructors and Assignment Operators // // A `btree_map` supports the same overload set as `std::map` // for construction and assignment: // // * Default constructor // // absl::btree_map map1; // // * Initializer List constructor // // absl::btree_map map2 = // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; // // * Copy constructor // // absl::btree_map map3(map2); // // * Copy assignment operator // // absl::btree_map map4; // map4 = map3; // // * Move constructor // // // Move is guaranteed efficient // absl::btree_map map5(std::move(map4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::btree_map map6; // map6 = std::move(map5); // // * Range constructor // // std::vector> v = {{1, "a"}, {2, "b"}}; // absl::btree_map map7(v.begin(), v.end()); btree_map() {} using Base::Base; // btree_map::begin() // // Returns an iterator to the beginning of the `btree_map`. using Base::begin; // btree_map::cbegin() // // Returns a const iterator to the beginning of the `btree_map`. using Base::cbegin; // btree_map::end() // // Returns an iterator to the end of the `btree_map`. using Base::end; // btree_map::cend() // // Returns a const iterator to the end of the `btree_map`. using Base::cend; // btree_map::empty() // // Returns whether or not the `btree_map` is empty. 
using Base::empty; // btree_map::max_size() // // Returns the largest theoretical possible number of elements within a // `btree_map` under current memory constraints. This value can be thought // of as the largest value of `std::distance(begin(), end())` for a // `btree_map`. using Base::max_size; // btree_map::size() // // Returns the number of elements currently within the `btree_map`. using Base::size; // btree_map::clear() // // Removes all elements from the `btree_map`. Invalidates any references, // pointers, or iterators referring to contained elements. using Base::clear; // btree_map::erase() // // Erases elements within the `btree_map`. If an erase occurs, any references, // pointers, or iterators are invalidated. // Overloads are listed below. // // iterator erase(iterator position): // iterator erase(const_iterator position): // // Erases the element at `position` of the `btree_map`, returning // the iterator pointing to the element after the one that was erased // (or end() if none exists). // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning // the iterator pointing to the element after the interval that was erased // (or end() if none exists). // // template size_type erase(const K& key): // // Erases the element with the matching key, if it exists, returning the // number of elements erased (0 or 1). using Base::erase; // btree_map::insert() // // Inserts an element of the specified value into the `btree_map`, // returning an iterator pointing to the newly inserted element, provided that // an element with the given key does not already exist. If an insertion // occurs, any references, pointers, or iterators are invalidated. // Overloads are listed below. // // std::pair insert(const value_type& value): // // Inserts a value into the `btree_map`. 
Returns a pair consisting of an // iterator to the inserted element (or to the element that prevented the // insertion) and a bool denoting whether the insertion took place. // // std::pair insert(value_type&& value): // // Inserts a moveable value into the `btree_map`. Returns a pair // consisting of an iterator to the inserted element (or to the element that // prevented the insertion) and a bool denoting whether the insertion took // place. // // iterator insert(const_iterator hint, const value_type& value): // iterator insert(const_iterator hint, value_type&& value): // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. Returns an iterator to the // inserted element, or to the existing element that prevented the // insertion. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. using Base::insert; // btree_map::insert_or_assign() // // Inserts an element of the specified value into the `btree_map` provided // that a value with the given key does not already exist, or replaces the // corresponding mapped type with the forwarded `obj` argument if a key for // that value already exists, returning an iterator pointing to the newly // inserted element. Overloads are listed below. // // pair insert_or_assign(const key_type& k, M&& obj): // pair insert_or_assign(key_type&& k, M&& obj): // // Inserts/Assigns (or moves) the element of the specified key into the // `btree_map`. If the returned bool is true, insertion took place, and if // it's false, assignment took place. 
// // iterator insert_or_assign(const_iterator hint, // const key_type& k, M&& obj): // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj): // // Inserts/Assigns (or moves) the element of the specified key into the // `btree_map` using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. using Base::insert_or_assign; // btree_map::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `btree_map`, provided that no element with the given key // already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. Prefer `try_emplace()` unless your key is not // copyable or moveable. // // If an insertion occurs, any references, pointers, or iterators are // invalidated. using Base::emplace; // btree_map::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `btree_map`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search, and only inserts // provided that no element with the given key already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. Prefer `try_emplace()` unless your key is not // copyable or moveable. // // If an insertion occurs, any references, pointers, or iterators are // invalidated. using Base::emplace_hint; // btree_map::try_emplace() // // Inserts an element of the specified value by constructing it in-place // within the `btree_map`, provided that no element with the given key // already exists. Unlike `emplace()`, if an element with the given key // already exists, we guarantee that no element is constructed. 
// // If an insertion occurs, any references, pointers, or iterators are // invalidated. // // Overloads are listed below. // // std::pair try_emplace(const key_type& k, Args&&... args): // std::pair try_emplace(key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `btree_map`. // // iterator try_emplace(const_iterator hint, // const key_type& k, Args&&... args): // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `btree_map` using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. using Base::try_emplace; // btree_map::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_map` // does not contain an element with a matching key, this function returns an // empty node handle. // // NOTE: when compiled in an earlier version of C++ than C++17, // `node_type::key()` returns a const reference to the key instead of a // mutable reference. We cannot safely return a mutable reference without // std::launder (which is not available before C++17). // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). // It does NOT refer to the data layout of the underlying btree. 
using Base::extract; // btree_map::merge() // // Extracts elements from a given `source` btree_map into this // `btree_map`. If the destination `btree_map` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; // btree_map::swap(btree_map& other) // // Exchanges the contents of this `btree_map` with those of the `other` // btree_map, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `btree_map` remain valid, excepting // for the past-the-end iterator, which is invalidated. using Base::swap; // btree_map::at() // // Returns a reference to the mapped value of the element with key equivalent // to the passed key. using Base::at; // btree_map::contains() // // template bool contains(const K& key) const: // // Determines whether an element comparing equal to the given `key` exists // within the `btree_map`, returning `true` if so or `false` otherwise. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::contains; // btree_map::count() // // template size_type count(const K& key) const: // // Returns the number of elements comparing equal to the given `key` within // the `btree_map`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_map`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::count; // btree_map::equal_range() // // Returns a half-open range [first, last), defined by a `std::pair` of two // iterators, containing all elements with the passed key in the `btree_map`. using Base::equal_range; // btree_map::find() // // template iterator find(const K& key): // template const_iterator find(const K& key) const: // // Finds an element with the passed `key` within the `btree_map`. 
// // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::find; // btree_map::lower_bound() // // template iterator lower_bound(const K& key): // template const_iterator lower_bound(const K& key) const: // // Finds the first element with a key that is not less than `key` within the // `btree_map`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::lower_bound; // btree_map::upper_bound() // // template iterator upper_bound(const K& key): // template const_iterator upper_bound(const K& key) const: // // Finds the first element with a key that is greater than `key` within the // `btree_map`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::upper_bound; // btree_map::operator[]() // // Returns a reference to the value mapped to the passed key within the // `btree_map`, performing an `insert()` if the key does not already // exist. // // If an insertion occurs, any references, pointers, or iterators are // invalidated. Otherwise iterators are not affected and references are not // invalidated. Overloads are listed below. // // T& operator[](key_type&& key): // T& operator[](const key_type& key): // // Inserts a value_type object constructed in-place if the element with the // given key does not exist. using Base::operator[]; // btree_map::get_allocator() // // Returns the allocator function associated with this `btree_map`. using Base::get_allocator; // btree_map::key_comp(); // // Returns the key comparator associated with this `btree_map`. using Base::key_comp; // btree_map::value_comp(); // // Returns the value comparator associated with this `btree_map`. using Base::value_comp; }; // absl::swap(absl::btree_map<>, absl::btree_map<>) // // Swaps the contents of two `absl::btree_map` containers. 
template void swap(btree_map &x, btree_map &y) { return x.swap(y); } // absl::erase_if(absl::btree_map<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. // Returns the number of erased elements. template typename btree_map::size_type erase_if( btree_map &map, Pred pred) { return container_internal::btree_access::erase_if(map, std::move(pred)); } // absl::btree_multimap // // An `absl::btree_multimap` is an ordered associative container of // keys and associated values designed to be a more efficient replacement for // `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap // allows multiple elements with equivalent keys. // // Keys are sorted using an (optional) comparison function, which defaults to // `std::less`. // // An `absl::btree_multimap` uses a default allocator of // `std::allocator>` to allocate (and deallocate) // nodes, and construct and destruct values within those nodes. You may // instead specify a custom allocator `A` (which in turn requires specifying a // custom comparator `C`) as in `absl::btree_multimap`. 
// template , typename Alloc = std::allocator>> class btree_multimap : public container_internal::btree_multimap_container< container_internal::btree>> { using Base = typename btree_multimap::btree_multimap_container; public: // Constructors and Assignment Operators // // A `btree_multimap` supports the same overload set as `std::multimap` // for construction and assignment: // // * Default constructor // // absl::btree_multimap map1; // // * Initializer List constructor // // absl::btree_multimap map2 = // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; // // * Copy constructor // // absl::btree_multimap map3(map2); // // * Copy assignment operator // // absl::btree_multimap map4; // map4 = map3; // // * Move constructor // // // Move is guaranteed efficient // absl::btree_multimap map5(std::move(map4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::btree_multimap map6; // map6 = std::move(map5); // // * Range constructor // // std::vector> v = {{1, "a"}, {2, "b"}}; // absl::btree_multimap map7(v.begin(), v.end()); btree_multimap() {} using Base::Base; // btree_multimap::begin() // // Returns an iterator to the beginning of the `btree_multimap`. using Base::begin; // btree_multimap::cbegin() // // Returns a const iterator to the beginning of the `btree_multimap`. using Base::cbegin; // btree_multimap::end() // // Returns an iterator to the end of the `btree_multimap`. using Base::end; // btree_multimap::cend() // // Returns a const iterator to the end of the `btree_multimap`. using Base::cend; // btree_multimap::empty() // // Returns whether or not the `btree_multimap` is empty. using Base::empty; // btree_multimap::max_size() // // Returns the largest theoretical possible number of elements within a // `btree_multimap` under current memory constraints. This value can be // thought of as the largest value of `std::distance(begin(), end())` for a // `btree_multimap`. 
using Base::max_size; // btree_multimap::size() // // Returns the number of elements currently within the `btree_multimap`. using Base::size; // btree_multimap::clear() // // Removes all elements from the `btree_multimap`. Invalidates any references, // pointers, or iterators referring to contained elements. using Base::clear; // btree_multimap::erase() // // Erases elements within the `btree_multimap`. If an erase occurs, any // references, pointers, or iterators are invalidated. // Overloads are listed below. // // iterator erase(iterator position): // iterator erase(const_iterator position): // // Erases the element at `position` of the `btree_multimap`, returning // the iterator pointing to the element after the one that was erased // (or end() if none exists). // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning // the iterator pointing to the element after the interval that was erased // (or end() if none exists). // // template size_type erase(const K& key): // // Erases the elements matching the key, if any exist, returning the // number of elements erased. using Base::erase; // btree_multimap::insert() // // Inserts an element of the specified value into the `btree_multimap`, // returning an iterator pointing to the newly inserted element. // Any references, pointers, or iterators are invalidated. Overloads are // listed below. // // iterator insert(const value_type& value): // // Inserts a value into the `btree_multimap`, returning an iterator to the // inserted element. // // iterator insert(value_type&& value): // // Inserts a moveable value into the `btree_multimap`, returning an iterator // to the inserted element. // // iterator insert(const_iterator hint, const value_type& value): // iterator insert(const_iterator hint, value_type&& value): // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. 
Returns an iterator to the // inserted element. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. using Base::insert; // btree_multimap::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `btree_multimap`. Any references, pointers, or iterators are // invalidated. using Base::emplace; // btree_multimap::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `btree_multimap`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search. // // Any references, pointers, or iterators are invalidated. using Base::emplace_hint; // btree_multimap::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_multimap` // does not contain an element with a matching key, this function returns an // empty node handle. // // NOTE: when compiled in an earlier version of C++ than C++17, // `node_type::key()` returns a const reference to the key instead of a // mutable reference. We cannot safely return a mutable reference without // std::launder (which is not available before C++17). // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). 
// It does NOT refer to the data layout of the underlying btree. using Base::extract; // btree_multimap::merge() // // Extracts all elements from a given `source` btree_multimap into this // `btree_multimap`. using Base::merge; // btree_multimap::swap(btree_multimap& other) // // Exchanges the contents of this `btree_multimap` with those of the `other` // btree_multimap, avoiding invocation of any move, copy, or swap operations // on individual elements. // // All iterators and references on the `btree_multimap` remain valid, // excepting for the past-the-end iterator, which is invalidated. using Base::swap; // btree_multimap::contains() // // template bool contains(const K& key) const: // // Determines whether an element comparing equal to the given `key` exists // within the `btree_multimap`, returning `true` if so or `false` otherwise. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::contains; // btree_multimap::count() // // template size_type count(const K& key) const: // // Returns the number of elements comparing equal to the given `key` within // the `btree_multimap`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::count; // btree_multimap::equal_range() // // Returns a half-open range [first, last), defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `btree_multimap`. using Base::equal_range; // btree_multimap::find() // // template iterator find(const K& key): // template const_iterator find(const K& key) const: // // Finds an element with the passed `key` within the `btree_multimap`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. 
using Base::find; // btree_multimap::lower_bound() // // template iterator lower_bound(const K& key): // template const_iterator lower_bound(const K& key) const: // // Finds the first element with a key that is not less than `key` within the // `btree_multimap`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::lower_bound; // btree_multimap::upper_bound() // // template iterator upper_bound(const K& key): // template const_iterator upper_bound(const K& key) const: // // Finds the first element with a key that is greater than `key` within the // `btree_multimap`. // // Supports heterogeneous lookup, provided that the map has a compatible // heterogeneous comparator. using Base::upper_bound; // btree_multimap::get_allocator() // // Returns the allocator function associated with this `btree_multimap`. using Base::get_allocator; // btree_multimap::key_comp(); // // Returns the key comparator associated with this `btree_multimap`. using Base::key_comp; // btree_multimap::value_comp(); // // Returns the value comparator associated with this `btree_multimap`. using Base::value_comp; }; // absl::swap(absl::btree_multimap<>, absl::btree_multimap<>) // // Swaps the contents of two `absl::btree_multimap` containers. template void swap(btree_multimap &x, btree_multimap &y) { return x.swap(y); } // absl::erase_if(absl::btree_multimap<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. // Returns the number of erased elements. template typename btree_multimap::size_type erase_if( btree_multimap &map, Pred pred) { return container_internal::btree_access::erase_if(map, std::move(pred)); } namespace container_internal { // A parameters structure for holding the type parameters for a btree_map. // Compare and Alloc should be nothrow copy-constructible. 
template struct map_params : common_params> { using super_type = typename map_params::common_params; using mapped_type = Data; // This type allows us to move keys when it is safe to do so. It is safe // for maps in which value_type and mutable_value_type are layout compatible. using slot_policy = typename super_type::slot_policy; using slot_type = typename super_type::slot_type; using value_type = typename super_type::value_type; using init_type = typename super_type::init_type; template static auto key(const V &value) -> decltype(value.first) { return value.first; } static const Key &key(const slot_type *s) { return slot_policy::key(s); } static const Key &key(slot_type *s) { return slot_policy::key(s); } // For use in node handle. static auto mutable_key(slot_type *s) -> decltype(slot_policy::mutable_key(s)) { return slot_policy::mutable_key(s); } static mapped_type &value(value_type *value) { return value->second; } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_MAP_H_ abseil-20220623.1/absl/container/btree_set.h000066400000000000000000000666151430371345100204210ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // ----------------------------------------------------------------------------- // File: btree_set.h // ----------------------------------------------------------------------------- // // This header file defines B-tree sets: sorted associative containers of // values. // // * `absl::btree_set<>` // * `absl::btree_multiset<>` // // These B-tree types are similar to the corresponding types in the STL // (`std::set` and `std::multiset`) and generally conform to the STL interfaces // of those types. However, because they are implemented using B-trees, they // are more efficient in most situations. // // Unlike `std::set` and `std::multiset`, which are commonly implemented using // red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold // multiple values per node. Holding multiple values per node often makes // B-tree sets perform better than their `std::set` counterparts, because // multiple entries can be checked within the same cache hit. // // However, these types should not be considered drop-in replacements for // `std::set` and `std::multiset` as there are some API differences, which are // noted in this header file. The most consequential differences with respect to // migrating to b-tree from the STL types are listed in the next paragraph. // Other API differences are minor. // // Importantly, insertions and deletions may invalidate outstanding iterators, // pointers, and references to elements. Such invalidations are typically only // an issue if insertion and deletion operations are interleaved with the use of // more than one iterator, pointer, or reference simultaneously. For this // reason, `insert()` and `erase()` return a valid iterator at the current // position. 
#ifndef ABSL_CONTAINER_BTREE_SET_H_ #define ABSL_CONTAINER_BTREE_SET_H_ #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/btree_container.h" // IWYU pragma: export namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct set_slot_policy; template struct set_params; } // namespace container_internal // absl::btree_set<> // // An `absl::btree_set` is an ordered associative container of unique key // values designed to be a more efficient replacement for `std::set` (in most // cases). // // Keys are sorted using an (optional) comparison function, which defaults to // `std::less`. // // An `absl::btree_set` uses a default allocator of `std::allocator` to // allocate (and deallocate) nodes, and construct and destruct values within // those nodes. You may instead specify a custom allocator `A` (which in turn // requires specifying a custom comparator `C`) as in // `absl::btree_set`. // template , typename Alloc = std::allocator> class btree_set : public container_internal::btree_set_container< container_internal::btree>> { using Base = typename btree_set::btree_set_container; public: // Constructors and Assignment Operators // // A `btree_set` supports the same overload set as `std::set` // for construction and assignment: // // * Default constructor // // absl::btree_set set1; // // * Initializer List constructor // // absl::btree_set set2 = // {{"huey"}, {"dewey"}, {"louie"},}; // // * Copy constructor // // absl::btree_set set3(set2); // // * Copy assignment operator // // absl::btree_set set4; // set4 = set3; // // * Move constructor // // // Move is guaranteed efficient // absl::btree_set set5(std::move(set4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::btree_set set6; // set6 = std::move(set5); // // * Range constructor // // std::vector v = {"a", "b"}; // absl::btree_set set7(v.begin(), v.end()); btree_set() {} using Base::Base; // 
btree_set::begin() // // Returns an iterator to the beginning of the `btree_set`. using Base::begin; // btree_set::cbegin() // // Returns a const iterator to the beginning of the `btree_set`. using Base::cbegin; // btree_set::end() // // Returns an iterator to the end of the `btree_set`. using Base::end; // btree_set::cend() // // Returns a const iterator to the end of the `btree_set`. using Base::cend; // btree_set::empty() // // Returns whether or not the `btree_set` is empty. using Base::empty; // btree_set::max_size() // // Returns the largest theoretical possible number of elements within a // `btree_set` under current memory constraints. This value can be thought // of as the largest value of `std::distance(begin(), end())` for a // `btree_set`. using Base::max_size; // btree_set::size() // // Returns the number of elements currently within the `btree_set`. using Base::size; // btree_set::clear() // // Removes all elements from the `btree_set`. Invalidates any references, // pointers, or iterators referring to contained elements. using Base::clear; // btree_set::erase() // // Erases elements within the `btree_set`. Overloads are listed below. // // iterator erase(iterator position): // iterator erase(const_iterator position): // // Erases the element at `position` of the `btree_set`, returning // the iterator pointing to the element after the one that was erased // (or end() if none exists). // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning // the iterator pointing to the element after the interval that was erased // (or end() if none exists). // // template size_type erase(const K& key): // // Erases the element with the matching key, if it exists, returning the // number of elements erased (0 or 1). 
using Base::erase; // btree_set::insert() // // Inserts an element of the specified value into the `btree_set`, // returning an iterator pointing to the newly inserted element, provided that // an element with the given key does not already exist. If an insertion // occurs, any references, pointers, or iterators are invalidated. // Overloads are listed below. // // std::pair insert(const value_type& value): // // Inserts a value into the `btree_set`. Returns a pair consisting of an // iterator to the inserted element (or to the element that prevented the // insertion) and a bool denoting whether the insertion took place. // // std::pair insert(value_type&& value): // // Inserts a moveable value into the `btree_set`. Returns a pair // consisting of an iterator to the inserted element (or to the element that // prevented the insertion) and a bool denoting whether the insertion took // place. // // iterator insert(const_iterator hint, const value_type& value): // iterator insert(const_iterator hint, value_type&& value): // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. Returns an iterator to the // inserted element, or to the existing element that prevented the // insertion. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. using Base::insert; // btree_set::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `btree_set`, provided that no element with the given key // already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. // // If an insertion occurs, any references, pointers, or iterators are // invalidated. 
using Base::emplace; // btree_set::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `btree_set`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search, and only inserts // provided that no element with the given key already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. // // If an insertion occurs, any references, pointers, or iterators are // invalidated. using Base::emplace_hint; // btree_set::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_set` // does not contain an element with a matching key, this function returns an // empty node handle. // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). // It does NOT refer to the data layout of the underlying btree. using Base::extract; // btree_set::merge() // // Extracts elements from a given `source` btree_set into this // `btree_set`. If the destination `btree_set` already contains an // element with an equivalent key, that element is not extracted. 
using Base::merge; // btree_set::swap(btree_set& other) // // Exchanges the contents of this `btree_set` with those of the `other` // btree_set, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `btree_set` remain valid, excepting // for the past-the-end iterator, which is invalidated. using Base::swap; // btree_set::contains() // // template bool contains(const K& key) const: // // Determines whether an element comparing equal to the given `key` exists // within the `btree_set`, returning `true` if so or `false` otherwise. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::contains; // btree_set::count() // // template size_type count(const K& key) const: // // Returns the number of elements comparing equal to the given `key` within // the `btree_set`. Note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `btree_set`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::count; // btree_set::equal_range() // // Returns a closed range [first, last], defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `btree_set`. using Base::equal_range; // btree_set::find() // // template iterator find(const K& key): // template const_iterator find(const K& key) const: // // Finds an element with the passed `key` within the `btree_set`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::find; // btree_set::lower_bound() // // template iterator lower_bound(const K& key): // template const_iterator lower_bound(const K& key) const: // // Finds the first element that is not less than `key` within the `btree_set`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. 
using Base::lower_bound; // btree_set::upper_bound() // // template iterator upper_bound(const K& key): // template const_iterator upper_bound(const K& key) const: // // Finds the first element that is greater than `key` within the `btree_set`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::upper_bound; // btree_set::get_allocator() // // Returns the allocator function associated with this `btree_set`. using Base::get_allocator; // btree_set::key_comp(); // // Returns the key comparator associated with this `btree_set`. using Base::key_comp; // btree_set::value_comp(); // // Returns the value comparator associated with this `btree_set`. The keys to // sort the elements are the values themselves, therefore `value_comp` and its // sibling member function `key_comp` are equivalent. using Base::value_comp; }; // absl::swap(absl::btree_set<>, absl::btree_set<>) // // Swaps the contents of two `absl::btree_set` containers. template void swap(btree_set &x, btree_set &y) { return x.swap(y); } // absl::erase_if(absl::btree_set<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. // Returns the number of erased elements. template typename btree_set::size_type erase_if(btree_set &set, Pred pred) { return container_internal::btree_access::erase_if(set, std::move(pred)); } // absl::btree_multiset<> // // An `absl::btree_multiset` is an ordered associative container of // keys and associated values designed to be a more efficient replacement // for `std::multiset` (in most cases). Unlike `absl::btree_set`, a B-tree // multiset allows equivalent elements. // // Keys are sorted using an (optional) comparison function, which defaults to // `std::less`. // // An `absl::btree_multiset` uses a default allocator of `std::allocator` // to allocate (and deallocate) nodes, and construct and destruct values within // those nodes. 
You may instead specify a custom allocator `A` (which in turn // requires specifying a custom comparator `C`) as in // `absl::btree_multiset`. // template , typename Alloc = std::allocator> class btree_multiset : public container_internal::btree_multiset_container< container_internal::btree>> { using Base = typename btree_multiset::btree_multiset_container; public: // Constructors and Assignment Operators // // A `btree_multiset` supports the same overload set as `std::set` // for construction and assignment: // // * Default constructor // // absl::btree_multiset set1; // // * Initializer List constructor // // absl::btree_multiset set2 = // {{"huey"}, {"dewey"}, {"louie"},}; // // * Copy constructor // // absl::btree_multiset set3(set2); // // * Copy assignment operator // // absl::btree_multiset set4; // set4 = set3; // // * Move constructor // // // Move is guaranteed efficient // absl::btree_multiset set5(std::move(set4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::btree_multiset set6; // set6 = std::move(set5); // // * Range constructor // // std::vector v = {"a", "b"}; // absl::btree_multiset set7(v.begin(), v.end()); btree_multiset() {} using Base::Base; // btree_multiset::begin() // // Returns an iterator to the beginning of the `btree_multiset`. using Base::begin; // btree_multiset::cbegin() // // Returns a const iterator to the beginning of the `btree_multiset`. using Base::cbegin; // btree_multiset::end() // // Returns an iterator to the end of the `btree_multiset`. using Base::end; // btree_multiset::cend() // // Returns a const iterator to the end of the `btree_multiset`. using Base::cend; // btree_multiset::empty() // // Returns whether or not the `btree_multiset` is empty. using Base::empty; // btree_multiset::max_size() // // Returns the largest theoretical possible number of elements within a // `btree_multiset` under current memory constraints. 
This value can be // thought of as the largest value of `std::distance(begin(), end())` for a // `btree_multiset`. using Base::max_size; // btree_multiset::size() // // Returns the number of elements currently within the `btree_multiset`. using Base::size; // btree_multiset::clear() // // Removes all elements from the `btree_multiset`. Invalidates any references, // pointers, or iterators referring to contained elements. using Base::clear; // btree_multiset::erase() // // Erases elements within the `btree_multiset`. Overloads are listed below. // // iterator erase(iterator position): // iterator erase(const_iterator position): // // Erases the element at `position` of the `btree_multiset`, returning // the iterator pointing to the element after the one that was erased // (or end() if none exists). // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning // the iterator pointing to the element after the interval that was erased // (or end() if none exists). // // template size_type erase(const K& key): // // Erases the elements matching the key, if any exist, returning the // number of elements erased. using Base::erase; // btree_multiset::insert() // // Inserts an element of the specified value into the `btree_multiset`, // returning an iterator pointing to the newly inserted element. // Any references, pointers, or iterators are invalidated. Overloads are // listed below. // // iterator insert(const value_type& value): // // Inserts a value into the `btree_multiset`, returning an iterator to the // inserted element. // // iterator insert(value_type&& value): // // Inserts a moveable value into the `btree_multiset`, returning an iterator // to the inserted element. 
// // iterator insert(const_iterator hint, const value_type& value): // iterator insert(const_iterator hint, value_type&& value): // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. Returns an iterator to the // inserted element. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. using Base::insert; // btree_multiset::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `btree_multiset`. Any references, pointers, or iterators are // invalidated. using Base::emplace; // btree_multiset::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `btree_multiset`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search. // // Any references, pointers, or iterators are invalidated. using Base::emplace_hint; // btree_multiset::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // // template node_type extract(const K& k): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `btree_multiset` // does not contain an element with a matching key, this function returns an // empty node handle. // // NOTE: In this context, `node_type` refers to the C++17 concept of a // move-only type that owns and provides access to the elements in associative // containers (https://en.cppreference.com/w/cpp/container/node_handle). 
// It does NOT refer to the data layout of the underlying btree. using Base::extract; // btree_multiset::merge() // // Extracts all elements from a given `source` btree_multiset into this // `btree_multiset`. using Base::merge; // btree_multiset::swap(btree_multiset& other) // // Exchanges the contents of this `btree_multiset` with those of the `other` // btree_multiset, avoiding invocation of any move, copy, or swap operations // on individual elements. // // All iterators and references on the `btree_multiset` remain valid, // excepting for the past-the-end iterator, which is invalidated. using Base::swap; // btree_multiset::contains() // // template bool contains(const K& key) const: // // Determines whether an element comparing equal to the given `key` exists // within the `btree_multiset`, returning `true` if so or `false` otherwise. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::contains; // btree_multiset::count() // // template size_type count(const K& key) const: // // Returns the number of elements comparing equal to the given `key` within // the `btree_multiset`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::count; // btree_multiset::equal_range() // // Returns a closed range [first, last], defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `btree_multiset`. using Base::equal_range; // btree_multiset::find() // // template iterator find(const K& key): // template const_iterator find(const K& key) const: // // Finds an element with the passed `key` within the `btree_multiset`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. 
using Base::find; // btree_multiset::lower_bound() // // template iterator lower_bound(const K& key): // template const_iterator lower_bound(const K& key) const: // // Finds the first element that is not less than `key` within the // `btree_multiset`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::lower_bound; // btree_multiset::upper_bound() // // template iterator upper_bound(const K& key): // template const_iterator upper_bound(const K& key) const: // // Finds the first element that is greater than `key` within the // `btree_multiset`. // // Supports heterogeneous lookup, provided that the set has a compatible // heterogeneous comparator. using Base::upper_bound; // btree_multiset::get_allocator() // // Returns the allocator function associated with this `btree_multiset`. using Base::get_allocator; // btree_multiset::key_comp(); // // Returns the key comparator associated with this `btree_multiset`. using Base::key_comp; // btree_multiset::value_comp(); // // Returns the value comparator associated with this `btree_multiset`. The // keys to sort the elements are the values themselves, therefore `value_comp` // and its sibling member function `key_comp` are equivalent. using Base::value_comp; }; // absl::swap(absl::btree_multiset<>, absl::btree_multiset<>) // // Swaps the contents of two `absl::btree_multiset` containers. template void swap(btree_multiset &x, btree_multiset &y) { return x.swap(y); } // absl::erase_if(absl::btree_multiset<>, Pred) // // Erases all elements that satisfy the predicate pred from the container. // Returns the number of erased elements. template typename btree_multiset::size_type erase_if( btree_multiset & set, Pred pred) { return container_internal::btree_access::erase_if(set, std::move(pred)); } namespace container_internal { // This type implements the necessary functions from the // absl::container_internal::slot_type interface for btree_(multi)set. 
template struct set_slot_policy { using slot_type = Key; using value_type = Key; using mutable_value_type = Key; static value_type &element(slot_type *slot) { return *slot; } static const value_type &element(const slot_type *slot) { return *slot; } template static void construct(Alloc *alloc, slot_type *slot, Args &&...args) { absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); } template static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { absl::allocator_traits::construct(*alloc, slot, std::move(*other)); } template static void construct(Alloc *alloc, slot_type *slot, const slot_type *other) { absl::allocator_traits::construct(*alloc, slot, *other); } template static void destroy(Alloc *alloc, slot_type *slot) { absl::allocator_traits::destroy(*alloc, slot); } template static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { construct(alloc, new_slot, old_slot); destroy(alloc, old_slot); } }; // A parameters structure for holding the type parameters for a btree_set. // Compare and Alloc should be nothrow copy-constructible. template struct set_params : common_params> { using value_type = Key; using slot_type = typename set_params::common_params::slot_type; template static const V &key(const V &value) { return value; } static const Key &key(const slot_type *slot) { return *slot; } static const Key &key(slot_type *slot) { return *slot; } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_SET_H_ abseil-20220623.1/absl/container/btree_test.cc000066400000000000000000003165601430371345100207400ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/btree_test.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/internal/counting_allocator.h" #include "absl/container/internal/test_instance_tracker.h" #include "absl/flags/flag.h" #include "absl/hash/hash_testing.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/types/compare.h" ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests"); namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::test_internal::CopyableMovableInstance; using ::absl::test_internal::InstanceTracker; using ::absl::test_internal::MovableOnlyInstance; using ::testing::ElementsAre; using ::testing::ElementsAreArray; using ::testing::IsEmpty; using ::testing::IsNull; using ::testing::Pair; using ::testing::SizeIs; template void CheckPairEquals(const T &x, const U &y) { ABSL_INTERNAL_CHECK(x == y, "Values are unequal."); } template void CheckPairEquals(const std::pair &x, const std::pair &y) { CheckPairEquals(x.first, y.first); CheckPairEquals(x.second, y.second); } } // namespace // The base class for a sorted associative container checker. 
TreeType is the // container type to check and CheckerType is the container type to check // against. TreeType is expected to be btree_{set,map,multiset,multimap} and // CheckerType is expected to be {set,map,multiset,multimap}. template class base_checker { public: using key_type = typename TreeType::key_type; using value_type = typename TreeType::value_type; using key_compare = typename TreeType::key_compare; using pointer = typename TreeType::pointer; using const_pointer = typename TreeType::const_pointer; using reference = typename TreeType::reference; using const_reference = typename TreeType::const_reference; using size_type = typename TreeType::size_type; using difference_type = typename TreeType::difference_type; using iterator = typename TreeType::iterator; using const_iterator = typename TreeType::const_iterator; using reverse_iterator = typename TreeType::reverse_iterator; using const_reverse_iterator = typename TreeType::const_reverse_iterator; public: base_checker() : const_tree_(tree_) {} base_checker(const base_checker &other) : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {} template base_checker(InputIterator b, InputIterator e) : tree_(b, e), const_tree_(tree_), checker_(b, e) {} iterator begin() { return tree_.begin(); } const_iterator begin() const { return tree_.begin(); } iterator end() { return tree_.end(); } const_iterator end() const { return tree_.end(); } reverse_iterator rbegin() { return tree_.rbegin(); } const_reverse_iterator rbegin() const { return tree_.rbegin(); } reverse_iterator rend() { return tree_.rend(); } const_reverse_iterator rend() const { return tree_.rend(); } template IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const { if (tree_iter == tree_.end()) { ABSL_INTERNAL_CHECK(checker_iter == checker_.end(), "Checker iterator not at end."); } else { CheckPairEquals(*tree_iter, *checker_iter); } return tree_iter; } template IterType riter_check(IterType tree_iter, CheckerIterType 
checker_iter) const { if (tree_iter == tree_.rend()) { ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(), "Checker iterator not at rend."); } else { CheckPairEquals(*tree_iter, *checker_iter); } return tree_iter; } void value_check(const value_type &v) { typename KeyOfValue::type key_of_value; const key_type &key = key_of_value(v); CheckPairEquals(*find(key), v); lower_bound(key); upper_bound(key); equal_range(key); contains(key); count(key); } void erase_check(const key_type &key) { EXPECT_FALSE(tree_.contains(key)); EXPECT_EQ(tree_.find(key), const_tree_.end()); EXPECT_FALSE(const_tree_.contains(key)); EXPECT_EQ(const_tree_.find(key), tree_.end()); EXPECT_EQ(tree_.equal_range(key).first, const_tree_.equal_range(key).second); } iterator lower_bound(const key_type &key) { return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); } const_iterator lower_bound(const key_type &key) const { return iter_check(tree_.lower_bound(key), checker_.lower_bound(key)); } iterator upper_bound(const key_type &key) { return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); } const_iterator upper_bound(const key_type &key) const { return iter_check(tree_.upper_bound(key), checker_.upper_bound(key)); } std::pair equal_range(const key_type &key) { std::pair checker_res = checker_.equal_range(key); std::pair tree_res = tree_.equal_range(key); iter_check(tree_res.first, checker_res.first); iter_check(tree_res.second, checker_res.second); return tree_res; } std::pair equal_range( const key_type &key) const { std::pair checker_res = checker_.equal_range(key); std::pair tree_res = tree_.equal_range(key); iter_check(tree_res.first, checker_res.first); iter_check(tree_res.second, checker_res.second); return tree_res; } iterator find(const key_type &key) { return iter_check(tree_.find(key), checker_.find(key)); } const_iterator find(const key_type &key) const { return iter_check(tree_.find(key), checker_.find(key)); } bool contains(const key_type &key) const { return 
find(key) != end(); } size_type count(const key_type &key) const { size_type res = checker_.count(key); EXPECT_EQ(res, tree_.count(key)); return res; } base_checker &operator=(const base_checker &other) { tree_ = other.tree_; checker_ = other.checker_; return *this; } int erase(const key_type &key) { int size = tree_.size(); int res = checker_.erase(key); EXPECT_EQ(res, tree_.count(key)); EXPECT_EQ(res, tree_.erase(key)); EXPECT_EQ(tree_.count(key), 0); EXPECT_EQ(tree_.size(), size - res); erase_check(key); return res; } iterator erase(iterator iter) { key_type key = iter.key(); int size = tree_.size(); int count = tree_.count(key); auto checker_iter = checker_.lower_bound(key); for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) { ++checker_iter; } auto checker_next = checker_iter; ++checker_next; checker_.erase(checker_iter); iter = tree_.erase(iter); EXPECT_EQ(tree_.size(), checker_.size()); EXPECT_EQ(tree_.size(), size - 1); EXPECT_EQ(tree_.count(key), count - 1); if (count == 1) { erase_check(key); } return iter_check(iter, checker_next); } void erase(iterator begin, iterator end) { int size = tree_.size(); int count = std::distance(begin, end); auto checker_begin = checker_.lower_bound(begin.key()); for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) { ++checker_begin; } auto checker_end = end == tree_.end() ? 
checker_.end() : checker_.lower_bound(end.key()); if (end != tree_.end()) { for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) { ++checker_end; } } const auto checker_ret = checker_.erase(checker_begin, checker_end); const auto tree_ret = tree_.erase(begin, end); EXPECT_EQ(std::distance(checker_.begin(), checker_ret), std::distance(tree_.begin(), tree_ret)); EXPECT_EQ(tree_.size(), checker_.size()); EXPECT_EQ(tree_.size(), size - count); } void clear() { tree_.clear(); checker_.clear(); } void swap(base_checker &other) { tree_.swap(other.tree_); checker_.swap(other.checker_); } void verify() const { tree_.verify(); EXPECT_EQ(tree_.size(), checker_.size()); // Move through the forward iterators using increment. auto checker_iter = checker_.begin(); const_iterator tree_iter(tree_.begin()); for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) { CheckPairEquals(*tree_iter, *checker_iter); } // Move through the forward iterators using decrement. for (int n = tree_.size() - 1; n >= 0; --n) { iter_check(tree_iter, checker_iter); --tree_iter; --checker_iter; } EXPECT_EQ(tree_iter, tree_.begin()); EXPECT_EQ(checker_iter, checker_.begin()); // Move through the reverse iterators using increment. auto checker_riter = checker_.rbegin(); const_reverse_iterator tree_riter(tree_.rbegin()); for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) { CheckPairEquals(*tree_riter, *checker_riter); } // Move through the reverse iterators using decrement. 
for (int n = tree_.size() - 1; n >= 0; --n) { riter_check(tree_riter, checker_riter); --tree_riter; --checker_riter; } EXPECT_EQ(tree_riter, tree_.rbegin()); EXPECT_EQ(checker_riter, checker_.rbegin()); } const TreeType &tree() const { return tree_; } size_type size() const { EXPECT_EQ(tree_.size(), checker_.size()); return tree_.size(); } size_type max_size() const { return tree_.max_size(); } bool empty() const { EXPECT_EQ(tree_.empty(), checker_.empty()); return tree_.empty(); } protected: TreeType tree_; const TreeType &const_tree_; CheckerType checker_; }; namespace { // A checker for unique sorted associative containers. TreeType is expected to // be btree_{set,map} and CheckerType is expected to be {set,map}. template class unique_checker : public base_checker { using super_type = base_checker; public: using iterator = typename super_type::iterator; using value_type = typename super_type::value_type; public: unique_checker() : super_type() {} unique_checker(const unique_checker &other) : super_type(other) {} template unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {} unique_checker &operator=(const unique_checker &) = default; // Insertion routines. 
std::pair insert(const value_type &v) { int size = this->tree_.size(); std::pair checker_res = this->checker_.insert(v); std::pair tree_res = this->tree_.insert(v); CheckPairEquals(*tree_res.first, *checker_res.first); EXPECT_EQ(tree_res.second, checker_res.second); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + tree_res.second); return tree_res; } iterator insert(iterator position, const value_type &v) { int size = this->tree_.size(); std::pair checker_res = this->checker_.insert(v); iterator tree_res = this->tree_.insert(position, v); CheckPairEquals(*tree_res, *checker_res.first); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + checker_res.second); return tree_res; } template void insert(InputIterator b, InputIterator e) { for (; b != e; ++b) { insert(*b); } } }; // A checker for multiple sorted associative containers. TreeType is expected // to be btree_{multiset,multimap} and CheckerType is expected to be // {multiset,multimap}. template class multi_checker : public base_checker { using super_type = base_checker; public: using iterator = typename super_type::iterator; using value_type = typename super_type::value_type; public: multi_checker() : super_type() {} multi_checker(const multi_checker &other) : super_type(other) {} template multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {} multi_checker &operator=(const multi_checker &) = default; // Insertion routines. 
iterator insert(const value_type &v) { int size = this->tree_.size(); auto checker_res = this->checker_.insert(v); iterator tree_res = this->tree_.insert(v); CheckPairEquals(*tree_res, *checker_res); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + 1); return tree_res; } iterator insert(iterator position, const value_type &v) { int size = this->tree_.size(); auto checker_res = this->checker_.insert(v); iterator tree_res = this->tree_.insert(position, v); CheckPairEquals(*tree_res, *checker_res); EXPECT_EQ(this->tree_.size(), this->checker_.size()); EXPECT_EQ(this->tree_.size(), size + 1); return tree_res; } template void insert(InputIterator b, InputIterator e) { for (; b != e; ++b) { insert(*b); } } }; template void DoTest(const char *name, T *b, const std::vector &values) { typename KeyOfValue::type key_of_value; T &mutable_b = *b; const T &const_b = *b; // Test insert. for (int i = 0; i < values.size(); ++i) { mutable_b.insert(values[i]); mutable_b.value_check(values[i]); } ASSERT_EQ(mutable_b.size(), values.size()); const_b.verify(); // Test copy constructor. T b_copy(const_b); EXPECT_EQ(b_copy.size(), const_b.size()); for (int i = 0; i < values.size(); ++i) { CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]); } // Test range constructor. T b_range(const_b.begin(), const_b.end()); EXPECT_EQ(b_range.size(), const_b.size()); for (int i = 0; i < values.size(); ++i) { CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); } // Test range insertion for values that already exist. b_range.insert(b_copy.begin(), b_copy.end()); b_range.verify(); // Test range insertion for new values. b_range.clear(); b_range.insert(b_copy.begin(), b_copy.end()); EXPECT_EQ(b_range.size(), b_copy.size()); for (int i = 0; i < values.size(); ++i) { CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); } // Test assignment to self. Nothing should change. 
b_range.operator=(b_range); EXPECT_EQ(b_range.size(), b_copy.size()); // Test assignment of new values. b_range.clear(); b_range = b_copy; EXPECT_EQ(b_range.size(), b_copy.size()); // Test swap. b_range.clear(); b_range.swap(b_copy); EXPECT_EQ(b_copy.size(), 0); EXPECT_EQ(b_range.size(), const_b.size()); for (int i = 0; i < values.size(); ++i) { CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); } b_range.swap(b_copy); // Test non-member function swap. swap(b_range, b_copy); EXPECT_EQ(b_copy.size(), 0); EXPECT_EQ(b_range.size(), const_b.size()); for (int i = 0; i < values.size(); ++i) { CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]); } swap(b_range, b_copy); // Test erase via values. for (int i = 0; i < values.size(); ++i) { mutable_b.erase(key_of_value(values[i])); // Erasing a non-existent key should have no effect. ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0); } const_b.verify(); EXPECT_EQ(const_b.size(), 0); // Test erase via iterators. mutable_b = b_copy; for (int i = 0; i < values.size(); ++i) { mutable_b.erase(mutable_b.find(key_of_value(values[i]))); } const_b.verify(); EXPECT_EQ(const_b.size(), 0); // Test insert with hint. for (int i = 0; i < values.size(); i++) { mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]); } const_b.verify(); // Test range erase. mutable_b.erase(mutable_b.begin(), mutable_b.end()); EXPECT_EQ(mutable_b.size(), 0); const_b.verify(); // First half. mutable_b = b_copy; typename T::iterator mutable_iter_end = mutable_b.begin(); for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end; mutable_b.erase(mutable_b.begin(), mutable_iter_end); EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2); const_b.verify(); // Second half. 
mutable_b = b_copy; typename T::iterator mutable_iter_begin = mutable_b.begin(); for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin; mutable_b.erase(mutable_iter_begin, mutable_b.end()); EXPECT_EQ(mutable_b.size(), values.size() / 2); const_b.verify(); // Second quarter. mutable_b = b_copy; mutable_iter_begin = mutable_b.begin(); for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin; mutable_iter_end = mutable_iter_begin; for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end; mutable_b.erase(mutable_iter_begin, mutable_iter_end); EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4); const_b.verify(); mutable_b.clear(); } template void ConstTest() { using value_type = typename T::value_type; typename KeyOfValue::type key_of_value; T mutable_b; const T &const_b = mutable_b; // Insert a single value into the container and test looking it up. value_type value = Generator(2)(2); mutable_b.insert(value); EXPECT_TRUE(mutable_b.contains(key_of_value(value))); EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end()); EXPECT_TRUE(const_b.contains(key_of_value(value))); EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end()); EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value); EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end()); EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value); // We can only create a non-const iterator from a non-const container. typename T::iterator mutable_iter(mutable_b.begin()); EXPECT_EQ(mutable_iter, const_b.begin()); EXPECT_NE(mutable_iter, const_b.end()); EXPECT_EQ(const_b.begin(), mutable_iter); EXPECT_NE(const_b.end(), mutable_iter); typename T::reverse_iterator mutable_riter(mutable_b.rbegin()); EXPECT_EQ(mutable_riter, const_b.rbegin()); EXPECT_NE(mutable_riter, const_b.rend()); EXPECT_EQ(const_b.rbegin(), mutable_riter); EXPECT_NE(const_b.rend(), mutable_riter); // We can create a const iterator from a non-const iterator. 
typename T::const_iterator const_iter(mutable_iter); EXPECT_EQ(const_iter, mutable_b.begin()); EXPECT_NE(const_iter, mutable_b.end()); EXPECT_EQ(mutable_b.begin(), const_iter); EXPECT_NE(mutable_b.end(), const_iter); typename T::const_reverse_iterator const_riter(mutable_riter); EXPECT_EQ(const_riter, mutable_b.rbegin()); EXPECT_NE(const_riter, mutable_b.rend()); EXPECT_EQ(mutable_b.rbegin(), const_riter); EXPECT_NE(mutable_b.rend(), const_riter); // Make sure various methods can be invoked on a const container. const_b.verify(); ASSERT_TRUE(!const_b.empty()); EXPECT_EQ(const_b.size(), 1); EXPECT_GT(const_b.max_size(), 0); EXPECT_TRUE(const_b.contains(key_of_value(value))); EXPECT_EQ(const_b.count(key_of_value(value)), 1); } template void BtreeTest() { ConstTest(); using V = typename remove_pair_const::type; const std::vector random_values = GenerateValuesWithSeed( absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), GTEST_FLAG_GET(random_seed)); unique_checker container; // Test key insertion/deletion in sorted order. std::vector sorted_values(random_values); std::sort(sorted_values.begin(), sorted_values.end()); DoTest("sorted: ", &container, sorted_values); // Test key insertion/deletion in reverse sorted order. std::reverse(sorted_values.begin(), sorted_values.end()); DoTest("rsorted: ", &container, sorted_values); // Test key insertion/deletion in random order. DoTest("random: ", &container, random_values); } template void BtreeMultiTest() { ConstTest(); using V = typename remove_pair_const::type; const std::vector random_values = GenerateValuesWithSeed( absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values), GTEST_FLAG_GET(random_seed)); multi_checker container; // Test keys in sorted order. std::vector sorted_values(random_values); std::sort(sorted_values.begin(), sorted_values.end()); DoTest("sorted: ", &container, sorted_values); // Test keys in reverse sorted order. 
std::reverse(sorted_values.begin(), sorted_values.end()); DoTest("rsorted: ", &container, sorted_values); // Test keys in random order. DoTest("random: ", &container, random_values); // Test keys in random order w/ duplicates. std::vector duplicate_values(random_values); duplicate_values.insert(duplicate_values.end(), random_values.begin(), random_values.end()); DoTest("duplicates:", &container, duplicate_values); // Test all identical keys. std::vector identical_values(100); std::fill(identical_values.begin(), identical_values.end(), Generator(2)(2)); DoTest("identical: ", &container, identical_values); } template struct PropagatingCountingAlloc : public CountingAllocator { using propagate_on_container_copy_assignment = std::true_type; using propagate_on_container_move_assignment = std::true_type; using propagate_on_container_swap = std::true_type; using Base = CountingAllocator; using Base::Base; template explicit PropagatingCountingAlloc(const PropagatingCountingAlloc &other) : Base(other.bytes_used_) {} template struct rebind { using other = PropagatingCountingAlloc; }; }; template void BtreeAllocatorTest() { using value_type = typename T::value_type; int64_t bytes1 = 0, bytes2 = 0; PropagatingCountingAlloc allocator1(&bytes1); PropagatingCountingAlloc allocator2(&bytes2); Generator generator(1000); // Test that we allocate properly aligned memory. If we don't, then Layout // will assert fail. auto unused1 = allocator1.allocate(1); auto unused2 = allocator2.allocate(1); // Test copy assignment { T b1(typename T::key_compare(), allocator1); T b2(typename T::key_compare(), allocator2); int64_t original_bytes1 = bytes1; b1.insert(generator(0)); EXPECT_GT(bytes1, original_bytes1); // This should propagate the allocator. b1 = b2; EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b2.size(), 0); EXPECT_EQ(bytes1, original_bytes1); for (int i = 1; i < 1000; i++) { b1.insert(generator(i)); } // We should have allocated out of allocator2. 
EXPECT_GT(bytes2, bytes1); } // Test move assignment { T b1(typename T::key_compare(), allocator1); T b2(typename T::key_compare(), allocator2); int64_t original_bytes1 = bytes1; b1.insert(generator(0)); EXPECT_GT(bytes1, original_bytes1); // This should propagate the allocator. b1 = std::move(b2); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(bytes1, original_bytes1); for (int i = 1; i < 1000; i++) { b1.insert(generator(i)); } // We should have allocated out of allocator2. EXPECT_GT(bytes2, bytes1); } // Test swap { T b1(typename T::key_compare(), allocator1); T b2(typename T::key_compare(), allocator2); int64_t original_bytes1 = bytes1; b1.insert(generator(0)); EXPECT_GT(bytes1, original_bytes1); // This should swap the allocators. swap(b1, b2); EXPECT_EQ(b1.size(), 0); EXPECT_EQ(b2.size(), 1); EXPECT_GT(bytes1, original_bytes1); for (int i = 1; i < 1000; i++) { b1.insert(generator(i)); } // We should have allocated out of allocator2. EXPECT_GT(bytes2, bytes1); } allocator1.deallocate(unused1, 1); allocator2.deallocate(unused2, 1); } template void BtreeMapTest() { using value_type = typename T::value_type; using mapped_type = typename T::mapped_type; mapped_type m = Generator(0)(0); (void)m; T b; // Verify we can insert using operator[]. for (int i = 0; i < 1000; i++) { value_type v = Generator(1000)(i); b[v.first] = v.second; } EXPECT_EQ(b.size(), 1000); // Test whether we can use the "->" operator on iterators and // reverse_iterators. This stresses the btree_map_params::pair_pointer // mechanism. 
EXPECT_EQ(b.begin()->first, Generator(1000)(0).first); EXPECT_EQ(b.begin()->second, Generator(1000)(0).second); EXPECT_EQ(b.rbegin()->first, Generator(1000)(999).first); EXPECT_EQ(b.rbegin()->second, Generator(1000)(999).second); } template void BtreeMultiMapTest() { using mapped_type = typename T::mapped_type; mapped_type m = Generator(0)(0); (void)m; } template void SetTest() { EXPECT_EQ( sizeof(absl::btree_set), 2 * sizeof(void *) + sizeof(typename absl::btree_set::size_type)); using BtreeSet = absl::btree_set; using CountingBtreeSet = absl::btree_set, PropagatingCountingAlloc>; BtreeTest>(); BtreeAllocatorTest(); } template void MapTest() { EXPECT_EQ( sizeof(absl::btree_map), 2 * sizeof(void *) + sizeof(typename absl::btree_map::size_type)); using BtreeMap = absl::btree_map; using CountingBtreeMap = absl::btree_map, PropagatingCountingAlloc>>; BtreeTest>(); BtreeAllocatorTest(); BtreeMapTest(); } TEST(Btree, set_int32) { SetTest(); } TEST(Btree, set_int64) { SetTest(); } TEST(Btree, set_string) { SetTest(); } TEST(Btree, set_cord) { SetTest(); } TEST(Btree, set_pair) { SetTest>(); } TEST(Btree, map_int32) { MapTest(); } TEST(Btree, map_int64) { MapTest(); } TEST(Btree, map_string) { MapTest(); } TEST(Btree, map_cord) { MapTest(); } TEST(Btree, map_pair) { MapTest>(); } template void MultiSetTest() { EXPECT_EQ( sizeof(absl::btree_multiset), 2 * sizeof(void *) + sizeof(typename absl::btree_multiset::size_type)); using BtreeMSet = absl::btree_multiset; using CountingBtreeMSet = absl::btree_multiset, PropagatingCountingAlloc>; BtreeMultiTest>(); BtreeAllocatorTest(); } template void MultiMapTest() { EXPECT_EQ(sizeof(absl::btree_multimap), 2 * sizeof(void *) + sizeof(typename absl::btree_multimap::size_type)); using BtreeMMap = absl::btree_multimap; using CountingBtreeMMap = absl::btree_multimap, PropagatingCountingAlloc>>; BtreeMultiTest>(); BtreeMultiMapTest(); BtreeAllocatorTest(); } TEST(Btree, multiset_int32) { MultiSetTest(); } TEST(Btree, multiset_int64) { 
MultiSetTest(); } TEST(Btree, multiset_string) { MultiSetTest(); } TEST(Btree, multiset_cord) { MultiSetTest(); } TEST(Btree, multiset_pair) { MultiSetTest>(); } TEST(Btree, multimap_int32) { MultiMapTest(); } TEST(Btree, multimap_int64) { MultiMapTest(); } TEST(Btree, multimap_string) { MultiMapTest(); } TEST(Btree, multimap_cord) { MultiMapTest(); } TEST(Btree, multimap_pair) { MultiMapTest>(); } struct CompareIntToString { bool operator()(const std::string &a, const std::string &b) const { return a < b; } bool operator()(const std::string &a, int b) const { return a < absl::StrCat(b); } bool operator()(int a, const std::string &b) const { return absl::StrCat(a) < b; } using is_transparent = void; }; struct NonTransparentCompare { template bool operator()(const T &t, const U &u) const { // Treating all comparators as transparent can cause inefficiencies (see // N3657 C++ proposal). Test that for comparators without 'is_transparent' // alias (like this one), we do not attempt heterogeneous lookup. EXPECT_TRUE((std::is_same())); return t < u; } }; template bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) { return true; } template bool CanEraseWithEmptyBrace(T, ...) 
{ return false; } template void TestHeterogeneous(T table) { auto lb = table.lower_bound("3"); EXPECT_EQ(lb, table.lower_bound(3)); EXPECT_NE(lb, table.lower_bound(4)); EXPECT_EQ(lb, table.lower_bound({"3"})); EXPECT_NE(lb, table.lower_bound({})); auto ub = table.upper_bound("3"); EXPECT_EQ(ub, table.upper_bound(3)); EXPECT_NE(ub, table.upper_bound(5)); EXPECT_EQ(ub, table.upper_bound({"3"})); EXPECT_NE(ub, table.upper_bound({})); auto er = table.equal_range("3"); EXPECT_EQ(er, table.equal_range(3)); EXPECT_NE(er, table.equal_range(4)); EXPECT_EQ(er, table.equal_range({"3"})); EXPECT_NE(er, table.equal_range({})); auto it = table.find("3"); EXPECT_EQ(it, table.find(3)); EXPECT_NE(it, table.find(4)); EXPECT_EQ(it, table.find({"3"})); EXPECT_NE(it, table.find({})); EXPECT_TRUE(table.contains(3)); EXPECT_FALSE(table.contains(4)); EXPECT_TRUE(table.count({"3"})); EXPECT_FALSE(table.contains({})); EXPECT_EQ(1, table.count(3)); EXPECT_EQ(0, table.count(4)); EXPECT_EQ(1, table.count({"3"})); EXPECT_EQ(0, table.count({})); auto copy = table; copy.erase(3); EXPECT_EQ(table.size() - 1, copy.size()); copy.erase(4); EXPECT_EQ(table.size() - 1, copy.size()); copy.erase({"5"}); EXPECT_EQ(table.size() - 2, copy.size()); EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr)); // Also run it with const T&. 
if (std::is_class()) TestHeterogeneous(table); } TEST(Btree, HeterogeneousLookup) { TestHeterogeneous(btree_set{"1", "3", "5"}); TestHeterogeneous(btree_map{ {"1", 1}, {"3", 3}, {"5", 5}}); TestHeterogeneous( btree_multiset{"1", "3", "5"}); TestHeterogeneous(btree_multimap{ {"1", 1}, {"3", 3}, {"5", 5}}); // Only maps have .at() btree_map map{ {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}}; EXPECT_EQ(1, map.at(1)); EXPECT_EQ(3, map.at({"3"})); EXPECT_EQ(-1, map.at({})); const auto &cmap = map; EXPECT_EQ(1, cmap.at(1)); EXPECT_EQ(3, cmap.at({"3"})); EXPECT_EQ(-1, cmap.at({})); } TEST(Btree, NoHeterogeneousLookupWithoutAlias) { using StringSet = absl::btree_set; StringSet s; ASSERT_TRUE(s.insert("hello").second); ASSERT_TRUE(s.insert("world").second); EXPECT_TRUE(s.end() == s.find("blah")); EXPECT_TRUE(s.begin() == s.lower_bound("hello")); EXPECT_EQ(1, s.count("world")); EXPECT_TRUE(s.contains("hello")); EXPECT_TRUE(s.contains("world")); EXPECT_FALSE(s.contains("blah")); using StringMultiSet = absl::btree_multiset; StringMultiSet ms; ms.insert("hello"); ms.insert("world"); ms.insert("world"); EXPECT_TRUE(ms.end() == ms.find("blah")); EXPECT_TRUE(ms.begin() == ms.lower_bound("hello")); EXPECT_EQ(2, ms.count("world")); EXPECT_TRUE(ms.contains("hello")); EXPECT_TRUE(ms.contains("world")); EXPECT_FALSE(ms.contains("blah")); } TEST(Btree, DefaultTransparent) { { // `int` does not have a default transparent comparator. // The input value is converted to key_type. btree_set s = {1}; double d = 1.1; EXPECT_EQ(s.begin(), s.find(d)); EXPECT_TRUE(s.contains(d)); } { // `std::string` has heterogeneous support. 
btree_set s = {"A"}; EXPECT_EQ(s.begin(), s.find(absl::string_view("A"))); EXPECT_TRUE(s.contains(absl::string_view("A"))); } } class StringLike { public: StringLike() = default; StringLike(const char *s) : s_(s) { // NOLINT ++constructor_calls_; } bool operator<(const StringLike &a) const { return s_ < a.s_; } static void clear_constructor_call_count() { constructor_calls_ = 0; } static int constructor_calls() { return constructor_calls_; } private: static int constructor_calls_; std::string s_; }; int StringLike::constructor_calls_ = 0; TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) { using StringSet = absl::btree_set; StringSet s; for (int i = 0; i < 100; ++i) { ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second); } StringLike::clear_constructor_call_count(); s.find("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.contains("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.count("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.lower_bound("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.upper_bound("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.equal_range("50"); ASSERT_EQ(1, StringLike::constructor_calls()); StringLike::clear_constructor_call_count(); s.erase("50"); ASSERT_EQ(1, StringLike::constructor_calls()); } // Verify that swapping btrees swaps the key comparison functors and that we can // use non-default constructible comparators. 
struct SubstringLess { SubstringLess() = delete; explicit SubstringLess(int length) : n(length) {} bool operator()(const std::string &a, const std::string &b) const { return absl::string_view(a).substr(0, n) < absl::string_view(b).substr(0, n); } int n; }; TEST(Btree, SwapKeyCompare) { using SubstringSet = absl::btree_set; SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type()); SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type()); ASSERT_TRUE(s1.insert("a").second); ASSERT_FALSE(s1.insert("aa").second); ASSERT_TRUE(s2.insert("a").second); ASSERT_TRUE(s2.insert("aa").second); ASSERT_FALSE(s2.insert("aaa").second); swap(s1, s2); ASSERT_TRUE(s1.insert("b").second); ASSERT_TRUE(s1.insert("bb").second); ASSERT_FALSE(s1.insert("bbb").second); ASSERT_TRUE(s2.insert("b").second); ASSERT_FALSE(s2.insert("bb").second); } TEST(Btree, UpperBoundRegression) { // Regress a bug where upper_bound would default-construct a new key_compare // instead of copying the existing one. using SubstringSet = absl::btree_set; SubstringSet my_set(SubstringLess(3)); my_set.insert("aab"); my_set.insert("abb"); // We call upper_bound("aaa"). If this correctly uses the length 3 // comparator, aaa < aab < abb, so we should get aab as the result. // If it instead uses the default-constructed length 2 comparator, // aa == aa < ab, so we'll get abb as our result. 
SubstringSet::iterator it = my_set.upper_bound("aaa"); ASSERT_TRUE(it != my_set.end()); EXPECT_EQ("aab", *it); } TEST(Btree, Comparison) { const int kSetSize = 1201; absl::btree_set my_set; for (int i = 0; i < kSetSize; ++i) { my_set.insert(i); } absl::btree_set my_set_copy(my_set); EXPECT_TRUE(my_set_copy == my_set); EXPECT_TRUE(my_set == my_set_copy); EXPECT_FALSE(my_set_copy != my_set); EXPECT_FALSE(my_set != my_set_copy); my_set.insert(kSetSize); EXPECT_FALSE(my_set_copy == my_set); EXPECT_FALSE(my_set == my_set_copy); EXPECT_TRUE(my_set_copy != my_set); EXPECT_TRUE(my_set != my_set_copy); my_set.erase(kSetSize - 1); EXPECT_FALSE(my_set_copy == my_set); EXPECT_FALSE(my_set == my_set_copy); EXPECT_TRUE(my_set_copy != my_set); EXPECT_TRUE(my_set != my_set_copy); absl::btree_map my_map; for (int i = 0; i < kSetSize; ++i) { my_map[std::string(i, 'a')] = i; } absl::btree_map my_map_copy(my_map); EXPECT_TRUE(my_map_copy == my_map); EXPECT_TRUE(my_map == my_map_copy); EXPECT_FALSE(my_map_copy != my_map); EXPECT_FALSE(my_map != my_map_copy); ++my_map_copy[std::string(7, 'a')]; EXPECT_FALSE(my_map_copy == my_map); EXPECT_FALSE(my_map == my_map_copy); EXPECT_TRUE(my_map_copy != my_map); EXPECT_TRUE(my_map != my_map_copy); my_map_copy = my_map; my_map["hello"] = kSetSize; EXPECT_FALSE(my_map_copy == my_map); EXPECT_FALSE(my_map == my_map_copy); EXPECT_TRUE(my_map_copy != my_map); EXPECT_TRUE(my_map != my_map_copy); my_map.erase(std::string(kSetSize - 1, 'a')); EXPECT_FALSE(my_map_copy == my_map); EXPECT_FALSE(my_map == my_map_copy); EXPECT_TRUE(my_map_copy != my_map); EXPECT_TRUE(my_map != my_map_copy); } TEST(Btree, RangeCtorSanity) { std::vector ivec; ivec.push_back(1); std::map imap; imap.insert(std::make_pair(1, 2)); absl::btree_multiset tmset(ivec.begin(), ivec.end()); absl::btree_multimap tmmap(imap.begin(), imap.end()); absl::btree_set tset(ivec.begin(), ivec.end()); absl::btree_map tmap(imap.begin(), imap.end()); EXPECT_EQ(1, tmset.size()); EXPECT_EQ(1, 
tmmap.size()); EXPECT_EQ(1, tset.size()); EXPECT_EQ(1, tmap.size()); } } // namespace class BtreeNodePeer { public: // Yields the size of a leaf node with a specific number of values. template constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) { return btree_node< set_params, std::allocator, /*TargetNodeSize=*/256, // This parameter isn't used here. /*Multi=*/false>>::SizeWithNSlots(target_values_per_node); } // Yields the number of slots in a (non-root) leaf node for this btree. template constexpr static size_t GetNumSlotsPerNode() { return btree_node::kNodeSlots; } template constexpr static size_t GetMaxFieldType() { return std::numeric_limits< typename btree_node::field_type>::max(); } template constexpr static bool UsesLinearNodeSearch() { return btree_node::use_linear_search::value; } template constexpr static bool UsesGenerations() { return Btree::params_type::kEnableGenerations; } }; namespace { class BtreeMapTest : public ::testing::Test { public: struct Key {}; struct Cmp { template bool operator()(T, T) const { return false; } }; struct KeyLin { using absl_btree_prefer_linear_node_search = std::true_type; }; struct CmpLin : Cmp { using absl_btree_prefer_linear_node_search = std::true_type; }; struct KeyBin { using absl_btree_prefer_linear_node_search = std::false_type; }; struct CmpBin : Cmp { using absl_btree_prefer_linear_node_search = std::false_type; }; template static bool IsLinear() { return BtreeNodePeer::UsesLinearNodeSearch>(); } }; TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) { // Test requesting linear search by directly exporting an alias. 
EXPECT_FALSE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); } TEST_F(BtreeMapTest, LinearChoiceTree) { // Cmp has precedence, and is forcing binary EXPECT_FALSE((IsLinear())); EXPECT_FALSE((IsLinear())); EXPECT_FALSE((IsLinear())); EXPECT_FALSE((IsLinear())); EXPECT_FALSE((IsLinear())); // Cmp has precedence, and is forcing linear EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_TRUE((IsLinear())); // Cmp has no preference, Key determines linear vs binary. EXPECT_FALSE((IsLinear())); EXPECT_TRUE((IsLinear())); EXPECT_FALSE((IsLinear())); // arithmetic key w/ std::less or std::greater: linear EXPECT_TRUE((IsLinear>())); EXPECT_TRUE((IsLinear>())); // arithmetic key w/ custom compare: binary EXPECT_FALSE((IsLinear())); // non-arithmetic key: binary EXPECT_FALSE((IsLinear>())); } TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) { absl::btree_map> m; std::unique_ptr &v = m["A"]; EXPECT_TRUE(v == nullptr); v = absl::make_unique("X"); auto iter = m.find("A"); EXPECT_EQ("X", *iter->second); } TEST(Btree, InitializerListConstructor) { absl::btree_set set({"a", "b"}); EXPECT_EQ(set.count("a"), 1); EXPECT_EQ(set.count("b"), 1); absl::btree_multiset mset({1, 1, 4}); EXPECT_EQ(mset.count(1), 2); EXPECT_EQ(mset.count(4), 1); absl::btree_map map({{1, 5}, {2, 10}}); EXPECT_EQ(map[1], 5); EXPECT_EQ(map[2], 10); absl::btree_multimap mmap({{1, 5}, {1, 10}}); auto range = mmap.equal_range(1); auto it = range.first; ASSERT_NE(it, range.second); EXPECT_EQ(it->second, 5); ASSERT_NE(++it, range.second); EXPECT_EQ(it->second, 10); EXPECT_EQ(++it, range.second); } TEST(Btree, InitializerListInsert) { absl::btree_set set; set.insert({"a", "b"}); EXPECT_EQ(set.count("a"), 1); EXPECT_EQ(set.count("b"), 1); absl::btree_multiset mset; mset.insert({1, 1, 4}); EXPECT_EQ(mset.count(1), 2); EXPECT_EQ(mset.count(4), 1); absl::btree_map map; map.insert({{1, 5}, {2, 10}}); // Test that inserting 
one element using an initializer list also works. map.insert({3, 15}); EXPECT_EQ(map[1], 5); EXPECT_EQ(map[2], 10); EXPECT_EQ(map[3], 15); absl::btree_multimap mmap; mmap.insert({{1, 5}, {1, 10}}); auto range = mmap.equal_range(1); auto it = range.first; ASSERT_NE(it, range.second); EXPECT_EQ(it->second, 5); ASSERT_NE(++it, range.second); EXPECT_EQ(it->second, 10); EXPECT_EQ(++it, range.second); } template void AssertKeyCompareStringAdapted() { using Adapted = typename key_compare_adapter::type; static_assert( std::is_same::value || std::is_same::value, "key_compare_adapter should have string-adapted this comparator."); } template void AssertKeyCompareNotStringAdapted() { using Adapted = typename key_compare_adapter::type; static_assert( !std::is_same::value && !std::is_same::value, "key_compare_adapter shouldn't have string-adapted this comparator."); } TEST(Btree, KeyCompareAdapter) { AssertKeyCompareStringAdapted, std::string>(); AssertKeyCompareStringAdapted, std::string>(); AssertKeyCompareStringAdapted, absl::string_view>(); AssertKeyCompareStringAdapted, absl::string_view>(); AssertKeyCompareStringAdapted, absl::Cord>(); AssertKeyCompareStringAdapted, absl::Cord>(); AssertKeyCompareNotStringAdapted, int>(); AssertKeyCompareNotStringAdapted, int>(); } TEST(Btree, RValueInsert) { InstanceTracker tracker; absl::btree_set set; set.insert(MovableOnlyInstance(1)); set.insert(MovableOnlyInstance(3)); MovableOnlyInstance two(2); set.insert(set.find(MovableOnlyInstance(3)), std::move(two)); auto it = set.find(MovableOnlyInstance(2)); ASSERT_NE(it, set.end()); ASSERT_NE(++it, set.end()); EXPECT_EQ(it->value(), 3); absl::btree_multiset mset; MovableOnlyInstance zero(0); MovableOnlyInstance zero2(0); mset.insert(std::move(zero)); mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2)); EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2); absl::btree_map map; std::pair p1 = {1, MovableOnlyInstance(5)}; std::pair p2 = {2, MovableOnlyInstance(10)}; std::pair p3 = {3, 
MovableOnlyInstance(15)}; map.insert(std::move(p1)); map.insert(std::move(p3)); map.insert(map.find(3), std::move(p2)); ASSERT_NE(map.find(2), map.end()); EXPECT_EQ(map.find(2)->second.value(), 10); absl::btree_multimap mmap; std::pair p4 = {1, MovableOnlyInstance(5)}; std::pair p5 = {1, MovableOnlyInstance(10)}; mmap.insert(std::move(p4)); mmap.insert(mmap.find(1), std::move(p5)); auto range = mmap.equal_range(1); auto it1 = range.first; ASSERT_NE(it1, range.second); EXPECT_EQ(it1->second.value(), 10); ASSERT_NE(++it1, range.second); EXPECT_EQ(it1->second.value(), 5); EXPECT_EQ(++it1, range.second); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.swaps(), 0); } template struct CheckedCompareOptedOutCmp : Cmp, BtreeTestOnlyCheckedCompareOptOutBase { using Cmp::Cmp; CheckedCompareOptedOutCmp() {} CheckedCompareOptedOutCmp(Cmp cmp) : Cmp(std::move(cmp)) {} // NOLINT }; // A btree set with a specific number of values per node. Opt out of // checked_compare so that we can expect exact numbers of comparisons. template > class SizedBtreeSet : public btree_set_container, std::allocator, BtreeNodePeer::GetTargetNodeSize(TargetValuesPerNode), /*Multi=*/false>>> { using Base = typename SizedBtreeSet::btree_set_container; public: SizedBtreeSet() {} using Base::Base; }; template void ExpectOperationCounts(const int expected_moves, const int expected_comparisons, const std::vector &values, InstanceTracker *tracker, Set *set) { for (const int v : values) set->insert(MovableOnlyInstance(v)); set->clear(); EXPECT_EQ(tracker->moves(), expected_moves); EXPECT_EQ(tracker->comparisons(), expected_comparisons); EXPECT_EQ(tracker->copies(), 0); EXPECT_EQ(tracker->swaps(), 0); tracker->ResetCopiesMovesSwaps(); } // Note: when the values in this test change, it is expected to have an impact // on performance. TEST(Btree, MovesComparisonsCopiesSwapsTracking) { InstanceTracker tracker; // Note: this is minimum number of values per node. 
SizedBtreeSet set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet set61; SizedBtreeSet set100; // Don't depend on flags for random values because then the expectations will // fail if the flags change. std::vector values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { EXPECT_EQ( BtreeNodePeer::GetNumSlotsPerNode>(), // When we have generations, there is one fewer slot. BtreeNodePeer::UsesGenerations>() ? 60 : 61); } // Test key insertion/deletion in random order. ExpectOperationCounts(56540, 134212, values, &tracker, &set4); ExpectOperationCounts(386718, 129807, values, &tracker, &set61); ExpectOperationCounts(586761, 130310, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. std::reverse(values.begin(), values.end()); ExpectOperationCounts(54949, 127531, values, &tracker, &set4); ExpectOperationCounts(338813, 118266, values, &tracker, &set61); ExpectOperationCounts(534529, 125279, values, &tracker, &set100); } struct MovableOnlyInstanceThreeWayCompare { absl::weak_ordering operator()(const MovableOnlyInstance &a, const MovableOnlyInstance &b) const { return a.compare(b); } }; // Note: when the values in this test change, it is expected to have an impact // on performance. TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) { InstanceTracker tracker; // Note: this is minimum number of values per node. 
SizedBtreeSet set4; // Note: this is the default number of values per node for a set of int32s // (with 64-bit pointers). SizedBtreeSet set61; SizedBtreeSet set100; // Don't depend on flags for random values because then the expectations will // fail if the flags change. std::vector values = GenerateValuesWithSeed(10000, 1 << 22, /*seed=*/23); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 4); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 61); EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode(), 100); if (sizeof(void *) == 8) { EXPECT_EQ( BtreeNodePeer::GetNumSlotsPerNode>(), // When we have generations, there is one fewer slot. BtreeNodePeer::UsesGenerations>() ? 60 : 61); } // Test key insertion/deletion in random order. ExpectOperationCounts(56540, 124221, values, &tracker, &set4); ExpectOperationCounts(386718, 119816, values, &tracker, &set61); ExpectOperationCounts(586761, 120319, values, &tracker, &set100); // Test key insertion/deletion in sorted order. std::sort(values.begin(), values.end()); ExpectOperationCounts(24972, 85563, values, &tracker, &set4); ExpectOperationCounts(20208, 87757, values, &tracker, &set61); ExpectOperationCounts(20124, 96583, values, &tracker, &set100); // Test key insertion/deletion in reverse sorted order. 
std::reverse(values.begin(), values.end()); ExpectOperationCounts(54949, 117532, values, &tracker, &set4); ExpectOperationCounts(338813, 108267, values, &tracker, &set61); ExpectOperationCounts(534529, 115280, values, &tracker, &set100); } struct NoDefaultCtor { int num; explicit NoDefaultCtor(int i) : num(i) {} friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) { return a.num < b.num; } }; TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) { absl::btree_map m; for (int i = 1; i <= 99; ++i) { SCOPED_TRACE(i); EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second); } EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second); auto iter99 = m.find(NoDefaultCtor(99)); ASSERT_NE(iter99, m.end()); EXPECT_EQ(iter99->second.num, 1); auto iter1 = m.find(NoDefaultCtor(1)); ASSERT_NE(iter1, m.end()); EXPECT_EQ(iter1->second.num, 99); auto iter50 = m.find(NoDefaultCtor(50)); ASSERT_NE(iter50, m.end()); EXPECT_EQ(iter50->second.num, 50); auto iter25 = m.find(NoDefaultCtor(25)); ASSERT_NE(iter25, m.end()); EXPECT_EQ(iter25->second.num, 75); } TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) { absl::btree_multimap m; for (int i = 1; i <= 99; ++i) { SCOPED_TRACE(i); m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)); } auto iter99 = m.find(NoDefaultCtor(99)); ASSERT_NE(iter99, m.end()); EXPECT_EQ(iter99->second.num, 1); auto iter1 = m.find(NoDefaultCtor(1)); ASSERT_NE(iter1, m.end()); EXPECT_EQ(iter1->second.num, 99); auto iter50 = m.find(NoDefaultCtor(50)); ASSERT_NE(iter50, m.end()); EXPECT_EQ(iter50->second.num, 50); auto iter25 = m.find(NoDefaultCtor(25)); ASSERT_NE(iter25, m.end()); EXPECT_EQ(iter25->second.num, 75); } TEST(Btree, MapAt) { absl::btree_map map = {{1, 2}, {2, 4}}; EXPECT_EQ(map.at(1), 2); EXPECT_EQ(map.at(2), 4); map.at(2) = 8; const absl::btree_map &const_map = map; EXPECT_EQ(const_map.at(1), 2); EXPECT_EQ(const_map.at(2), 8); #ifdef ABSL_HAVE_EXCEPTIONS EXPECT_THROW(map.at(3), std::out_of_range); #else 
EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at"); #endif } TEST(Btree, BtreeMultisetEmplace) { const int value_to_insert = 123456; absl::btree_multiset s; auto iter = s.emplace(value_to_insert); ASSERT_NE(iter, s.end()); EXPECT_EQ(*iter, value_to_insert); auto iter2 = s.emplace(value_to_insert); EXPECT_NE(iter2, iter); ASSERT_NE(iter2, s.end()); EXPECT_EQ(*iter2, value_to_insert); auto result = s.equal_range(value_to_insert); EXPECT_EQ(std::distance(result.first, result.second), 2); } TEST(Btree, BtreeMultisetEmplaceHint) { const int value_to_insert = 123456; absl::btree_multiset s; auto iter = s.emplace(value_to_insert); ASSERT_NE(iter, s.end()); EXPECT_EQ(*iter, value_to_insert); auto emplace_iter = s.emplace_hint(iter, value_to_insert); EXPECT_NE(emplace_iter, iter); ASSERT_NE(emplace_iter, s.end()); EXPECT_EQ(*emplace_iter, value_to_insert); } TEST(Btree, BtreeMultimapEmplace) { const int key_to_insert = 123456; const char value0[] = "a"; absl::btree_multimap s; auto iter = s.emplace(key_to_insert, value0); ASSERT_NE(iter, s.end()); EXPECT_EQ(iter->first, key_to_insert); EXPECT_EQ(iter->second, value0); const char value1[] = "b"; auto iter2 = s.emplace(key_to_insert, value1); EXPECT_NE(iter2, iter); ASSERT_NE(iter2, s.end()); EXPECT_EQ(iter2->first, key_to_insert); EXPECT_EQ(iter2->second, value1); auto result = s.equal_range(key_to_insert); EXPECT_EQ(std::distance(result.first, result.second), 2); } TEST(Btree, BtreeMultimapEmplaceHint) { const int key_to_insert = 123456; const char value0[] = "a"; absl::btree_multimap s; auto iter = s.emplace(key_to_insert, value0); ASSERT_NE(iter, s.end()); EXPECT_EQ(iter->first, key_to_insert); EXPECT_EQ(iter->second, value0); const char value1[] = "b"; auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1); EXPECT_NE(emplace_iter, iter); ASSERT_NE(emplace_iter, s.end()); EXPECT_EQ(emplace_iter->first, key_to_insert); EXPECT_EQ(emplace_iter->second, value1); } TEST(Btree, ConstIteratorAccessors) { 
absl::btree_set set; for (int i = 0; i < 100; ++i) { set.insert(i); } auto it = set.cbegin(); auto r_it = set.crbegin(); for (int i = 0; i < 100; ++i, ++it, ++r_it) { ASSERT_EQ(*it, i); ASSERT_EQ(*r_it, 99 - i); } EXPECT_EQ(it, set.cend()); EXPECT_EQ(r_it, set.crend()); } TEST(Btree, StrSplitCompatible) { const absl::btree_set split_set = absl::StrSplit("a,b,c", ','); const absl::btree_set expected_set = {"a", "b", "c"}; EXPECT_EQ(split_set, expected_set); } TEST(Btree, KeyComp) { absl::btree_set s; EXPECT_TRUE(s.key_comp()(1, 2)); EXPECT_FALSE(s.key_comp()(2, 2)); EXPECT_FALSE(s.key_comp()(2, 1)); absl::btree_map m1; EXPECT_TRUE(m1.key_comp()(1, 2)); EXPECT_FALSE(m1.key_comp()(2, 2)); EXPECT_FALSE(m1.key_comp()(2, 1)); // Even though we internally adapt the comparator of `m2` to be three-way and // heterogeneous, the comparator we expose through key_comp() is the original // unadapted comparator. absl::btree_map m2; EXPECT_TRUE(m2.key_comp()("a", "b")); EXPECT_FALSE(m2.key_comp()("b", "b")); EXPECT_FALSE(m2.key_comp()("b", "a")); } TEST(Btree, ValueComp) { absl::btree_set s; EXPECT_TRUE(s.value_comp()(1, 2)); EXPECT_FALSE(s.value_comp()(2, 2)); EXPECT_FALSE(s.value_comp()(2, 1)); absl::btree_map m1; EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0))); EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0))); EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0))); // Even though we internally adapt the comparator of `m2` to be three-way and // heterogeneous, the comparator we expose through value_comp() is based on // the original unadapted comparator. absl::btree_map m2; EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0))); EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0))); EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0))); } // Test that we have the protected members from the std::map::value_compare API. 
// See https://en.cppreference.com/w/cpp/container/map/value_compare. TEST(Btree, MapValueCompProtected) { struct key_compare { bool operator()(int l, int r) const { return l < r; } int id; }; using value_compare = absl::btree_map::value_compare; struct value_comp_child : public value_compare { explicit value_comp_child(key_compare kc) : value_compare(kc) {} int GetId() const { return comp.id; } }; value_comp_child c(key_compare{10}); EXPECT_EQ(c.GetId(), 10); } TEST(Btree, DefaultConstruction) { absl::btree_set s; absl::btree_map m; absl::btree_multiset ms; absl::btree_multimap mm; EXPECT_TRUE(s.empty()); EXPECT_TRUE(m.empty()); EXPECT_TRUE(ms.empty()); EXPECT_TRUE(mm.empty()); } TEST(Btree, SwissTableHashable) { static constexpr int kValues = 10000; std::vector values(kValues); std::iota(values.begin(), values.end(), 0); std::vector> map_values; for (int v : values) map_values.emplace_back(v, -v); using set = absl::btree_set; EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ set{}, set{1}, set{2}, set{1, 2}, set{2, 1}, set(values.begin(), values.end()), set(values.rbegin(), values.rend()), })); using mset = absl::btree_multiset; EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ mset{}, mset{1}, mset{1, 1}, mset{2}, mset{2, 2}, mset{1, 2}, mset{1, 1, 2}, mset{1, 2, 2}, mset{1, 1, 2, 2}, mset(values.begin(), values.end()), mset(values.rbegin(), values.rend()), })); using map = absl::btree_map; EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ map{}, map{{1, 0}}, map{{1, 1}}, map{{2, 0}}, map{{2, 2}}, map{{1, 0}, {2, 1}}, map(map_values.begin(), map_values.end()), map(map_values.rbegin(), map_values.rend()), })); using mmap = absl::btree_multimap; EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({ mmap{}, mmap{{1, 0}}, mmap{{1, 1}}, mmap{{1, 0}, {1, 1}}, mmap{{1, 1}, {1, 0}}, mmap{{2, 0}}, mmap{{2, 2}}, mmap{{1, 0}, {2, 1}}, mmap(map_values.begin(), map_values.end()), mmap(map_values.rbegin(), map_values.rend()), })); } TEST(Btree, 
ComparableSet) { absl::btree_set s1 = {1, 2}; absl::btree_set s2 = {2, 3}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_LE(s1, s1); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); EXPECT_GE(s1, s1); } TEST(Btree, ComparableSetsDifferentLength) { absl::btree_set s1 = {1, 2}; absl::btree_set s2 = {1, 2, 3}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); } TEST(Btree, ComparableMultiset) { absl::btree_multiset s1 = {1, 2}; absl::btree_multiset s2 = {2, 3}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_LE(s1, s1); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); EXPECT_GE(s1, s1); } TEST(Btree, ComparableMap) { absl::btree_map s1 = {{1, 2}}; absl::btree_map s2 = {{2, 3}}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_LE(s1, s1); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); EXPECT_GE(s1, s1); } TEST(Btree, ComparableMultimap) { absl::btree_multimap s1 = {{1, 2}}; absl::btree_multimap s2 = {{2, 3}}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_LE(s1, s1); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); EXPECT_GE(s1, s1); } TEST(Btree, ComparableSetWithCustomComparator) { // As specified by // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section // [container.requirements.general].12, ordering associative containers always // uses default '<' operator // - even if otherwise the container uses custom functor. 
absl::btree_set> s1 = {1, 2}; absl::btree_set> s2 = {2, 3}; EXPECT_LT(s1, s2); EXPECT_LE(s1, s2); EXPECT_LE(s1, s1); EXPECT_GT(s2, s1); EXPECT_GE(s2, s1); EXPECT_GE(s1, s1); } TEST(Btree, EraseReturnsIterator) { absl::btree_set set = {1, 2, 3, 4, 5}; auto result_it = set.erase(set.begin(), set.find(3)); EXPECT_EQ(result_it, set.find(3)); result_it = set.erase(set.find(5)); EXPECT_EQ(result_it, set.end()); } TEST(Btree, ExtractAndInsertNodeHandleSet) { absl::btree_set src1 = {1, 2, 3, 4, 5}; auto nh = src1.extract(src1.find(3)); EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5)); absl::btree_set other; absl::btree_set::insert_return_type res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(3)); EXPECT_EQ(res.position, other.find(3)); EXPECT_TRUE(res.inserted); EXPECT_TRUE(res.node.empty()); absl::btree_set src2 = {3, 4}; nh = src2.extract(src2.find(3)); EXPECT_THAT(src2, ElementsAre(4)); res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(3)); EXPECT_EQ(res.position, other.find(3)); EXPECT_FALSE(res.inserted); ASSERT_FALSE(res.node.empty()); EXPECT_EQ(res.node.value(), 3); } template void TestExtractWithTrackingForSet() { InstanceTracker tracker; { Set s; // Add enough elements to make sure we test internal nodes too. 
const size_t kSize = 1000; while (s.size() < kSize) { s.insert(MovableOnlyInstance(s.size())); } for (int i = 0; i < kSize; ++i) { // Extract with key auto nh = s.extract(MovableOnlyInstance(i)); EXPECT_EQ(s.size(), kSize - 1); EXPECT_EQ(nh.value().value(), i); // Insert with node s.insert(std::move(nh)); EXPECT_EQ(s.size(), kSize); // Extract with iterator auto it = s.find(MovableOnlyInstance(i)); nh = s.extract(it); EXPECT_EQ(s.size(), kSize - 1); EXPECT_EQ(nh.value().value(), i); // Insert with node and hint s.insert(s.begin(), std::move(nh)); EXPECT_EQ(s.size(), kSize); } } EXPECT_EQ(0, tracker.instances()); } template void TestExtractWithTrackingForMap() { InstanceTracker tracker; { Map m; // Add enough elements to make sure we test internal nodes too. const size_t kSize = 1000; while (m.size() < kSize) { m.insert( {CopyableMovableInstance(m.size()), MovableOnlyInstance(m.size())}); } for (int i = 0; i < kSize; ++i) { // Extract with key auto nh = m.extract(CopyableMovableInstance(i)); EXPECT_EQ(m.size(), kSize - 1); EXPECT_EQ(nh.key().value(), i); EXPECT_EQ(nh.mapped().value(), i); // Insert with node m.insert(std::move(nh)); EXPECT_EQ(m.size(), kSize); // Extract with iterator auto it = m.find(CopyableMovableInstance(i)); nh = m.extract(it); EXPECT_EQ(m.size(), kSize - 1); EXPECT_EQ(nh.key().value(), i); EXPECT_EQ(nh.mapped().value(), i); // Insert with node and hint m.insert(m.begin(), std::move(nh)); EXPECT_EQ(m.size(), kSize); } } EXPECT_EQ(0, tracker.instances()); } TEST(Btree, ExtractTracking) { TestExtractWithTrackingForSet>(); TestExtractWithTrackingForSet>(); TestExtractWithTrackingForMap< absl::btree_map>(); TestExtractWithTrackingForMap< absl::btree_multimap>(); } TEST(Btree, ExtractAndInsertNodeHandleMultiSet) { absl::btree_multiset src1 = {1, 2, 3, 3, 4, 5}; auto nh = src1.extract(src1.find(3)); EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5)); absl::btree_multiset other; auto res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(3)); 
EXPECT_EQ(res, other.find(3)); absl::btree_multiset src2 = {3, 4}; nh = src2.extract(src2.find(3)); EXPECT_THAT(src2, ElementsAre(4)); res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(3, 3)); EXPECT_EQ(res, ++other.find(3)); } TEST(Btree, ExtractAndInsertNodeHandleMap) { absl::btree_map src1 = {{1, 2}, {3, 4}, {5, 6}}; auto nh = src1.extract(src1.find(3)); EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); absl::btree_map other; absl::btree_map::insert_return_type res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(Pair(3, 4))); EXPECT_EQ(res.position, other.find(3)); EXPECT_TRUE(res.inserted); EXPECT_TRUE(res.node.empty()); absl::btree_map src2 = {{3, 6}}; nh = src2.extract(src2.find(3)); EXPECT_TRUE(src2.empty()); res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(Pair(3, 4))); EXPECT_EQ(res.position, other.find(3)); EXPECT_FALSE(res.inserted); ASSERT_FALSE(res.node.empty()); EXPECT_EQ(res.node.key(), 3); EXPECT_EQ(res.node.mapped(), 6); } TEST(Btree, ExtractAndInsertNodeHandleMultiMap) { absl::btree_multimap src1 = {{1, 2}, {3, 4}, {5, 6}}; auto nh = src1.extract(src1.find(3)); EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6))); absl::btree_multimap other; auto res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(Pair(3, 4))); EXPECT_EQ(res, other.find(3)); absl::btree_multimap src2 = {{3, 6}}; nh = src2.extract(src2.find(3)); EXPECT_TRUE(src2.empty()); res = other.insert(std::move(nh)); EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6))); EXPECT_EQ(res, ++other.begin()); } TEST(Btree, ExtractMultiMapEquivalentKeys) { // Note: using string keys means a three-way comparator. 
absl::btree_multimap map; for (int i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { map.insert({absl::StrCat(i), j}); } } for (int i = 0; i < 100; ++i) { const std::string key = absl::StrCat(i); auto node_handle = map.extract(key); EXPECT_EQ(node_handle.key(), key); EXPECT_EQ(node_handle.mapped(), 0) << i; } for (int i = 0; i < 100; ++i) { const std::string key = absl::StrCat(i); auto node_handle = map.extract(key); EXPECT_EQ(node_handle.key(), key); EXPECT_EQ(node_handle.mapped(), 1) << i; } } // For multisets, insert with hint also affects correctness because we need to // insert immediately before the hint if possible. struct InsertMultiHintData { int key; int not_key; bool operator==(const InsertMultiHintData other) const { return key == other.key && not_key == other.not_key; } }; struct InsertMultiHintDataKeyCompare { using is_transparent = void; bool operator()(const InsertMultiHintData a, const InsertMultiHintData b) const { return a.key < b.key; } bool operator()(const int a, const InsertMultiHintData b) const { return a < b.key; } bool operator()(const InsertMultiHintData a, const int b) const { return a.key < b; } }; TEST(Btree, InsertHintNodeHandle) { // For unique sets, insert with hint is just a performance optimization. // Test that insert works correctly when the hint is right or wrong. { absl::btree_set src = {1, 2, 3, 4, 5}; auto nh = src.extract(src.find(3)); EXPECT_THAT(src, ElementsAre(1, 2, 4, 5)); absl::btree_set other = {0, 100}; // Test a correct hint. auto it = other.insert(other.lower_bound(3), std::move(nh)); EXPECT_THAT(other, ElementsAre(0, 3, 100)); EXPECT_EQ(it, other.find(3)); nh = src.extract(src.find(5)); // Test an incorrect hint. 
it = other.insert(other.end(), std::move(nh)); EXPECT_THAT(other, ElementsAre(0, 3, 5, 100)); EXPECT_EQ(it, other.find(5)); } absl::btree_multiset src = {{1, 2}, {3, 4}, {3, 5}}; auto nh = src.extract(src.lower_bound(3)); EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4})); absl::btree_multiset other = {{3, 1}, {3, 2}, {3, 3}}; auto it = other.insert(--other.end(), std::move(nh)); EXPECT_THAT( other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); EXPECT_EQ(it, --(--other.end())); nh = src.extract(src.find(3)); EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5})); it = other.insert(other.begin(), std::move(nh)); EXPECT_THAT(other, ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3})); EXPECT_EQ(it, other.begin()); } struct IntCompareToCmp { absl::weak_ordering operator()(int a, int b) const { if (a < b) return absl::weak_ordering::less; if (a > b) return absl::weak_ordering::greater; return absl::weak_ordering::equivalent; } }; TEST(Btree, MergeIntoUniqueContainers) { absl::btree_set src1 = {1, 2, 3}; absl::btree_multiset src2 = {3, 4, 4, 5}; absl::btree_set dst; dst.merge(src1); EXPECT_TRUE(src1.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3)); dst.merge(src2); EXPECT_THAT(src2, ElementsAre(3, 4)); EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); } TEST(Btree, MergeIntoUniqueContainersWithCompareTo) { absl::btree_set src1 = {1, 2, 3}; absl::btree_multiset src2 = {3, 4, 4, 5}; absl::btree_set dst; dst.merge(src1); EXPECT_TRUE(src1.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3)); dst.merge(src2); EXPECT_THAT(src2, ElementsAre(3, 4)); EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5)); } TEST(Btree, MergeIntoMultiContainers) { absl::btree_set src1 = {1, 2, 3}; absl::btree_multiset src2 = {3, 4, 4, 5}; absl::btree_multiset dst; dst.merge(src1); EXPECT_TRUE(src1.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3)); 
dst.merge(src2); EXPECT_TRUE(src2.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); } TEST(Btree, MergeIntoMultiContainersWithCompareTo) { absl::btree_set src1 = {1, 2, 3}; absl::btree_multiset src2 = {3, 4, 4, 5}; absl::btree_multiset dst; dst.merge(src1); EXPECT_TRUE(src1.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3)); dst.merge(src2); EXPECT_TRUE(src2.empty()); EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5)); } TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) { absl::btree_map src1 = {{1, 1}, {2, 2}, {3, 3}}; absl::btree_multimap> src2 = { {5, 5}, {4, 1}, {4, 4}, {3, 2}}; absl::btree_multimap dst; dst.merge(src1); EXPECT_TRUE(src1.empty()); EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3))); dst.merge(src2); EXPECT_TRUE(src2.empty()); EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2), Pair(4, 1), Pair(4, 4), Pair(5, 5))); } TEST(Btree, MergeIntoSetMovableOnly) { absl::btree_set src; src.insert(MovableOnlyInstance(1)); absl::btree_multiset dst1; dst1.insert(MovableOnlyInstance(2)); absl::btree_set dst2; // Test merge into multiset. dst1.merge(src); EXPECT_TRUE(src.empty()); // ElementsAre/ElementsAreArray don't work with move-only types. ASSERT_THAT(dst1, SizeIs(2)); EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1)); EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2)); // Test merge into set. dst2.merge(dst1); EXPECT_TRUE(dst1.empty()); ASSERT_THAT(dst2, SizeIs(2)); EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1)); EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2)); } struct KeyCompareToWeakOrdering { template absl::weak_ordering operator()(const T &a, const T &b) const { return a < b ? absl::weak_ordering::less : a == b ? absl::weak_ordering::equivalent : absl::weak_ordering::greater; } }; struct KeyCompareToStrongOrdering { template absl::strong_ordering operator()(const T &a, const T &b) const { return a < b ? absl::strong_ordering::less : a == b ? 
absl::strong_ordering::equal : absl::strong_ordering::greater; } }; TEST(Btree, UserProvidedKeyCompareToComparators) { absl::btree_set weak_set = {1, 2, 3}; EXPECT_TRUE(weak_set.contains(2)); EXPECT_FALSE(weak_set.contains(4)); absl::btree_set strong_set = {1, 2, 3}; EXPECT_TRUE(strong_set.contains(2)); EXPECT_FALSE(strong_set.contains(4)); } TEST(Btree, TryEmplaceBasicTest) { absl::btree_map m; // Should construct a string from the literal. m.try_emplace(1, "one"); EXPECT_EQ(1, m.size()); // Try other string constructors and const lvalue key. const int key(42); m.try_emplace(key, 3, 'a'); m.try_emplace(2, std::string("two")); EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); EXPECT_THAT(m, ElementsAreArray(std::vector>{ {1, "one"}, {2, "two"}, {42, "aaa"}})); } TEST(Btree, TryEmplaceWithHintWorks) { // Use a counting comparator here to verify that hint is used. int calls = 0; auto cmp = [&calls](int x, int y) { ++calls; return x < y; }; using Cmp = decltype(cmp); // Use a map that is opted out of key_compare being adapted so we can expect // strict comparison call limits. 
absl::btree_map> m(cmp); for (int i = 0; i < 128; ++i) { m.emplace(i, i); } // Sanity check for the comparator calls = 0; m.emplace(127, 127); EXPECT_GE(calls, 4); // Try with begin hint: calls = 0; auto it = m.try_emplace(m.begin(), -1, -1); EXPECT_EQ(129, m.size()); EXPECT_EQ(it, m.begin()); EXPECT_LE(calls, 2); // Try with end hint: calls = 0; std::pair pair1024 = {1024, 1024}; it = m.try_emplace(m.end(), pair1024.first, pair1024.second); EXPECT_EQ(130, m.size()); EXPECT_EQ(it, --m.end()); EXPECT_LE(calls, 2); // Try value already present, bad hint; ensure no duplicate added: calls = 0; it = m.try_emplace(m.end(), 16, 17); EXPECT_EQ(130, m.size()); EXPECT_GE(calls, 4); EXPECT_EQ(it, m.find(16)); // Try value already present, hint points directly to it: calls = 0; it = m.try_emplace(it, 16, 17); EXPECT_EQ(130, m.size()); EXPECT_LE(calls, 2); EXPECT_EQ(it, m.find(16)); m.erase(2); EXPECT_EQ(129, m.size()); auto hint = m.find(3); // Try emplace in the middle of two other elements. calls = 0; m.try_emplace(hint, 2, 2); EXPECT_EQ(130, m.size()); EXPECT_LE(calls, 2); EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); } TEST(Btree, TryEmplaceWithBadHint) { absl::btree_map m = {{1, 1}, {9, 9}}; // Bad hint (too small), should still emplace: auto it = m.try_emplace(m.begin(), 2, 2); EXPECT_EQ(it, ++m.begin()); EXPECT_THAT(m, ElementsAreArray( std::vector>{{1, 1}, {2, 2}, {9, 9}})); // Bad hint, too large this time: it = m.try_emplace(++(++m.begin()), 0, 0); EXPECT_EQ(it, m.begin()); EXPECT_THAT(m, ElementsAreArray(std::vector>{ {0, 0}, {1, 1}, {2, 2}, {9, 9}})); } TEST(Btree, TryEmplaceMaintainsSortedOrder) { absl::btree_map m; std::pair pair5 = {5, "five"}; // Test both lvalue & rvalue emplace. 
m.try_emplace(10, "ten"); m.try_emplace(pair5.first, pair5.second); EXPECT_EQ(2, m.size()); EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); int int100{100}; m.try_emplace(int100, "hundred"); m.try_emplace(1, "one"); EXPECT_EQ(4, m.size()); EXPECT_TRUE(std::is_sorted(m.begin(), m.end())); } TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) { absl::btree_map m; m.try_emplace(m.end(), 1); EXPECT_EQ(0, m[1]); } TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) { absl::btree_map m; m.try_emplace(m.end(), 1, 10, 'a'); EXPECT_EQ(std::string(10, 'a'), m[1]); } TEST(Btree, MoveAssignmentAllocatorPropagation) { InstanceTracker tracker; int64_t bytes1 = 0, bytes2 = 0; PropagatingCountingAlloc allocator1(&bytes1); PropagatingCountingAlloc allocator2(&bytes2); std::less cmp; // Test propagating allocator_type. { absl::btree_set, PropagatingCountingAlloc> set1(cmp, allocator1), set2(cmp, allocator2); for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); tracker.ResetCopiesMovesSwaps(); set2 = std::move(set1); EXPECT_EQ(tracker.moves(), 0); } // Test non-propagating allocator_type with equal allocators. { absl::btree_set, CountingAllocator> set1(cmp, allocator1), set2(cmp, allocator1); for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); tracker.ResetCopiesMovesSwaps(); set2 = std::move(set1); EXPECT_EQ(tracker.moves(), 0); } // Test non-propagating allocator_type with different allocators. { absl::btree_set, CountingAllocator> set1(cmp, allocator1), set2(cmp, allocator2); for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i)); tracker.ResetCopiesMovesSwaps(); set2 = std::move(set1); EXPECT_GE(tracker.moves(), 100); } } TEST(Btree, EmptyTree) { absl::btree_set s; EXPECT_TRUE(s.empty()); EXPECT_EQ(s.size(), 0); EXPECT_GT(s.max_size(), 0); } bool IsEven(int k) { return k % 2 == 0; } TEST(Btree, EraseIf) { // Test that erase_if works with all the container types and supports lambdas. 
{ absl::btree_set s = {1, 3, 5, 6, 100}; EXPECT_EQ(erase_if(s, [](int k) { return k > 3; }), 3); EXPECT_THAT(s, ElementsAre(1, 3)); } { absl::btree_multiset s = {1, 3, 3, 5, 6, 6, 100}; EXPECT_EQ(erase_if(s, [](int k) { return k <= 3; }), 3); EXPECT_THAT(s, ElementsAre(5, 6, 6, 100)); } { absl::btree_map m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}}; EXPECT_EQ( erase_if(m, [](std::pair kv) { return kv.first > 3; }), 2); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3))); } { absl::btree_multimap m = {{1, 1}, {3, 3}, {3, 6}, {6, 6}, {6, 7}, {100, 6}}; EXPECT_EQ( erase_if(m, [](std::pair kv) { return kv.second == 6; }), 3); EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7))); } // Test that erasing all elements from a large set works and test support for // function pointers. { absl::btree_set s; for (int i = 0; i < 1000; ++i) s.insert(2 * i); EXPECT_EQ(erase_if(s, IsEven), 1000); EXPECT_THAT(s, IsEmpty()); } // Test that erase_if supports other format of function pointers. { absl::btree_set s = {1, 3, 5, 6, 100}; EXPECT_EQ(erase_if(s, &IsEven), 2); EXPECT_THAT(s, ElementsAre(1, 3, 5)); } // Test that erase_if invokes the predicate once per element. { absl::btree_set s; for (int i = 0; i < 1000; ++i) s.insert(i); int pred_calls = 0; EXPECT_EQ(erase_if(s, [&pred_calls](int k) { ++pred_calls; return k % 2; }), 500); EXPECT_THAT(s, SizeIs(500)); EXPECT_EQ(pred_calls, 1000); } } TEST(Btree, InsertOrAssign) { absl::btree_map m = {{1, 1}, {3, 3}}; using value_type = typename decltype(m)::value_type; auto ret = m.insert_or_assign(4, 4); EXPECT_EQ(*ret.first, value_type(4, 4)); EXPECT_TRUE(ret.second); ret = m.insert_or_assign(3, 100); EXPECT_EQ(*ret.first, value_type(3, 100)); EXPECT_FALSE(ret.second); auto hint_ret = m.insert_or_assign(ret.first, 3, 200); EXPECT_EQ(*hint_ret, value_type(3, 200)); hint_ret = m.insert_or_assign(m.find(1), 0, 1); EXPECT_EQ(*hint_ret, value_type(0, 1)); // Test with bad hint. 
hint_ret = m.insert_or_assign(m.end(), -1, 1); EXPECT_EQ(*hint_ret, value_type(-1, 1)); EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200), Pair(4, 4))); } TEST(Btree, InsertOrAssignMovableOnly) { absl::btree_map m; using value_type = typename decltype(m)::value_type; auto ret = m.insert_or_assign(4, MovableOnlyInstance(4)); EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4))); EXPECT_TRUE(ret.second); ret = m.insert_or_assign(4, MovableOnlyInstance(100)); EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100))); EXPECT_FALSE(ret.second); auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200)); EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200))); EXPECT_EQ(m.size(), 2); } TEST(Btree, BitfieldArgument) { union { int n : 1; }; n = 0; absl::btree_map m; m.erase(n); m.count(n); m.find(n); m.contains(n); m.equal_range(n); m.insert_or_assign(n, n); m.insert_or_assign(m.end(), n, n); m.try_emplace(n); m.try_emplace(m.end(), n); m.at(n); m[n]; } TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) { const absl::string_view names[] = {"n1", "n2"}; absl::btree_set name_set1{std::begin(names), std::end(names)}; EXPECT_THAT(name_set1, ElementsAreArray(names)); absl::btree_set name_set2; name_set2.insert(std::begin(names), std::end(names)); EXPECT_THAT(name_set2, ElementsAreArray(names)); } // A type that is explicitly convertible from int and counts constructor calls. 
struct ConstructorCounted {
  explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
  bool operator==(int other) const { return i == other; }

  int i;
  static int constructor_calls;
};
int ConstructorCounted::constructor_calls = 0;

// Transparent comparator allowing heterogeneous lookup with plain ints so
// ConstructorCounted objects need only be constructed for actual insertions.
struct ConstructorCountedCompare {
  bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
  bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
  bool operator()(const ConstructorCounted &a,
                  const ConstructorCounted &b) const {
    return a.i < b.i;
  }
  using is_transparent = void;
};

TEST(Btree,
     SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
  const int i[] = {0, 1, 1};
  ConstructorCounted::constructor_calls = 0;

  absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
      std::begin(i), std::end(i)};
  EXPECT_THAT(set, ElementsAre(0, 1));
  // The duplicate 1 must not trigger a third construction.
  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);

  set.insert(std::begin(i), std::end(i));
  EXPECT_THAT(set, ElementsAre(0, 1));
  EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
}

TEST(Btree,
     SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
  const int i[] = {0, 1};

  // std::vector<void*> is explicitly constructible from int (a size).
  absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
  EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));

  absl::btree_set<std::vector<void *>> s2;
  s2.insert(std::begin(i), std::end(i));
  EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
}

// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that
// prevents explicit conversions between pair types.
// We only run this test for the libstdc++ from GCC 7 or newer because we can't
// reliably check the libstdc++ version prior to that release.
#if !defined(__GLIBCXX__) || \ (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7) TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) { const std::pair names[] = {{"n1", 1}, {"n2", 2}}; absl::btree_map name_map1{std::begin(names), std::end(names)}; EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2))); absl::btree_map name_map2; name_map2.insert(std::begin(names), std::end(names)); EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2))); } TEST(Btree, MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) { const std::pair i[] = {{0, 1}, {1, 2}, {1, 3}}; ConstructorCounted::constructor_calls = 0; absl::btree_map map{ std::begin(i), std::end(i)}; EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); EXPECT_EQ(ConstructorCounted::constructor_calls, 2); map.insert(std::begin(i), std::end(i)); EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2))); EXPECT_EQ(ConstructorCounted::constructor_calls, 2); } TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) { const std::pair i[] = {{0, 1}, {1, 2}}; absl::btree_map, int> m1{std::begin(i), std::end(i)}; EXPECT_THAT(m1, ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); absl::btree_map, int> m2; m2.insert(std::begin(i), std::end(i)); EXPECT_THAT(m2, ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2))); } TEST(Btree, HeterogeneousTryEmplace) { absl::btree_map m; std::string s = "key"; absl::string_view sv = s; m.try_emplace(sv, 1); EXPECT_EQ(m[s], 1); m.try_emplace(m.end(), sv, 2); EXPECT_EQ(m[s], 1); } TEST(Btree, HeterogeneousOperatorMapped) { absl::btree_map m; std::string s = "key"; absl::string_view sv = s; m[sv] = 1; EXPECT_EQ(m[s], 1); m[sv] = 2; EXPECT_EQ(m[s], 2); } TEST(Btree, HeterogeneousInsertOrAssign) { absl::btree_map m; std::string s = "key"; absl::string_view sv = s; m.insert_or_assign(sv, 1); EXPECT_EQ(m[s], 1); m.insert_or_assign(m.end(), sv, 2); EXPECT_EQ(m[s], 2); } #endif // This test 
// requires std::launder for mutable key access in node handles.
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
TEST(Btree, NodeHandleMutableKeyAccess) {
  {
    absl::btree_map<std::string, std::string> map;

    map["key1"] = "mapped";

    auto nh = map.extract(map.begin());
    nh.key().resize(3);
    map.insert(std::move(nh));

    EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
  }
  // Also for multimap.
  {
    absl::btree_multimap<std::string, std::string> map;

    map.emplace("key1", "mapped");

    auto nh = map.extract(map.begin());
    nh.key().resize(3);
    map.insert(std::move(nh));

    EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
  }
}
#endif

struct MultiKey {
  int i1;
  int i2;
};

bool operator==(const MultiKey a, const MultiKey b) {
  return a.i1 == b.i1 && a.i2 == b.i2;
}

// A heterogeneous comparator that has different equivalence classes for
// different lookup types.
struct MultiKeyComp {
  using is_transparent = void;
  bool operator()(const MultiKey a, const MultiKey b) const {
    if (a.i1 != b.i1) return a.i1 < b.i1;
    return a.i2 < b.i2;
  }
  bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
  bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
};

// A heterogeneous, three-way comparator that has different equivalence classes
// for different lookup types.
struct MultiKeyThreeWayComp { using is_transparent = void; absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const { if (a.i1 < b.i1) return absl::weak_ordering::less; if (a.i1 > b.i1) return absl::weak_ordering::greater; if (a.i2 < b.i2) return absl::weak_ordering::less; if (a.i2 > b.i2) return absl::weak_ordering::greater; return absl::weak_ordering::equivalent; } absl::weak_ordering operator()(const int a, const MultiKey b) const { if (a < b.i1) return absl::weak_ordering::less; if (a > b.i1) return absl::weak_ordering::greater; return absl::weak_ordering::equivalent; } absl::weak_ordering operator()(const MultiKey a, const int b) const { if (a.i1 < b) return absl::weak_ordering::less; if (a.i1 > b) return absl::weak_ordering::greater; return absl::weak_ordering::equivalent; } }; template class BtreeMultiKeyTest : public ::testing::Test {}; using MultiKeyComps = ::testing::Types; TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps); TYPED_TEST(BtreeMultiKeyTest, EqualRange) { absl::btree_set set; for (int i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { set.insert({i, j}); } } for (int i = 0; i < 100; ++i) { auto equal_range = set.equal_range(i); EXPECT_EQ(equal_range.first->i1, i); EXPECT_EQ(equal_range.first->i2, 0) << i; EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i; } } TYPED_TEST(BtreeMultiKeyTest, Extract) { absl::btree_set set; for (int i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { set.insert({i, j}); } } for (int i = 0; i < 100; ++i) { auto node_handle = set.extract(i); EXPECT_EQ(node_handle.value().i1, i); EXPECT_EQ(node_handle.value().i2, 0) << i; } for (int i = 0; i < 100; ++i) { auto node_handle = set.extract(i); EXPECT_EQ(node_handle.value().i1, i); EXPECT_EQ(node_handle.value().i2, 1) << i; } } TYPED_TEST(BtreeMultiKeyTest, Erase) { absl::btree_set set = { {1, 1}, {2, 1}, {2, 2}, {3, 1}}; EXPECT_EQ(set.erase(2), 2); EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1})); } 
TYPED_TEST(BtreeMultiKeyTest, Count) {
  const absl::btree_set<MultiKey, TypeParam> set = {
      {1, 1}, {2, 1}, {2, 2}, {3, 1}};
  EXPECT_EQ(set.count(2), 2);
}

TEST(Btree, AllocConstructor) {
  using Alloc = CountingAllocator<int>;
  using Set = absl::btree_set<int, std::less<int>, Alloc>;
  int64_t bytes_used = 0;
  Alloc alloc(&bytes_used);
  Set set(alloc);

  set.insert({1, 2, 3});

  EXPECT_THAT(set, ElementsAre(1, 2, 3));
  EXPECT_GT(bytes_used, set.size() * sizeof(int));
}

TEST(Btree, AllocInitializerListConstructor) {
  using Alloc = CountingAllocator<int>;
  using Set = absl::btree_set<int, std::less<int>, Alloc>;
  int64_t bytes_used = 0;
  Alloc alloc(&bytes_used);
  Set set({1, 2, 3}, alloc);

  EXPECT_THAT(set, ElementsAre(1, 2, 3));
  EXPECT_GT(bytes_used, set.size() * sizeof(int));
}

TEST(Btree, AllocRangeConstructor) {
  using Alloc = CountingAllocator<int>;
  using Set = absl::btree_set<int, std::less<int>, Alloc>;
  int64_t bytes_used = 0;
  Alloc alloc(&bytes_used);
  std::vector<int> v = {1, 2, 3};
  Set set(v.begin(), v.end(), alloc);

  EXPECT_THAT(set, ElementsAre(1, 2, 3));
  EXPECT_GT(bytes_used, set.size() * sizeof(int));
}

TEST(Btree, AllocCopyConstructor) {
  using Alloc = CountingAllocator<int>;
  using Set = absl::btree_set<int, std::less<int>, Alloc>;
  int64_t bytes_used1 = 0;
  Alloc alloc1(&bytes_used1);
  Set set1(alloc1);

  set1.insert({1, 2, 3});

  int64_t bytes_used2 = 0;
  Alloc alloc2(&bytes_used2);
  Set set2(set1, alloc2);

  EXPECT_THAT(set1, ElementsAre(1, 2, 3));
  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
  EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
  EXPECT_EQ(bytes_used1, bytes_used2);
}

TEST(Btree, AllocMoveConstructor_SameAlloc) {
  using Alloc = CountingAllocator<int>;
  using Set = absl::btree_set<int, std::less<int>, Alloc>;
  int64_t bytes_used = 0;
  Alloc alloc(&bytes_used);
  Set set1(alloc);

  set1.insert({1, 2, 3});

  const int64_t original_bytes_used = bytes_used;
  EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));

  // Moving with the same allocator steals the nodes: no new allocation.
  Set set2(std::move(set1), alloc);

  EXPECT_THAT(set2, ElementsAre(1, 2, 3));
  EXPECT_EQ(bytes_used, original_bytes_used);
}

TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
  using Alloc = CountingAllocator<int>;
  using Set =
absl::btree_set, Alloc>; int64_t bytes_used1 = 0; Alloc alloc1(&bytes_used1); Set set1(alloc1); set1.insert({1, 2, 3}); const int64_t original_bytes_used = bytes_used1; EXPECT_GT(original_bytes_used, set1.size() * sizeof(int)); int64_t bytes_used2 = 0; Alloc alloc2(&bytes_used2); Set set2(std::move(set1), alloc2); EXPECT_THAT(set2, ElementsAre(1, 2, 3)); // We didn't free these bytes allocated by `set1` yet. EXPECT_EQ(bytes_used1, original_bytes_used); EXPECT_EQ(bytes_used2, original_bytes_used); } bool IntCmp(const int a, const int b) { return a < b; } TEST(Btree, SupportsFunctionPtrComparator) { absl::btree_set set(IntCmp); set.insert({1, 2, 3}); EXPECT_THAT(set, ElementsAre(1, 2, 3)); EXPECT_TRUE(set.key_comp()(1, 2)); EXPECT_TRUE(set.value_comp()(1, 2)); absl::btree_map map(&IntCmp); map[1] = 1; EXPECT_THAT(map, ElementsAre(Pair(1, 1))); EXPECT_TRUE(map.key_comp()(1, 2)); EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2))); } template struct TransparentPassThroughComp { using is_transparent = void; // This will fail compilation if we attempt a comparison that Compare does not // support, and the failure will happen inside the function implementation so // it can't be avoided by using SFINAE on this comparator. 
template bool operator()(const T &lhs, const U &rhs) const { return Compare()(lhs, rhs); } }; TEST(Btree, SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) { absl::btree_set> set; set.insert(MultiKey{1, 2}); EXPECT_TRUE(set.contains(1)); } TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) { absl::btree_set set = {{}, MultiKeyComp{}}; } #ifndef NDEBUG TEST(Btree, InvalidComparatorsCaught) { { struct ZeroAlwaysLessCmp { bool operator()(int lhs, int rhs) const { if (lhs == 0) return true; return lhs < rhs; } }; absl::btree_set set; EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); } { struct ThreeWayAlwaysLessCmp { absl::weak_ordering operator()(int, int) const { return absl::weak_ordering::less; } }; absl::btree_set set; EXPECT_DEATH(set.insert({0, 1, 2}), "is_self_equivalent"); } { struct SumGreaterZeroCmp { bool operator()(int lhs, int rhs) const { // First, do equivalence correctly - so we can test later condition. if (lhs == rhs) return false; return lhs + rhs > 0; } }; absl::btree_set set; // Note: '!' only needs to be escaped when it's the first character. EXPECT_DEATH(set.insert({0, 1, 2}), R"regex(\!lhs_comp_rhs \|\| !comp\(\)\(rhs, lhs\))regex"); } { struct ThreeWaySumGreaterZeroCmp { absl::weak_ordering operator()(int lhs, int rhs) const { // First, do equivalence correctly - so we can test later condition. if (lhs == rhs) return absl::weak_ordering::equivalent; if (lhs + rhs > 0) return absl::weak_ordering::less; if (lhs + rhs == 0) return absl::weak_ordering::equivalent; return absl::weak_ordering::greater; } }; absl::btree_set set; EXPECT_DEATH(set.insert({0, 1, 2}), "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); } } #endif #ifndef _MSC_VER // This test crashes on MSVC. 
TEST(Btree, InvalidIteratorUse) { if (!BtreeNodePeer::UsesGenerations>()) GTEST_SKIP() << "Generation validation for iterators is disabled."; { absl::btree_set set; for (int i = 0; i < 10; ++i) set.insert(i); auto it = set.begin(); set.erase(it++); EXPECT_DEATH(set.erase(it++), "invalidated iterator"); } { absl::btree_set set; for (int i = 0; i < 10; ++i) set.insert(i); auto it = set.insert(20).first; set.insert(30); EXPECT_DEATH(*it, "invalidated iterator"); } { absl::btree_set set; for (int i = 0; i < 10000; ++i) set.insert(i); auto it = set.find(5000); ASSERT_NE(it, set.end()); set.erase(1); EXPECT_DEATH(*it, "invalidated iterator"); } } #endif class OnlyConstructibleByAllocator { explicit OnlyConstructibleByAllocator(int i) : i_(i) {} public: OnlyConstructibleByAllocator(const OnlyConstructibleByAllocator &other) : i_(other.i_) {} OnlyConstructibleByAllocator &operator=( const OnlyConstructibleByAllocator &other) { i_ = other.i_; return *this; } int Get() const { return i_; } bool operator==(int i) const { return i_ == i; } private: template friend class OnlyConstructibleAllocator; int i_; }; template class OnlyConstructibleAllocator : public std::allocator { public: OnlyConstructibleAllocator() = default; template explicit OnlyConstructibleAllocator(const OnlyConstructibleAllocator &) {} void construct(OnlyConstructibleByAllocator *p, int i) { new (p) OnlyConstructibleByAllocator(i); } template void construct(Pair *p, const int i) { OnlyConstructibleByAllocator only(i); new (p) Pair(std::move(only), i); } template struct rebind { using other = OnlyConstructibleAllocator; }; }; struct OnlyConstructibleByAllocatorComp { using is_transparent = void; bool operator()(OnlyConstructibleByAllocator a, OnlyConstructibleByAllocator b) const { return a.Get() < b.Get(); } bool operator()(int a, OnlyConstructibleByAllocator b) const { return a < b.Get(); } bool operator()(OnlyConstructibleByAllocator a, int b) const { return a.Get() < b; } }; TEST(Btree, 
OnlyConstructibleByAllocatorType) { const std::array arr = {3, 4}; { absl::btree_set> set; set.emplace(1); set.emplace_hint(set.end(), 2); set.insert(arr.begin(), arr.end()); EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); } { absl::btree_multiset> set; set.emplace(1); set.emplace_hint(set.end(), 2); // TODO(ezb): fix insert_multi to allow this to compile. // set.insert(arr.begin(), arr.end()); EXPECT_THAT(set, ElementsAre(1, 2)); } { absl::btree_map> map; map.emplace(1); map.emplace_hint(map.end(), 2); map.insert(arr.begin(), arr.end()); EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4))); } { absl::btree_multimap> map; map.emplace(1); map.emplace_hint(map.end(), 2); // TODO(ezb): fix insert_multi to allow this to compile. // map.insert(arr.begin(), arr.end()); EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2))); } } class NotAssignable { public: explicit NotAssignable(int i) : i_(i) {} NotAssignable(const NotAssignable &other) : i_(other.i_) {} NotAssignable &operator=(NotAssignable &&other) = delete; int Get() const { return i_; } bool operator==(int i) const { return i_ == i; } friend bool operator<(NotAssignable a, NotAssignable b) { return a.i_ < b.i_; } private: int i_; }; TEST(Btree, NotAssignableType) { { absl::btree_set set; set.emplace(1); set.emplace_hint(set.end(), 2); set.insert(NotAssignable(3)); set.insert(set.end(), NotAssignable(4)); EXPECT_THAT(set, ElementsAre(1, 2, 3, 4)); set.erase(set.begin()); EXPECT_THAT(set, ElementsAre(2, 3, 4)); } { absl::btree_multiset set; set.emplace(1); set.emplace_hint(set.end(), 2); set.insert(NotAssignable(2)); set.insert(set.end(), NotAssignable(3)); EXPECT_THAT(set, ElementsAre(1, 2, 2, 3)); set.erase(set.begin()); EXPECT_THAT(set, ElementsAre(2, 2, 3)); } { absl::btree_map map; map.emplace(NotAssignable(1), 1); map.emplace_hint(map.end(), NotAssignable(2), 2); map.insert({NotAssignable(3), 3}); map.insert(map.end(), {NotAssignable(4), 4}); EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 
2), Pair(3, 3), Pair(4, 4))); map.erase(map.begin()); EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(3, 3), Pair(4, 4))); } { absl::btree_multimap map; map.emplace(NotAssignable(1), 1); map.emplace_hint(map.end(), NotAssignable(2), 2); map.insert({NotAssignable(2), 3}); map.insert(map.end(), {NotAssignable(3), 3}); EXPECT_THAT(map, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(2, 3), Pair(3, 3))); map.erase(map.begin()); EXPECT_THAT(map, ElementsAre(Pair(2, 2), Pair(2, 3), Pair(3, 3))); } } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/btree_test.h000066400000000000000000000110261430371345100205670ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CONTAINER_BTREE_TEST_H_ #define ABSL_CONTAINER_BTREE_TEST_H_ #include #include #include #include #include #include #include "absl/container/btree_map.h" #include "absl/container/btree_set.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/cord.h" #include "absl/time/time.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // Like remove_const but propagates the removal through std::pair. template struct remove_pair_const { using type = typename std::remove_const::type; }; template struct remove_pair_const > { using type = std::pair::type, typename remove_pair_const::type>; }; // Utility class to provide an accessor for a key given a value. 
The default // behavior is to treat the value as a pair and return the first element. template struct KeyOfValue { struct type { const K& operator()(const V& p) const { return p.first; } }; }; // Partial specialization of KeyOfValue class for when the key and value are // the same type such as in set<> and btree_set<>. template struct KeyOfValue { struct type { const K& operator()(const K& k) const { return k; } }; }; inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) { assert(val <= maxval); constexpr unsigned kBase = 64; // avoid integer division. unsigned p = 15; buf[p--] = 0; while (maxval > 0) { buf[p--] = ' ' + (val % kBase); val /= kBase; maxval /= kBase; } return buf + p + 1; } template struct Generator { int maxval; explicit Generator(int m) : maxval(m) {} K operator()(int i) const { assert(i <= maxval); return K(i); } }; template <> struct Generator { int maxval; explicit Generator(int m) : maxval(m) {} absl::Time operator()(int i) const { return absl::FromUnixMillis(i); } }; template <> struct Generator { int maxval; explicit Generator(int m) : maxval(m) {} std::string operator()(int i) const { char buf[16]; return GenerateDigits(buf, i, maxval); } }; template <> struct Generator { int maxval; explicit Generator(int m) : maxval(m) {} Cord operator()(int i) const { char buf[16]; return Cord(GenerateDigits(buf, i, maxval)); } }; template struct Generator > { Generator::type> tgen; Generator::type> ugen; explicit Generator(int m) : tgen(m), ugen(m) {} std::pair operator()(int i) const { return std::make_pair(tgen(i), ugen(i)); } }; // Generate n values for our tests and benchmarks. Value range is [0, maxval]. inline std::vector GenerateNumbersWithSeed(int n, int maxval, int seed) { // NOTE: Some tests rely on generated numbers not changing between test runs. // We use std::minstd_rand0 because it is well-defined, but don't use // std::uniform_int_distribution because platforms use different algorithms. 
std::minstd_rand0 rng(seed); std::vector values; absl::flat_hash_set unique_values; if (values.size() < n) { for (int i = values.size(); i < n; i++) { int value; do { value = static_cast(rng()) % (maxval + 1); } while (!unique_values.insert(value).second); values.push_back(value); } } return values; } // Generates n values in the range [0, maxval]. template std::vector GenerateValuesWithSeed(int n, int maxval, int seed) { const std::vector nums = GenerateNumbersWithSeed(n, maxval, seed); Generator gen(maxval); std::vector vec; vec.reserve(n); for (int i = 0; i < n; i++) { vec.push_back(gen(nums[i])); } return vec; } } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_BTREE_TEST_H_ abseil-20220623.1/absl/container/fixed_array.h000066400000000000000000000456131430371345100207350ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: fixed_array.h // ----------------------------------------------------------------------------- // // A `FixedArray` represents a non-resizable array of `T` where the length of // the array can be determined at run-time. It is a good replacement for // non-standard and deprecated uses of `alloca()` and variable length arrays // within the GCC extension. (See // https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html). 
// // `FixedArray` allocates small arrays inline, keeping performance fast by // avoiding heap operations. It also helps reduce the chances of // accidentally overflowing your stack if large input is passed to // your function. #ifndef ABSL_CONTAINER_FIXED_ARRAY_H_ #define ABSL_CONTAINER_FIXED_ARRAY_H_ #include #include #include #include #include #include #include #include #include #include "absl/algorithm/algorithm.h" #include "absl/base/config.h" #include "absl/base/dynamic_annotations.h" #include "absl/base/internal/throw_delegate.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" namespace absl { ABSL_NAMESPACE_BEGIN constexpr static auto kFixedArrayUseDefault = static_cast(-1); // ----------------------------------------------------------------------------- // FixedArray // ----------------------------------------------------------------------------- // // A `FixedArray` provides a run-time fixed-size array, allocating a small array // inline for efficiency. // // Most users should not specify an `inline_elements` argument and let // `FixedArray` automatically determine the number of elements // to store inline based on `sizeof(T)`. If `inline_elements` is specified, the // `FixedArray` implementation will use inline storage for arrays with a // length <= `inline_elements`. // // Note that a `FixedArray` constructed with a `size_type` argument will // default-initialize its values by leaving trivially constructible types // uninitialized (e.g. int, int[4], double), and others default-constructed. // This matches the behavior of c-style arrays and `std::array`, but not // `std::vector`. 
template > class FixedArray { static_assert(!std::is_array::value || std::extent::value > 0, "Arrays with unknown bounds cannot be used with FixedArray."); static constexpr size_t kInlineBytesDefault = 256; using AllocatorTraits = std::allocator_traits; // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17, // but this seems to be mostly pedantic. template using EnableIfForwardIterator = absl::enable_if_t::iterator_category, std::forward_iterator_tag>::value>; static constexpr bool NoexceptCopyable() { return std::is_nothrow_copy_constructible::value && absl::allocator_is_nothrow::value; } static constexpr bool NoexceptMovable() { return std::is_nothrow_move_constructible::value && absl::allocator_is_nothrow::value; } static constexpr bool DefaultConstructorIsNonTrivial() { return !absl::is_trivially_default_constructible::value; } public: using allocator_type = typename AllocatorTraits::allocator_type; using value_type = typename AllocatorTraits::value_type; using pointer = typename AllocatorTraits::pointer; using const_pointer = typename AllocatorTraits::const_pointer; using reference = value_type&; using const_reference = const value_type&; using size_type = typename AllocatorTraits::size_type; using difference_type = typename AllocatorTraits::difference_type; using iterator = pointer; using const_iterator = const_pointer; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; static constexpr size_type inline_elements = (N == kFixedArrayUseDefault ? 
kInlineBytesDefault / sizeof(value_type) : static_cast(N)); FixedArray( const FixedArray& other, const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable()) : FixedArray(other.begin(), other.end(), a) {} FixedArray( FixedArray&& other, const allocator_type& a = allocator_type()) noexcept(NoexceptMovable()) : FixedArray(std::make_move_iterator(other.begin()), std::make_move_iterator(other.end()), a) {} // Creates an array object that can store `n` elements. // Note that trivially constructible elements will be uninitialized. explicit FixedArray(size_type n, const allocator_type& a = allocator_type()) : storage_(n, a) { if (DefaultConstructorIsNonTrivial()) { memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end()); } } // Creates an array initialized with `n` copies of `val`. FixedArray(size_type n, const value_type& val, const allocator_type& a = allocator_type()) : storage_(n, a) { memory_internal::ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val); } // Creates an array initialized with the size and contents of `init_list`. FixedArray(std::initializer_list init_list, const allocator_type& a = allocator_type()) : FixedArray(init_list.begin(), init_list.end(), a) {} // Creates an array initialized with the elements from the input // range. The array's size will always be `std::distance(first, last)`. // REQUIRES: Iterator must be a forward_iterator or better. template * = nullptr> FixedArray(Iterator first, Iterator last, const allocator_type& a = allocator_type()) : storage_(std::distance(first, last), a) { memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last); } ~FixedArray() noexcept { for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) { AllocatorTraits::destroy(storage_.alloc(), cur); } } // Assignments are deleted because they break the invariant that the size of a // `FixedArray` never changes. 
void operator=(FixedArray&&) = delete; void operator=(const FixedArray&) = delete; // FixedArray::size() // // Returns the length of the fixed array. size_type size() const { return storage_.size(); } // FixedArray::max_size() // // Returns the largest possible value of `std::distance(begin(), end())` for a // `FixedArray`. This is equivalent to the most possible addressable bytes // over the number of bytes taken by T. constexpr size_type max_size() const { return (std::numeric_limits::max)() / sizeof(value_type); } // FixedArray::empty() // // Returns whether or not the fixed array is empty. bool empty() const { return size() == 0; } // FixedArray::memsize() // // Returns the memory size of the fixed array in bytes. size_t memsize() const { return size() * sizeof(value_type); } // FixedArray::data() // // Returns a const T* pointer to elements of the `FixedArray`. This pointer // can be used to access (but not modify) the contained elements. const_pointer data() const { return AsValueType(storage_.begin()); } // Overload of FixedArray::data() to return a T* pointer to elements of the // fixed array. This pointer can be used to access and modify the contained // elements. pointer data() { return AsValueType(storage_.begin()); } // FixedArray::operator[] // // Returns a reference the ith element of the fixed array. // REQUIRES: 0 <= i < size() reference operator[](size_type i) { ABSL_HARDENING_ASSERT(i < size()); return data()[i]; } // Overload of FixedArray::operator()[] to return a const reference to the // ith element of the fixed array. // REQUIRES: 0 <= i < size() const_reference operator[](size_type i) const { ABSL_HARDENING_ASSERT(i < size()); return data()[i]; } // FixedArray::at // // Bounds-checked access. 
Returns a reference to the ith element of the fixed // array, or throws std::out_of_range reference at(size_type i) { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); } return data()[i]; } // Overload of FixedArray::at() to return a const reference to the ith element // of the fixed array. const_reference at(size_type i) const { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check"); } return data()[i]; } // FixedArray::front() // // Returns a reference to the first element of the fixed array. reference front() { ABSL_HARDENING_ASSERT(!empty()); return data()[0]; } // Overload of FixedArray::front() to return a reference to the first element // of a fixed array of const values. const_reference front() const { ABSL_HARDENING_ASSERT(!empty()); return data()[0]; } // FixedArray::back() // // Returns a reference to the last element of the fixed array. reference back() { ABSL_HARDENING_ASSERT(!empty()); return data()[size() - 1]; } // Overload of FixedArray::back() to return a reference to the last element // of a fixed array of const values. const_reference back() const { ABSL_HARDENING_ASSERT(!empty()); return data()[size() - 1]; } // FixedArray::begin() // // Returns an iterator to the beginning of the fixed array. iterator begin() { return data(); } // Overload of FixedArray::begin() to return a const iterator to the // beginning of the fixed array. const_iterator begin() const { return data(); } // FixedArray::cbegin() // // Returns a const iterator to the beginning of the fixed array. const_iterator cbegin() const { return begin(); } // FixedArray::end() // // Returns an iterator to the end of the fixed array. iterator end() { return data() + size(); } // Overload of FixedArray::end() to return a const iterator to the end of the // fixed array. 
const_iterator end() const { return data() + size(); } // FixedArray::cend() // // Returns a const iterator to the end of the fixed array. const_iterator cend() const { return end(); } // FixedArray::rbegin() // // Returns a reverse iterator from the end of the fixed array. reverse_iterator rbegin() { return reverse_iterator(end()); } // Overload of FixedArray::rbegin() to return a const reverse iterator from // the end of the fixed array. const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } // FixedArray::crbegin() // // Returns a const reverse iterator from the end of the fixed array. const_reverse_iterator crbegin() const { return rbegin(); } // FixedArray::rend() // // Returns a reverse iterator from the beginning of the fixed array. reverse_iterator rend() { return reverse_iterator(begin()); } // Overload of FixedArray::rend() for returning a const reverse iterator // from the beginning of the fixed array. const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } // FixedArray::crend() // // Returns a reverse iterator from the beginning of the fixed array. const_reverse_iterator crend() const { return rend(); } // FixedArray::fill() // // Assigns the given `value` to all elements in the fixed array. void fill(const value_type& val) { std::fill(begin(), end(), val); } // Relational operators. Equality operators are elementwise using // `operator==`, while order operators order FixedArrays lexicographically. 
friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) { return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); } friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) { return !(lhs == rhs); } friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) { return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(), rhs.end()); } friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) { return rhs < lhs; } friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) { return !(rhs < lhs); } friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) { return !(lhs < rhs); } template friend H AbslHashValue(H h, const FixedArray& v) { return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()), v.size()); } private: // StorageElement // // For FixedArrays with a C-style-array value_type, StorageElement is a POD // wrapper struct called StorageElementWrapper that holds the value_type // instance inside. This is needed for construction and destruction of the // entire array regardless of how many dimensions it has. For all other cases, // StorageElement is just an alias of value_type. // // Maintainer's Note: The simpler solution would be to simply wrap value_type // in a struct whether it's an array or not. That causes some paranoid // diagnostics to misfire, believing that 'data()' returns a pointer to a // single element, rather than the packed array that it really is. // e.g.: // // FixedArray buf(1); // sprintf(buf.data(), "foo"); // // error: call to int __builtin___sprintf_chk(etc...) 
// will always overflow destination buffer [-Werror] // template , size_t InnerN = std::extent::value> struct StorageElementWrapper { InnerT array[InnerN]; }; using StorageElement = absl::conditional_t::value, StorageElementWrapper, value_type>; static pointer AsValueType(pointer ptr) { return ptr; } static pointer AsValueType(StorageElementWrapper* ptr) { return std::addressof(ptr->array); } static_assert(sizeof(StorageElement) == sizeof(value_type), ""); static_assert(alignof(StorageElement) == alignof(value_type), ""); class NonEmptyInlinedStorage { public: StorageElement* data() { return reinterpret_cast(buff_); } void AnnotateConstruct(size_type n); void AnnotateDestruct(size_type n); #ifdef ABSL_HAVE_ADDRESS_SANITIZER void* RedzoneBegin() { return &redzone_begin_; } void* RedzoneEnd() { return &redzone_end_ + 1; } #endif // ABSL_HAVE_ADDRESS_SANITIZER private: ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_); alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])]; ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_); }; class EmptyInlinedStorage { public: StorageElement* data() { return nullptr; } void AnnotateConstruct(size_type) {} void AnnotateDestruct(size_type) {} }; using InlinedStorage = absl::conditional_t; // Storage // // An instance of Storage manages the inline and out-of-line memory for // instances of FixedArray. This guarantees that even when construction of // individual elements fails in the FixedArray constructor body, the // destructor for Storage will still be called and out-of-line memory will be // properly deallocated. 
// class Storage : public InlinedStorage { public: Storage(size_type n, const allocator_type& a) : size_alloc_(n, a), data_(InitializeData()) {} ~Storage() noexcept { if (UsingInlinedStorage(size())) { InlinedStorage::AnnotateDestruct(size()); } else { AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size()); } } size_type size() const { return size_alloc_.template get<0>(); } StorageElement* begin() const { return data_; } StorageElement* end() const { return begin() + size(); } allocator_type& alloc() { return size_alloc_.template get<1>(); } private: static bool UsingInlinedStorage(size_type n) { return n <= inline_elements; } StorageElement* InitializeData() { if (UsingInlinedStorage(size())) { InlinedStorage::AnnotateConstruct(size()); return InlinedStorage::data(); } else { return reinterpret_cast( AllocatorTraits::allocate(alloc(), size())); } } // `CompressedTuple` takes advantage of EBCO for stateless `allocator_type`s container_internal::CompressedTuple size_alloc_; StorageElement* data_; }; Storage storage_; }; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL template constexpr size_t FixedArray::kInlineBytesDefault; template constexpr typename FixedArray::size_type FixedArray::inline_elements; #endif template void FixedArray::NonEmptyInlinedStorage::AnnotateConstruct( typename FixedArray::size_type n) { #ifdef ABSL_HAVE_ADDRESS_SANITIZER if (!n) return; ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n); ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin()); #endif // ABSL_HAVE_ADDRESS_SANITIZER static_cast(n); // Mark used when not in asan mode } template void FixedArray::NonEmptyInlinedStorage::AnnotateDestruct( typename FixedArray::size_type n) { #ifdef ABSL_HAVE_ADDRESS_SANITIZER if (!n) return; ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd()); ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data()); #endif // 
ABSL_HAVE_ADDRESS_SANITIZER static_cast(n); // Mark used when not in asan mode } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FIXED_ARRAY_H_ abseil-20220623.1/absl/container/fixed_array_benchmark.cc000066400000000000000000000047371430371345100231070ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "benchmark/benchmark.h" #include "absl/container/fixed_array.h" namespace { // For benchmarking -- simple class with constructor and destructor that // set an int to a constant.. 
class SimpleClass { public: SimpleClass() : i(3) {} ~SimpleClass() { i = 0; } private: int i; }; template void BM_FixedArray(benchmark::State& state) { const int size = state.range(0); for (auto _ : state) { absl::FixedArray fa(size); benchmark::DoNotOptimize(fa.data()); } } BENCHMARK_TEMPLATE(BM_FixedArray, char, absl::kFixedArrayUseDefault) ->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, char, 0)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, char, 1)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, char, 16)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, char, 256)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, char, 65536)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, absl::kFixedArrayUseDefault) ->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 0)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 1)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 16)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 256)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 65536)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, absl::kFixedArrayUseDefault) ->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 0)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 1)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 16)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 256)->Range(0, 1 << 16); BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 65536)->Range(0, 1 << 16); } // namespace abseil-20220623.1/absl/container/fixed_array_exception_safety_test.cc000066400000000000000000000155221430371345100255570ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/base/config.h" #include "absl/container/fixed_array.h" #ifdef ABSL_HAVE_EXCEPTIONS #include #include "gtest/gtest.h" #include "absl/base/internal/exception_safety_testing.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace { constexpr size_t kInlined = 25; constexpr size_t kSmallSize = kInlined / 2; constexpr size_t kLargeSize = kInlined * 2; constexpr int kInitialValue = 5; constexpr int kUpdatedValue = 10; using ::testing::TestThrowingCtor; using Thrower = testing::ThrowingValue; using ThrowAlloc = testing::ThrowingAllocator; using MoveThrower = testing::ThrowingValue; using MoveThrowAlloc = testing::ThrowingAllocator; using FixedArr = absl::FixedArray; using FixedArrWithAlloc = absl::FixedArray; using MoveFixedArr = absl::FixedArray; using MoveFixedArrWithAlloc = absl::FixedArray; TEST(FixedArrayExceptionSafety, CopyConstructor) { auto small = FixedArr(kSmallSize); TestThrowingCtor(small); auto large = FixedArr(kLargeSize); TestThrowingCtor(large); } TEST(FixedArrayExceptionSafety, CopyConstructorWithAlloc) { auto small = FixedArrWithAlloc(kSmallSize); TestThrowingCtor(small); auto large = FixedArrWithAlloc(kLargeSize); TestThrowingCtor(large); } TEST(FixedArrayExceptionSafety, MoveConstructor) { TestThrowingCtor(FixedArr(kSmallSize)); TestThrowingCtor(FixedArr(kLargeSize)); // TypeSpec::kNoThrowMove TestThrowingCtor(MoveFixedArr(kSmallSize)); TestThrowingCtor(MoveFixedArr(kLargeSize)); } TEST(FixedArrayExceptionSafety, MoveConstructorWithAlloc) { TestThrowingCtor(FixedArrWithAlloc(kSmallSize)); 
TestThrowingCtor(FixedArrWithAlloc(kLargeSize)); // TypeSpec::kNoThrowMove TestThrowingCtor(MoveFixedArrWithAlloc(kSmallSize)); TestThrowingCtor(MoveFixedArrWithAlloc(kLargeSize)); } TEST(FixedArrayExceptionSafety, SizeConstructor) { TestThrowingCtor(kSmallSize); TestThrowingCtor(kLargeSize); } TEST(FixedArrayExceptionSafety, SizeConstructorWithAlloc) { TestThrowingCtor(kSmallSize); TestThrowingCtor(kLargeSize); } TEST(FixedArrayExceptionSafety, SizeValueConstructor) { TestThrowingCtor(kSmallSize, Thrower()); TestThrowingCtor(kLargeSize, Thrower()); } TEST(FixedArrayExceptionSafety, SizeValueConstructorWithAlloc) { TestThrowingCtor(kSmallSize, Thrower()); TestThrowingCtor(kLargeSize, Thrower()); } TEST(FixedArrayExceptionSafety, IteratorConstructor) { auto small = FixedArr(kSmallSize); TestThrowingCtor(small.begin(), small.end()); auto large = FixedArr(kLargeSize); TestThrowingCtor(large.begin(), large.end()); } TEST(FixedArrayExceptionSafety, IteratorConstructorWithAlloc) { auto small = FixedArrWithAlloc(kSmallSize); TestThrowingCtor(small.begin(), small.end()); auto large = FixedArrWithAlloc(kLargeSize); TestThrowingCtor(large.begin(), large.end()); } TEST(FixedArrayExceptionSafety, InitListConstructor) { constexpr int small_inlined = 3; using SmallFixedArr = absl::FixedArray; TestThrowingCtor(std::initializer_list{}); // Test inlined allocation TestThrowingCtor( std::initializer_list{Thrower{}, Thrower{}}); // Test out of line allocation TestThrowingCtor(std::initializer_list{ Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); } TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) { constexpr int small_inlined = 3; using SmallFixedArrWithAlloc = absl::FixedArray; TestThrowingCtor(std::initializer_list{}); // Test inlined allocation TestThrowingCtor( std::initializer_list{Thrower{}, Thrower{}}); // Test out of line allocation TestThrowingCtor(std::initializer_list{ Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}}); } template 
testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) { int sum = 0; for (const auto& thrower : *fixed_arr) { sum += thrower.Get(); } return testing::AssertionSuccess() << "Values sum to [" << sum << "]"; } TEST(FixedArrayExceptionSafety, Fill) { auto test_fill = testing::MakeExceptionSafetyTester() .WithContracts(ReadMemory) .WithOperation([&](FixedArr* fixed_arr_ptr) { auto thrower = Thrower(kUpdatedValue, testing::nothrow_ctor); fixed_arr_ptr->fill(thrower); }); EXPECT_TRUE( test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue))) .Test()); EXPECT_TRUE( test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue))) .Test()); } TEST(FixedArrayExceptionSafety, FillWithAlloc) { auto test_fill = testing::MakeExceptionSafetyTester() .WithContracts(ReadMemory) .WithOperation([&](FixedArrWithAlloc* fixed_arr_ptr) { auto thrower = Thrower(kUpdatedValue, testing::nothrow_ctor); fixed_arr_ptr->fill(thrower); }); EXPECT_TRUE(test_fill .WithInitialValue( FixedArrWithAlloc(kSmallSize, Thrower(kInitialValue))) .Test()); EXPECT_TRUE(test_fill .WithInitialValue( FixedArrWithAlloc(kLargeSize, Thrower(kInitialValue))) .Test()); } } // namespace ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_HAVE_EXCEPTIONS abseil-20220623.1/absl/container/fixed_array_test.cc000066400000000000000000000610521430371345100221250ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/fixed_array.h" #include #include #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" #include "absl/base/internal/exception_testing.h" #include "absl/base/options.h" #include "absl/container/internal/counting_allocator.h" #include "absl/hash/hash_testing.h" #include "absl/memory/memory.h" using ::testing::ElementsAreArray; namespace { // Helper routine to determine if a absl::FixedArray used stack allocation. template static bool IsOnStack(const ArrayType& a) { return a.size() <= ArrayType::inline_elements; } class ConstructionTester { public: ConstructionTester() : self_ptr_(this), value_(0) { constructions++; } ~ConstructionTester() { assert(self_ptr_ == this); self_ptr_ = nullptr; destructions++; } // These are incremented as elements are constructed and destructed so we can // be sure all elements are properly cleaned up. static int constructions; static int destructions; void CheckConstructed() { assert(self_ptr_ == this); } void set(int value) { value_ = value; } int get() { return value_; } private: // self_ptr_ should always point to 'this' -- that's how we can be sure the // constructor has been called. ConstructionTester* self_ptr_; int value_; }; int ConstructionTester::constructions = 0; int ConstructionTester::destructions = 0; // ThreeInts will initialize its three ints to the value stored in // ThreeInts::counter. The constructor increments counter so that each object // in an array of ThreeInts will have different values. 
class ThreeInts { public: ThreeInts() { x_ = counter; y_ = counter; z_ = counter; ++counter; } static int counter; int x_, y_, z_; }; int ThreeInts::counter = 0; TEST(FixedArrayTest, CopyCtor) { absl::FixedArray on_stack(5); std::iota(on_stack.begin(), on_stack.end(), 0); absl::FixedArray stack_copy = on_stack; EXPECT_THAT(stack_copy, ElementsAreArray(on_stack)); EXPECT_TRUE(IsOnStack(stack_copy)); absl::FixedArray allocated(15); std::iota(allocated.begin(), allocated.end(), 0); absl::FixedArray alloced_copy = allocated; EXPECT_THAT(alloced_copy, ElementsAreArray(allocated)); EXPECT_FALSE(IsOnStack(alloced_copy)); } TEST(FixedArrayTest, MoveCtor) { absl::FixedArray, 10> on_stack(5); for (int i = 0; i < 5; ++i) { on_stack[i] = absl::make_unique(i); } absl::FixedArray, 10> stack_copy = std::move(on_stack); for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i); EXPECT_EQ(stack_copy.size(), on_stack.size()); absl::FixedArray, 10> allocated(15); for (int i = 0; i < 15; ++i) { allocated[i] = absl::make_unique(i); } absl::FixedArray, 10> alloced_copy = std::move(allocated); for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i); EXPECT_EQ(allocated.size(), alloced_copy.size()); } TEST(FixedArrayTest, SmallObjects) { // Small object arrays { // Short arrays should be on the stack absl::FixedArray array(4); EXPECT_TRUE(IsOnStack(array)); } { // Large arrays should be on the heap absl::FixedArray array(1048576); EXPECT_FALSE(IsOnStack(array)); } { // Arrays of <= default size should be on the stack absl::FixedArray array(100); EXPECT_TRUE(IsOnStack(array)); } { // Arrays of > default size should be on the heap absl::FixedArray array(101); EXPECT_FALSE(IsOnStack(array)); } { // Arrays with different size elements should use approximately // same amount of stack space absl::FixedArray array1(0); absl::FixedArray array2(0); EXPECT_LE(sizeof(array1), sizeof(array2) + 100); EXPECT_LE(sizeof(array2), sizeof(array1) + 100); } { // Ensure that vectors are properly 
constructed inside a fixed array. absl::FixedArray> array(2); EXPECT_EQ(0, array[0].size()); EXPECT_EQ(0, array[1].size()); } { // Regardless of absl::FixedArray implementation, check that a type with a // low alignment requirement and a non power-of-two size is initialized // correctly. ThreeInts::counter = 1; absl::FixedArray array(2); EXPECT_EQ(1, array[0].x_); EXPECT_EQ(1, array[0].y_); EXPECT_EQ(1, array[0].z_); EXPECT_EQ(2, array[1].x_); EXPECT_EQ(2, array[1].y_); EXPECT_EQ(2, array[1].z_); } } TEST(FixedArrayTest, AtThrows) { absl::FixedArray a = {1, 2, 3}; EXPECT_EQ(a.at(2), 3); ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range, "failed bounds check"); } TEST(FixedArrayTest, Hardened) { #if !defined(NDEBUG) || ABSL_OPTION_HARDENED absl::FixedArray a = {1, 2, 3}; EXPECT_EQ(a[2], 3); EXPECT_DEATH_IF_SUPPORTED(a[3], ""); EXPECT_DEATH_IF_SUPPORTED(a[-1], ""); absl::FixedArray empty(0); EXPECT_DEATH_IF_SUPPORTED(empty[0], ""); EXPECT_DEATH_IF_SUPPORTED(empty[-1], ""); EXPECT_DEATH_IF_SUPPORTED(empty.front(), ""); EXPECT_DEATH_IF_SUPPORTED(empty.back(), ""); #endif } TEST(FixedArrayRelationalsTest, EqualArrays) { for (int i = 0; i < 10; ++i) { absl::FixedArray a1(i); std::iota(a1.begin(), a1.end(), 0); absl::FixedArray a2(a1.begin(), a1.end()); EXPECT_TRUE(a1 == a2); EXPECT_FALSE(a1 != a2); EXPECT_TRUE(a2 == a1); EXPECT_FALSE(a2 != a1); EXPECT_FALSE(a1 < a2); EXPECT_FALSE(a1 > a2); EXPECT_FALSE(a2 < a1); EXPECT_FALSE(a2 > a1); EXPECT_TRUE(a1 <= a2); EXPECT_TRUE(a1 >= a2); EXPECT_TRUE(a2 <= a1); EXPECT_TRUE(a2 >= a1); } } TEST(FixedArrayRelationalsTest, UnequalArrays) { for (int i = 1; i < 10; ++i) { absl::FixedArray a1(i); std::iota(a1.begin(), a1.end(), 0); absl::FixedArray a2(a1.begin(), a1.end()); --a2[i / 2]; EXPECT_FALSE(a1 == a2); EXPECT_TRUE(a1 != a2); EXPECT_FALSE(a2 == a1); EXPECT_TRUE(a2 != a1); EXPECT_FALSE(a1 < a2); EXPECT_TRUE(a1 > a2); EXPECT_TRUE(a2 < a1); EXPECT_FALSE(a2 > a1); EXPECT_FALSE(a1 <= a2); EXPECT_TRUE(a1 >= a2); EXPECT_TRUE(a2 
<= a1); EXPECT_FALSE(a2 >= a1); } } template static void TestArray(int n) { SCOPED_TRACE(n); SCOPED_TRACE(stack_elements); ConstructionTester::constructions = 0; ConstructionTester::destructions = 0; { absl::FixedArray array(n); EXPECT_THAT(array.size(), n); EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n); EXPECT_THAT(array.begin() + n, array.end()); // Check that all elements were constructed for (int i = 0; i < n; i++) { array[i].CheckConstructed(); } // Check that no other elements were constructed EXPECT_THAT(ConstructionTester::constructions, n); // Test operator[] for (int i = 0; i < n; i++) { array[i].set(i); } for (int i = 0; i < n; i++) { EXPECT_THAT(array[i].get(), i); EXPECT_THAT(array.data()[i].get(), i); } // Test data() for (int i = 0; i < n; i++) { array.data()[i].set(i + 1); } for (int i = 0; i < n; i++) { EXPECT_THAT(array[i].get(), i + 1); EXPECT_THAT(array.data()[i].get(), i + 1); } } // Close scope containing 'array'. // Check that all constructed elements were destructed. EXPECT_EQ(ConstructionTester::constructions, ConstructionTester::destructions); } template static void TestArrayOfArrays(int n) { SCOPED_TRACE(n); SCOPED_TRACE(inline_elements); SCOPED_TRACE(elements_per_inner_array); ConstructionTester::constructions = 0; ConstructionTester::destructions = 0; { using InnerArray = ConstructionTester[elements_per_inner_array]; // Heap-allocate the FixedArray to avoid blowing the stack frame. 
auto array_ptr = absl::make_unique>(n); auto& array = *array_ptr; ASSERT_EQ(array.size(), n); ASSERT_EQ(array.memsize(), sizeof(ConstructionTester) * elements_per_inner_array * n); ASSERT_EQ(array.begin() + n, array.end()); // Check that all elements were constructed for (int i = 0; i < n; i++) { for (int j = 0; j < elements_per_inner_array; j++) { (array[i])[j].CheckConstructed(); } } // Check that no other elements were constructed ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array); // Test operator[] for (int i = 0; i < n; i++) { for (int j = 0; j < elements_per_inner_array; j++) { (array[i])[j].set(i * elements_per_inner_array + j); } } for (int i = 0; i < n; i++) { for (int j = 0; j < elements_per_inner_array; j++) { ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j); ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j); } } // Test data() for (int i = 0; i < n; i++) { for (int j = 0; j < elements_per_inner_array; j++) { (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j); } } for (int i = 0; i < n; i++) { for (int j = 0; j < elements_per_inner_array; j++) { ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j); ASSERT_EQ((array.data()[i])[j].get(), (i + 1) * elements_per_inner_array + j); } } } // Close scope containing 'array'. // Check that all constructed elements were destructed. 
EXPECT_EQ(ConstructionTester::constructions, ConstructionTester::destructions); } TEST(IteratorConstructorTest, NonInline) { int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; absl::FixedArray const fixed( kInput, kInput + ABSL_ARRAYSIZE(kInput)); ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { ASSERT_EQ(kInput[i], fixed[i]); } } TEST(IteratorConstructorTest, Inline) { int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; absl::FixedArray const fixed( kInput, kInput + ABSL_ARRAYSIZE(kInput)); ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { ASSERT_EQ(kInput[i], fixed[i]); } } TEST(IteratorConstructorTest, NonPod) { char const* kInput[] = {"red", "orange", "yellow", "green", "blue", "indigo", "violet"}; absl::FixedArray const fixed(kInput, kInput + ABSL_ARRAYSIZE(kInput)); ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size()); for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) { ASSERT_EQ(kInput[i], fixed[i]); } } TEST(IteratorConstructorTest, FromEmptyVector) { std::vector const empty; absl::FixedArray const fixed(empty.begin(), empty.end()); EXPECT_EQ(0, fixed.size()); EXPECT_EQ(empty.size(), fixed.size()); } TEST(IteratorConstructorTest, FromNonEmptyVector) { int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; std::vector const items(kInput, kInput + ABSL_ARRAYSIZE(kInput)); absl::FixedArray const fixed(items.begin(), items.end()); ASSERT_EQ(items.size(), fixed.size()); for (size_t i = 0; i < items.size(); ++i) { ASSERT_EQ(items[i], fixed[i]); } } TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) { int const kInput[] = {2, 3, 5, 7, 11, 13, 17}; std::list const items(kInput, kInput + ABSL_ARRAYSIZE(kInput)); absl::FixedArray const fixed(items.begin(), items.end()); EXPECT_THAT(fixed, testing::ElementsAreArray(kInput)); } TEST(InitListConstructorTest, InitListConstruction) { absl::FixedArray fixed = {1, 2, 3}; EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3})); 
} TEST(FillConstructorTest, NonEmptyArrays) { absl::FixedArray stack_array(4, 1); EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1})); absl::FixedArray heap_array(4, 1); EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1})); } TEST(FillConstructorTest, EmptyArray) { absl::FixedArray empty_fill(0, 1); absl::FixedArray empty_size(0); EXPECT_EQ(empty_fill, empty_size); } TEST(FillConstructorTest, NotTriviallyCopyable) { std::string str = "abcd"; absl::FixedArray strings = {str, str, str, str}; absl::FixedArray array(4, str); EXPECT_EQ(array, strings); } TEST(FillConstructorTest, Disambiguation) { absl::FixedArray a(1, 2); EXPECT_THAT(a, testing::ElementsAre(2)); } TEST(FixedArrayTest, ManySizedArrays) { std::vector sizes; for (int i = 1; i < 100; i++) sizes.push_back(i); for (int i = 100; i <= 1000; i += 100) sizes.push_back(i); for (int n : sizes) { TestArray<0>(n); TestArray<1>(n); TestArray<64>(n); TestArray<1000>(n); } } TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) { for (int n = 1; n < 1000; n++) { ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n))); ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n))); ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n))); ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n))); } } TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) { for (int n = 1; n < 1000; n++) { TestArrayOfArrays<2, 0>(n); TestArrayOfArrays<2, 1>(n); TestArrayOfArrays<2, 64>(n); TestArrayOfArrays<2, 1000>(n); } } // If value_type is put inside of a struct container, // we might evoke this error in a hardened build unless data() is carefully // written, so check on that. // error: call to int __builtin___sprintf_chk(etc...) 
// will always overflow destination buffer [-Werror] TEST(FixedArrayTest, AvoidParanoidDiagnostics) { absl::FixedArray buf(32); sprintf(buf.data(), "foo"); // NOLINT(runtime/printf) } TEST(FixedArrayTest, TooBigInlinedSpace) { struct TooBig { char c[1 << 20]; }; // too big for even one on the stack // Simulate the data members of absl::FixedArray, a pointer and a size_t. struct Data { TooBig* p; size_t size; }; // Make sure TooBig objects are not inlined for 0 or default size. static_assert(sizeof(absl::FixedArray) == sizeof(Data), "0-sized absl::FixedArray should have same size as Data."); static_assert(alignof(absl::FixedArray) == alignof(Data), "0-sized absl::FixedArray should have same alignment as Data."); static_assert(sizeof(absl::FixedArray) == sizeof(Data), "default-sized absl::FixedArray should have same size as Data"); static_assert( alignof(absl::FixedArray) == alignof(Data), "default-sized absl::FixedArray should have same alignment as Data."); } // PickyDelete EXPECTs its class-scope deallocation funcs are unused. 
struct PickyDelete { PickyDelete() {} ~PickyDelete() {} void operator delete(void* p) { EXPECT_TRUE(false) << __FUNCTION__; ::operator delete(p); } void operator delete[](void* p) { EXPECT_TRUE(false) << __FUNCTION__; ::operator delete[](p); } }; TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray a(5); } TEST(FixedArrayTest, Data) { static const int kInput[] = {2, 3, 5, 7, 11, 13, 17}; absl::FixedArray fa(std::begin(kInput), std::end(kInput)); EXPECT_EQ(fa.data(), &*fa.begin()); EXPECT_EQ(fa.data(), &fa[0]); const absl::FixedArray& cfa = fa; EXPECT_EQ(cfa.data(), &*cfa.begin()); EXPECT_EQ(cfa.data(), &cfa[0]); } TEST(FixedArrayTest, Empty) { absl::FixedArray empty(0); absl::FixedArray inline_filled(1); absl::FixedArray heap_filled(1); EXPECT_TRUE(empty.empty()); EXPECT_FALSE(inline_filled.empty()); EXPECT_FALSE(heap_filled.empty()); } TEST(FixedArrayTest, FrontAndBack) { absl::FixedArray inlined = {1, 2, 3}; EXPECT_EQ(inlined.front(), 1); EXPECT_EQ(inlined.back(), 3); absl::FixedArray allocated = {1, 2, 3}; EXPECT_EQ(allocated.front(), 1); EXPECT_EQ(allocated.back(), 3); absl::FixedArray one_element = {1}; EXPECT_EQ(one_element.front(), one_element.back()); } TEST(FixedArrayTest, ReverseIteratorInlined) { absl::FixedArray a = {0, 1, 2, 3, 4}; int counter = 5; for (absl::FixedArray::reverse_iterator iter = a.rbegin(); iter != a.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); counter = 5; for (absl::FixedArray::const_reverse_iterator iter = a.rbegin(); iter != a.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); counter = 5; for (auto iter = a.crbegin(); iter != a.crend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); } TEST(FixedArrayTest, ReverseIteratorAllocated) { absl::FixedArray a = {0, 1, 2, 3, 4}; int counter = 5; for (absl::FixedArray::reverse_iterator iter = a.rbegin(); iter != a.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); 
counter = 5; for (absl::FixedArray::const_reverse_iterator iter = a.rbegin(); iter != a.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); counter = 5; for (auto iter = a.crbegin(); iter != a.crend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); } TEST(FixedArrayTest, Fill) { absl::FixedArray inlined(5); int fill_val = 42; inlined.fill(fill_val); for (int i : inlined) EXPECT_EQ(i, fill_val); absl::FixedArray allocated(5); allocated.fill(fill_val); for (int i : allocated) EXPECT_EQ(i, fill_val); // It doesn't do anything, just make sure this compiles. absl::FixedArray empty(0); empty.fill(fill_val); } #ifndef __GNUC__ TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) { using T = char; constexpr auto capacity = 10; using FixedArrType = absl::FixedArray; constexpr auto scrubbed_bits = 0x95; constexpr auto length = capacity / 2; alignas(FixedArrType) unsigned char buff[sizeof(FixedArrType)]; std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrType)); FixedArrType* arr = ::new (static_cast(std::addressof(buff))) FixedArrType(length); EXPECT_THAT(*arr, testing::Each(scrubbed_bits)); arr->~FixedArrType(); } #endif // __GNUC__ TEST(AllocatorSupportTest, CountInlineAllocations) { constexpr size_t inlined_size = 4; using Alloc = absl::container_internal::CountingAllocator; using AllocFxdArr = absl::FixedArray; int64_t allocated = 0; int64_t active_instances = 0; { const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; Alloc alloc(&allocated, &active_instances); AllocFxdArr arr(ia, ia + inlined_size, alloc); static_cast(arr); } EXPECT_EQ(allocated, 0); EXPECT_EQ(active_instances, 0); } TEST(AllocatorSupportTest, CountOutoflineAllocations) { constexpr size_t inlined_size = 4; using Alloc = absl::container_internal::CountingAllocator; using AllocFxdArr = absl::FixedArray; int64_t allocated = 0; int64_t active_instances = 0; { const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; Alloc alloc(&allocated, &active_instances); 
AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc); EXPECT_EQ(allocated, arr.size() * sizeof(int)); static_cast(arr); } EXPECT_EQ(active_instances, 0); } TEST(AllocatorSupportTest, CountCopyInlineAllocations) { constexpr size_t inlined_size = 4; using Alloc = absl::container_internal::CountingAllocator; using AllocFxdArr = absl::FixedArray; int64_t allocated1 = 0; int64_t allocated2 = 0; int64_t active_instances = 0; Alloc alloc(&allocated1, &active_instances); Alloc alloc2(&allocated2, &active_instances); { int initial_value = 1; AllocFxdArr arr1(inlined_size / 2, initial_value, alloc); EXPECT_EQ(allocated1, 0); AllocFxdArr arr2(arr1, alloc2); EXPECT_EQ(allocated2, 0); static_cast(arr1); static_cast(arr2); } EXPECT_EQ(active_instances, 0); } TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) { constexpr size_t inlined_size = 4; using Alloc = absl::container_internal::CountingAllocator; using AllocFxdArr = absl::FixedArray; int64_t allocated1 = 0; int64_t allocated2 = 0; int64_t active_instances = 0; Alloc alloc(&allocated1, &active_instances); Alloc alloc2(&allocated2, &active_instances); { int initial_value = 1; AllocFxdArr arr1(inlined_size * 2, initial_value, alloc); EXPECT_EQ(allocated1, arr1.size() * sizeof(int)); AllocFxdArr arr2(arr1, alloc2); EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int)); static_cast(arr1); static_cast(arr2); } EXPECT_EQ(active_instances, 0); } TEST(AllocatorSupportTest, SizeValAllocConstructor) { using testing::AllOf; using testing::Each; using testing::SizeIs; constexpr size_t inlined_size = 4; using Alloc = absl::container_internal::CountingAllocator; using AllocFxdArr = absl::FixedArray; { auto len = inlined_size / 2; auto val = 0; int64_t allocated = 0; AllocFxdArr arr(len, val, Alloc(&allocated)); EXPECT_EQ(allocated, 0); EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); } { auto len = inlined_size * 2; auto val = 0; int64_t allocated = 0; AllocFxdArr arr(len, val, Alloc(&allocated)); EXPECT_EQ(allocated, len * 
sizeof(int)); EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0))); } } #ifdef ABSL_HAVE_ADDRESS_SANITIZER TEST(FixedArrayTest, AddressSanitizerAnnotations1) { absl::FixedArray a(10); int* raw = a.data(); raw[0] = 0; raw[9] = 0; EXPECT_DEATH_IF_SUPPORTED(raw[-2] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[10] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[31] = 0, "container-overflow"); } TEST(FixedArrayTest, AddressSanitizerAnnotations2) { absl::FixedArray a(12); char* raw = a.data(); raw[0] = 0; raw[11] = 0; EXPECT_DEATH_IF_SUPPORTED(raw[-7] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[12] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[17] = 0, "container-overflow"); } TEST(FixedArrayTest, AddressSanitizerAnnotations3) { absl::FixedArray a(20); uint64_t* raw = a.data(); raw[0] = 0; raw[19] = 0; EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[20] = 0, "container-overflow"); } TEST(FixedArrayTest, AddressSanitizerAnnotations4) { absl::FixedArray a(10); ThreeInts* raw = a.data(); raw[0] = ThreeInts(); raw[9] = ThreeInts(); // Note: raw[-1] is pointing to 12 bytes before the container range. However, // there is only a 8-byte red zone before the container range, so we only // access the last 4 bytes of the struct to make sure it stays within the red // zone. EXPECT_DEATH_IF_SUPPORTED(raw[-1].z_ = 0, "container-overflow"); EXPECT_DEATH_IF_SUPPORTED(raw[10] = ThreeInts(), "container-overflow"); // The actual size of storage is kDefaultBytes=256, 21*12 = 252, // so reading raw[21] should still trigger the correct warning. 
EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow"); } #endif // ABSL_HAVE_ADDRESS_SANITIZER TEST(FixedArrayTest, AbslHashValueWorks) { using V = absl::FixedArray; std::vector cases; // Generate a variety of vectors some of these are small enough for the inline // space but are stored out of line. for (int i = 0; i < 10; ++i) { V v(i); for (int j = 0; j < i; ++j) { v[j] = j; } cases.push_back(v); } EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); } } // namespace abseil-20220623.1/absl/container/flat_hash_map.h000066400000000000000000000565021430371345100212250ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: flat_hash_map.h // ----------------------------------------------------------------------------- // // An `absl::flat_hash_map` is an unordered associative container of // unique keys and associated values designed to be a more efficient replacement // for `std::unordered_map`. Like `unordered_map`, search, insertion, and // deletion of map elements can be done as an `O(1)` operation. However, // `flat_hash_map` (and other unordered associative containers known as the // collection of Abseil "Swiss tables") contain other optimizations that result // in both memory and computation advantages. 
// // In most cases, your default choice for a hash map should be a map of type // `flat_hash_map`. #ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_ #define ABSL_CONTAINER_FLAT_HASH_MAP_H_ #include #include #include #include #include "absl/algorithm/container.h" #include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export #include "absl/memory/memory.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct FlatHashMapPolicy; } // namespace container_internal // ----------------------------------------------------------------------------- // absl::flat_hash_map // ----------------------------------------------------------------------------- // // An `absl::flat_hash_map` is an unordered associative container which // has been optimized for both speed and memory footprint in most common use // cases. Its interface is similar to that of `std::unordered_map` with // the following notable differences: // // * Requires keys that are CopyConstructible // * Requires values that are MoveConstructible // * Supports heterogeneous lookup, through `find()`, `operator[]()` and // `insert()`, provided that the map is provided a compatible heterogeneous // hashing function and equality operator. // * Invalidates any references and pointers to elements within the table after // `rehash()`. // * Contains a `capacity()` member function indicating the number of element // slots (open, deleted, and empty) within the hash map. // * Returns `void` from the `erase(iterator)` overload. // // By default, `flat_hash_map` uses the `absl::Hash` hashing framework. // All fundamental and Abseil types that support the `absl::Hash` framework have // a compatible equality operator for comparing insertions into `flat_hash_map`. 
// If your type is not yet supported by the `absl::Hash` framework, see // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. // // Using `absl::flat_hash_map` at interface boundaries in dynamically loaded // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may // be randomized across dynamically loaded libraries. // // NOTE: A `flat_hash_map` stores its value types directly inside its // implementation array to avoid memory indirection. Because a `flat_hash_map` // is designed to move data when rehashed, map values will not retain pointer // stability. If you require pointer stability, or if your values are large, // consider using `absl::flat_hash_map>` instead. // If your types are not moveable or you require pointer stability for keys, // consider `absl::node_hash_map`. // // Example: // // // Create a flat hash map of three strings (that map to strings) // absl::flat_hash_map ducks = // {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}}; // // // Insert a new element into the flat hash map // ducks.insert({"d", "donald"}); // // // Force a rehash of the flat hash map // ducks.rehash(0); // // // Find the element with the key "b" // std::string search_key = "b"; // auto result = ducks.find(search_key); // if (result != ducks.end()) { // std::cout << "Result: " << result->second << std::endl; // } template , class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator>> class flat_hash_map : public absl::container_internal::raw_hash_map< absl::container_internal::FlatHashMapPolicy, Hash, Eq, Allocator> { using Base = typename flat_hash_map::raw_hash_map; public: // Constructors and Assignment Operators // // A flat_hash_map supports the same overload set as `std::unordered_map` // for construction and assignment: // // * Default constructor // // // No allocation for the table's elements is made. 
// absl::flat_hash_map map1; // // * Initializer List constructor // // absl::flat_hash_map map2 = // {{1, "huey"}, {2, "dewey"}, {3, "louie"},}; // // * Copy constructor // // absl::flat_hash_map map3(map2); // // * Copy assignment operator // // // Hash functor and Comparator are copied as well // absl::flat_hash_map map4; // map4 = map3; // // * Move constructor // // // Move is guaranteed efficient // absl::flat_hash_map map5(std::move(map4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::flat_hash_map map6; // map6 = std::move(map5); // // * Range constructor // // std::vector> v = {{1, "a"}, {2, "b"}}; // absl::flat_hash_map map7(v.begin(), v.end()); flat_hash_map() {} using Base::Base; // flat_hash_map::begin() // // Returns an iterator to the beginning of the `flat_hash_map`. using Base::begin; // flat_hash_map::cbegin() // // Returns a const iterator to the beginning of the `flat_hash_map`. using Base::cbegin; // flat_hash_map::cend() // // Returns a const iterator to the end of the `flat_hash_map`. using Base::cend; // flat_hash_map::end() // // Returns an iterator to the end of the `flat_hash_map`. using Base::end; // flat_hash_map::capacity() // // Returns the number of element slots (assigned, deleted, and empty) // available within the `flat_hash_map`. // // NOTE: this member function is particular to `absl::flat_hash_map` and is // not provided in the `std::unordered_map` API. using Base::capacity; // flat_hash_map::empty() // // Returns whether or not the `flat_hash_map` is empty. using Base::empty; // flat_hash_map::max_size() // // Returns the largest theoretical possible number of elements within a // `flat_hash_map` under current memory constraints. This value can be thought // of the largest value of `std::distance(begin(), end())` for a // `flat_hash_map`. using Base::max_size; // flat_hash_map::size() // // Returns the number of elements currently within the `flat_hash_map`. 
using Base::size; // flat_hash_map::clear() // // Removes all elements from the `flat_hash_map`. Invalidates any references, // pointers, or iterators referring to contained elements. // // NOTE: this operation may shrink the underlying buffer. To avoid shrinking // the underlying buffer call `erase(begin(), end())`. using Base::clear; // flat_hash_map::erase() // // Erases elements within the `flat_hash_map`. Erasing does not trigger a // rehash. Overloads are listed below. // // void erase(const_iterator pos): // // Erases the element at `position` of the `flat_hash_map`, returning // `void`. // // NOTE: returning `void` in this case is different than that of STL // containers in general and `std::unordered_map` in particular (which // return an iterator to the element following the erased element). If that // iterator is needed, simply post increment the iterator: // // map.erase(it++); // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning an // iterator pointing to `last`. // // size_type erase(const key_type& key): // // Erases the element with the matching key, if it exists, returning the // number of elements erased (0 or 1). using Base::erase; // flat_hash_map::insert() // // Inserts an element of the specified value into the `flat_hash_map`, // returning an iterator pointing to the newly inserted element, provided that // an element with the given key does not already exist. If rehashing occurs // due to the insertion, all iterators are invalidated. Overloads are listed // below. // // std::pair insert(const init_type& value): // // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an // iterator to the inserted element (or to the element that prevented the // insertion) and a bool denoting whether the insertion took place. 
// // std::pair insert(T&& value): // std::pair insert(init_type&& value): // // Inserts a moveable value into the `flat_hash_map`. Returns a pair // consisting of an iterator to the inserted element (or to the element that // prevented the insertion) and a bool denoting whether the insertion took // place. // // iterator insert(const_iterator hint, const init_type& value): // iterator insert(const_iterator hint, T&& value): // iterator insert(const_iterator hint, init_type&& value); // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. Returns an iterator to the // inserted element, or to the existing element that prevented the // insertion. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // NOTE: Although the STL does not specify which element may be inserted if // multiple keys compare equivalently, for `flat_hash_map` we guarantee the // first match is inserted. // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. // // NOTE: Although the STL does not specify which element may be inserted if // multiple keys compare equivalently within the initializer list, for // `flat_hash_map` we guarantee the first match is inserted. using Base::insert; // flat_hash_map::insert_or_assign() // // Inserts an element of the specified value into the `flat_hash_map` provided // that a value with the given key does not already exist, or replaces it with // the element value if a key for that value already exists, returning an // iterator pointing to the newly inserted element. If rehashing occurs due // to the insertion, all existing iterators are invalidated. Overloads are // listed below. 
// // pair insert_or_assign(const init_type& k, T&& obj): // pair insert_or_assign(init_type&& k, T&& obj): // // Inserts/Assigns (or moves) the element of the specified key into the // `flat_hash_map`. // // iterator insert_or_assign(const_iterator hint, // const init_type& k, T&& obj): // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj): // // Inserts/Assigns (or moves) the element of the specified key into the // `flat_hash_map` using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. using Base::insert_or_assign; // flat_hash_map::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `flat_hash_map`, provided that no element with the given key // already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. Prefer `try_emplace()` unless your key is not // copyable or moveable. // // If rehashing occurs due to the insertion, all iterators are invalidated. using Base::emplace; // flat_hash_map::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `flat_hash_map`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search, and only inserts // provided that no element with the given key already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. Prefer `try_emplace()` unless your key is not // copyable or moveable. // // If rehashing occurs due to the insertion, all iterators are invalidated. 
using Base::emplace_hint; // flat_hash_map::try_emplace() // // Inserts an element of the specified value by constructing it in-place // within the `flat_hash_map`, provided that no element with the given key // already exists. Unlike `emplace()`, if an element with the given key // already exists, we guarantee that no element is constructed. // // If rehashing occurs due to the insertion, all iterators are invalidated. // Overloads are listed below. // // pair try_emplace(const key_type& k, Args&&... args): // pair try_emplace(key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `flat_hash_map`. // // iterator try_emplace(const_iterator hint, // const key_type& k, Args&&... args): // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args): // // Inserts (via copy or move) the element of the specified key into the // `flat_hash_map` using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. // // All `try_emplace()` overloads make the same guarantees regarding rvalue // arguments as `std::unordered_map::try_emplace()`, namely that these // functions will not move from rvalue arguments if insertions do not happen. using Base::try_emplace; // flat_hash_map::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the key,value pair of the element at the indicated position and // returns a node handle owning that extracted data. // // node_type extract(const key_type& x): // // Extracts the key,value pair of the element with a key matching the passed // key value and returns a node handle owning that extracted data. If the // `flat_hash_map` does not contain an element with a matching key, this // function returns an empty node handle. 
// // NOTE: when compiled in an earlier version of C++ than C++17, // `node_type::key()` returns a const reference to the key instead of a // mutable reference. We cannot safely return a mutable reference without // std::launder (which is not available before C++17). using Base::extract; // flat_hash_map::merge() // // Extracts elements from a given `source` flat hash map into this // `flat_hash_map`. If the destination `flat_hash_map` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; // flat_hash_map::swap(flat_hash_map& other) // // Exchanges the contents of this `flat_hash_map` with those of the `other` // flat hash map, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `flat_hash_map` remain valid, excepting // for the past-the-end iterator, which is invalidated. // // `swap()` requires that the flat hash map's hashing and key equivalence // functions be Swappable, and are exchanged using unqualified calls to // non-member `swap()`. If the map's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. using Base::swap; // flat_hash_map::rehash(count) // // Rehashes the `flat_hash_map`, setting the number of slots to be at least // the passed value. If the new number of slots increases the load factor more // than the current maximum load factor // (`count` < `size()` / `max_load_factor()`), then the new number of slots // will be at least `size()` / `max_load_factor()`. // // To force a rehash, pass rehash(0). // // NOTE: unlike behavior in `std::unordered_map`, references are also // invalidated upon a `rehash()`. 
using Base::rehash; // flat_hash_map::reserve(count) // // Sets the number of slots in the `flat_hash_map` to the number needed to // accommodate at least `count` total elements without exceeding the current // maximum load factor, and may rehash the container if needed. using Base::reserve; // flat_hash_map::at() // // Returns a reference to the mapped value of the element with key equivalent // to the passed key. using Base::at; // flat_hash_map::contains() // // Determines whether an element with a key comparing equal to the given `key` // exists within the `flat_hash_map`, returning `true` if so or `false` // otherwise. using Base::contains; // flat_hash_map::count(const Key& key) const // // Returns the number of elements with a key comparing equal to the given // `key` within the `flat_hash_map`. note that this function will return // either `1` or `0` since duplicate keys are not allowed within a // `flat_hash_map`. using Base::count; // flat_hash_map::equal_range() // // Returns a closed range [first, last], defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `flat_hash_map`. using Base::equal_range; // flat_hash_map::find() // // Finds an element with the passed `key` within the `flat_hash_map`. using Base::find; // flat_hash_map::operator[]() // // Returns a reference to the value mapped to the passed key within the // `flat_hash_map`, performing an `insert()` if the key does not already // exist. // // If an insertion occurs and results in a rehashing of the container, all // iterators are invalidated. Otherwise iterators are not affected and // references are not invalidated. Overloads are listed below. // // T& operator[](const Key& key): // // Inserts an init_type object constructed in-place if the element with the // given key does not exist. // // T& operator[](Key&& key): // // Inserts an init_type object constructed in-place provided that an element // with the given key does not exist. 
using Base::operator[]; // flat_hash_map::bucket_count() // // Returns the number of "buckets" within the `flat_hash_map`. Note that // because a flat hash map contains all elements within its internal storage, // this value simply equals the current capacity of the `flat_hash_map`. using Base::bucket_count; // flat_hash_map::load_factor() // // Returns the current load factor of the `flat_hash_map` (the average number // of slots occupied with a value within the hash map). using Base::load_factor; // flat_hash_map::max_load_factor() // // Manages the maximum load factor of the `flat_hash_map`. Overloads are // listed below. // // float flat_hash_map::max_load_factor() // // Returns the current maximum load factor of the `flat_hash_map`. // // void flat_hash_map::max_load_factor(float ml) // // Sets the maximum load factor of the `flat_hash_map` to the passed value. // // NOTE: This overload is provided only for API compatibility with the STL; // `flat_hash_map` will ignore any set load factor and manage its rehashing // internally as an implementation detail. using Base::max_load_factor; // flat_hash_map::get_allocator() // // Returns the allocator function associated with this `flat_hash_map`. using Base::get_allocator; // flat_hash_map::hash_function() // // Returns the hashing function used to hash the keys within this // `flat_hash_map`. using Base::hash_function; // flat_hash_map::key_eq() // // Returns the function used for comparing keys equality. using Base::key_eq; }; // erase_if(flat_hash_map<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. // Returns the number of erased elements. 
template typename flat_hash_map::size_type erase_if( flat_hash_map& c, Predicate pred) { return container_internal::EraseIf(pred, &c); } namespace container_internal { template struct FlatHashMapPolicy { using slot_policy = container_internal::map_slot_policy; using slot_type = typename slot_policy::slot_type; using key_type = K; using mapped_type = V; using init_type = std::pair; template static void construct(Allocator* alloc, slot_type* slot, Args&&... args) { slot_policy::construct(alloc, slot, std::forward(args)...); } template static void destroy(Allocator* alloc, slot_type* slot) { slot_policy::destroy(alloc, slot); } template static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { slot_policy::transfer(alloc, new_slot, old_slot); } template static decltype(absl::container_internal::DecomposePair( std::declval(), std::declval()...)) apply(F&& f, Args&&... args) { return absl::container_internal::DecomposePair(std::forward(f), std::forward(args)...); } static size_t space_used(const slot_type*) { return 0; } static std::pair& element(slot_type* slot) { return slot->value; } static V& value(std::pair* kv) { return kv->second; } static const V& value(const std::pair* kv) { return kv->second; } }; } // namespace container_internal namespace container_algorithm_internal { // Specialization of trait in absl/algorithm/container.h template struct IsUnorderedContainer< absl::flat_hash_map> : std::true_type {}; } // namespace container_algorithm_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_ abseil-20220623.1/absl/container/flat_hash_map_test.cc000066400000000000000000000227421430371345100224210ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/flat_hash_map.h" #include #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/unordered_map_constructor_test.h" #include "absl/container/internal/unordered_map_lookup_test.h" #include "absl/container/internal/unordered_map_members_test.h" #include "absl/container/internal/unordered_map_modifiers_test.h" #include "absl/types/any.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::container_internal::hash_internal::Enum; using ::absl::container_internal::hash_internal::EnumClass; using ::testing::_; using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; // Check that absl::flat_hash_map works in a global constructor. 
struct BeforeMain { BeforeMain() { absl::flat_hash_map x; x.insert({1, 1}); ABSL_RAW_CHECK(x.find(0) == x.end(), "x should not contain 0"); auto it = x.find(1); ABSL_RAW_CHECK(it != x.end(), "x should contain 1"); ABSL_RAW_CHECK(it->second, "1 should map to 1"); } }; const BeforeMain before_main; template using Map = flat_hash_map>>; static_assert(!std::is_standard_layout(), ""); using MapTypes = ::testing::Types, Map, Map, Map, Map, Map>; INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes); using UniquePtrMapTypes = ::testing::Types>>; INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, UniquePtrModifiersTest, UniquePtrMapTypes); TEST(FlatHashMap, StandardLayout) { struct Int { explicit Int(size_t value) : value(value) {} Int() : value(0) { ADD_FAILURE(); } Int(const Int& other) : value(other.value) { ADD_FAILURE(); } Int(Int&&) = default; bool operator==(const Int& other) const { return value == other.value; } size_t value; }; static_assert(std::is_standard_layout(), ""); struct Hash { size_t operator()(const Int& obj) const { return obj.value; } }; // Verify that neither the key nor the value get default-constructed or // copy-constructed. { flat_hash_map m; m.try_emplace(Int(1), Int(2)); m.try_emplace(Int(3), Int(4)); m.erase(Int(1)); m.rehash(2 * m.bucket_count()); } { flat_hash_map m; m.try_emplace(Int(1), Int(2)); m.try_emplace(Int(3), Int(4)); m.erase(Int(1)); m.clear(); } } // gcc becomes unhappy if this is inside the method, so pull it out here. struct balast {}; TEST(FlatHashMap, IteratesMsan) { // Because SwissTable randomizes on pointer addresses, we keep old tables // around to ensure we don't reuse old memory. 
std::vector> garbage; for (int i = 0; i < 100; ++i) { absl::flat_hash_map t; for (int j = 0; j < 100; ++j) { t[j]; for (const auto& p : t) EXPECT_THAT(p, Pair(_, _)); } garbage.push_back(std::move(t)); } } // Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to // avoid creating expensive key elements when the item is already present in the // map. struct LazyInt { explicit LazyInt(size_t value, int* tracker) : value(value), tracker(tracker) {} explicit operator size_t() const { ++*tracker; return value; } size_t value; int* tracker; }; struct Hash { using is_transparent = void; int* tracker; size_t operator()(size_t obj) const { ++*tracker; return obj; } size_t operator()(const LazyInt& obj) const { ++*tracker; return obj.value; } }; struct Eq { using is_transparent = void; bool operator()(size_t lhs, size_t rhs) const { return lhs == rhs; } bool operator()(size_t lhs, const LazyInt& rhs) const { return lhs == rhs.value; } }; TEST(FlatHashMap, LazyKeyPattern) { // hashes are only guaranteed in opt mode, we use assertions to track internal // state that can cause extra calls to hash. 
int conversions = 0; int hashes = 0; flat_hash_map m(0, Hash{&hashes}); m.reserve(3); m[LazyInt(1, &conversions)] = 1; EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1))); EXPECT_EQ(conversions, 1); #ifdef NDEBUG EXPECT_EQ(hashes, 1); #endif m[LazyInt(1, &conversions)] = 2; EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2))); EXPECT_EQ(conversions, 1); #ifdef NDEBUG EXPECT_EQ(hashes, 2); #endif m.try_emplace(LazyInt(2, &conversions), 3); EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); EXPECT_EQ(conversions, 2); #ifdef NDEBUG EXPECT_EQ(hashes, 3); #endif m.try_emplace(LazyInt(2, &conversions), 4); EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3))); EXPECT_EQ(conversions, 2); #ifdef NDEBUG EXPECT_EQ(hashes, 4); #endif } TEST(FlatHashMap, BitfieldArgument) { union { int n : 1; }; n = 0; flat_hash_map m; m.erase(n); m.count(n); m.prefetch(n); m.find(n); m.contains(n); m.equal_range(n); m.insert_or_assign(n, n); m.insert_or_assign(m.end(), n, n); m.try_emplace(n); m.try_emplace(m.end(), n); m.at(n); m[n]; } TEST(FlatHashMap, MergeExtractInsert) { // We can't test mutable keys, or non-copyable keys with flat_hash_map. // Test that the nodes have the proper API. absl::flat_hash_map m = {{1, 7}, {2, 9}}; auto node = m.extract(1); EXPECT_TRUE(node); EXPECT_EQ(node.key(), 1); EXPECT_EQ(node.mapped(), 7); EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9))); node.mapped() = 17; m.insert(std::move(node)); EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9))); } bool FirstIsEven(std::pair p) { return p.first % 2 == 0; } TEST(FlatHashMap, EraseIf) { // Erase all elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; EXPECT_EQ(erase_if(s, [](std::pair) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. 
{ flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; EXPECT_EQ(erase_if(s, [](std::pair) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(4, 4), Pair(5, 5))); } // Erase specific elements. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; EXPECT_EQ(erase_if(s, [](std::pair kvp) { return kvp.first % 2 == 1; }), 3); EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4))); } // Predicate is function reference. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; EXPECT_EQ(erase_if(s, FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } // Predicate is function pointer. { flat_hash_map s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}}; EXPECT_EQ(erase_if(s, &FirstIsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5))); } } // This test requires std::launder for mutable key access in node handles. #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 TEST(FlatHashMap, NodeHandleMutableKeyAccess) { flat_hash_map map; map["key1"] = "mapped"; auto nh = map.extract(map.begin()); nh.key().resize(3); map.insert(std::move(nh)); EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped"))); } #endif TEST(FlatHashMap, Reserve) { // Verify that if we reserve(size() + n) then we can perform n insertions // without a rehash, i.e., without invalidating any references. for (size_t trial = 0; trial < 20; ++trial) { for (size_t initial = 3; initial < 100; ++initial) { // Fill in `initial` entries, then erase 2 of them, then reserve space for // two inserts and check for reference stability while doing the inserts. flat_hash_map map; for (size_t i = 0; i < initial; ++i) { map[i] = i; } map.erase(0); map.erase(1); map.reserve(map.size() + 2); size_t& a2 = map[2]; // In the event of a failure, asan will complain in one of these two // assignments. 
map[initial] = a2; map[initial + 1] = a2; // Fail even when not under asan: size_t& a2new = map[2]; EXPECT_EQ(&a2, &a2new); } } } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/flat_hash_set.h000066400000000000000000000451101430371345100212340ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: flat_hash_set.h // ----------------------------------------------------------------------------- // // An `absl::flat_hash_set` is an unordered associative container designed to // be a more efficient replacement for `std::unordered_set`. Like // `unordered_set`, search, insertion, and deletion of set elements can be done // as an `O(1)` operation. However, `flat_hash_set` (and other unordered // associative containers known as the collection of Abseil "Swiss tables") // contain other optimizations that result in both memory and computation // advantages. // // In most cases, your default choice for a hash set should be a set of type // `flat_hash_set`. 
#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_ #define ABSL_CONTAINER_FLAT_HASH_SET_H_ #include #include #include "absl/algorithm/container.h" #include "absl/base/macros.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export #include "absl/memory/memory.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct FlatHashSetPolicy; } // namespace container_internal // ----------------------------------------------------------------------------- // absl::flat_hash_set // ----------------------------------------------------------------------------- // // An `absl::flat_hash_set` is an unordered associative container which has // been optimized for both speed and memory footprint in most common use cases. // Its interface is similar to that of `std::unordered_set` with the // following notable differences: // // * Requires keys that are CopyConstructible // * Supports heterogeneous lookup, through `find()` and `insert()`, provided // that the set is provided a compatible heterogeneous hashing function and // equality operator. // * Invalidates any references and pointers to elements within the table after // `rehash()`. // * Contains a `capacity()` member function indicating the number of element // slots (open, deleted, and empty) within the hash set. // * Returns `void` from the `erase(iterator)` overload. // // By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All // fundamental and Abseil types that support the `absl::Hash` framework have a // compatible equality operator for comparing insertions into `flat_hash_set`. // If your type is not yet supported by the `absl::Hash` framework, see // absl/hash/hash.h for information on extending Abseil hashing to user-defined // types. 
// // Using `absl::flat_hash_set` at interface boundaries in dynamically loaded // libraries (e.g. .dll, .so) is unsupported due to way `absl::Hash` values may // be randomized across dynamically loaded libraries. // // NOTE: A `flat_hash_set` stores its keys directly inside its implementation // array to avoid memory indirection. Because a `flat_hash_set` is designed to // move data when rehashed, set keys will not retain pointer stability. If you // require pointer stability, consider using // `absl::flat_hash_set>`. If your type is not moveable and // you require pointer stability, consider `absl::node_hash_set` instead. // // Example: // // // Create a flat hash set of three strings // absl::flat_hash_set ducks = // {"huey", "dewey", "louie"}; // // // Insert a new element into the flat hash set // ducks.insert("donald"); // // // Force a rehash of the flat hash set // ducks.rehash(0); // // // See if "dewey" is present // if (ducks.contains("dewey")) { // std::cout << "We found dewey!" << std::endl; // } template , class Eq = absl::container_internal::hash_default_eq, class Allocator = std::allocator> class flat_hash_set : public absl::container_internal::raw_hash_set< absl::container_internal::FlatHashSetPolicy, Hash, Eq, Allocator> { using Base = typename flat_hash_set::raw_hash_set; public: // Constructors and Assignment Operators // // A flat_hash_set supports the same overload set as `std::unordered_set` // for construction and assignment: // // * Default constructor // // // No allocation for the table's elements is made. 
// absl::flat_hash_set set1; // // * Initializer List constructor // // absl::flat_hash_set set2 = // {{"huey"}, {"dewey"}, {"louie"},}; // // * Copy constructor // // absl::flat_hash_set set3(set2); // // * Copy assignment operator // // // Hash functor and Comparator are copied as well // absl::flat_hash_set set4; // set4 = set3; // // * Move constructor // // // Move is guaranteed efficient // absl::flat_hash_set set5(std::move(set4)); // // * Move assignment operator // // // May be efficient if allocators are compatible // absl::flat_hash_set set6; // set6 = std::move(set5); // // * Range constructor // // std::vector v = {"a", "b"}; // absl::flat_hash_set set7(v.begin(), v.end()); flat_hash_set() {} using Base::Base; // flat_hash_set::begin() // // Returns an iterator to the beginning of the `flat_hash_set`. using Base::begin; // flat_hash_set::cbegin() // // Returns a const iterator to the beginning of the `flat_hash_set`. using Base::cbegin; // flat_hash_set::cend() // // Returns a const iterator to the end of the `flat_hash_set`. using Base::cend; // flat_hash_set::end() // // Returns an iterator to the end of the `flat_hash_set`. using Base::end; // flat_hash_set::capacity() // // Returns the number of element slots (assigned, deleted, and empty) // available within the `flat_hash_set`. // // NOTE: this member function is particular to `absl::flat_hash_set` and is // not provided in the `std::unordered_set` API. using Base::capacity; // flat_hash_set::empty() // // Returns whether or not the `flat_hash_set` is empty. using Base::empty; // flat_hash_set::max_size() // // Returns the largest theoretical possible number of elements within a // `flat_hash_set` under current memory constraints. This value can be thought // of the largest value of `std::distance(begin(), end())` for a // `flat_hash_set`. using Base::max_size; // flat_hash_set::size() // // Returns the number of elements currently within the `flat_hash_set`. 
using Base::size; // flat_hash_set::clear() // // Removes all elements from the `flat_hash_set`. Invalidates any references, // pointers, or iterators referring to contained elements. // // NOTE: this operation may shrink the underlying buffer. To avoid shrinking // the underlying buffer call `erase(begin(), end())`. using Base::clear; // flat_hash_set::erase() // // Erases elements within the `flat_hash_set`. Erasing does not trigger a // rehash. Overloads are listed below. // // void erase(const_iterator pos): // // Erases the element at `position` of the `flat_hash_set`, returning // `void`. // // NOTE: returning `void` in this case is different than that of STL // containers in general and `std::unordered_set` in particular (which // return an iterator to the element following the erased element). If that // iterator is needed, simply post increment the iterator: // // set.erase(it++); // // iterator erase(const_iterator first, const_iterator last): // // Erases the elements in the open interval [`first`, `last`), returning an // iterator pointing to `last`. // // size_type erase(const key_type& key): // // Erases the element with the matching key, if it exists, returning the // number of elements erased (0 or 1). using Base::erase; // flat_hash_set::insert() // // Inserts an element of the specified value into the `flat_hash_set`, // returning an iterator pointing to the newly inserted element, provided that // an element with the given key does not already exist. If rehashing occurs // due to the insertion, all iterators are invalidated. Overloads are listed // below. // // std::pair insert(const T& value): // // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an // iterator to the inserted element (or to the element that prevented the // insertion) and a bool denoting whether the insertion took place. // // std::pair insert(T&& value): // // Inserts a moveable value into the `flat_hash_set`. 
Returns a pair // consisting of an iterator to the inserted element (or to the element that // prevented the insertion) and a bool denoting whether the insertion took // place. // // iterator insert(const_iterator hint, const T& value): // iterator insert(const_iterator hint, T&& value): // // Inserts a value, using the position of `hint` as a non-binding suggestion // for where to begin the insertion search. Returns an iterator to the // inserted element, or to the existing element that prevented the // insertion. // // void insert(InputIterator first, InputIterator last): // // Inserts a range of values [`first`, `last`). // // NOTE: Although the STL does not specify which element may be inserted if // multiple keys compare equivalently, for `flat_hash_set` we guarantee the // first match is inserted. // // void insert(std::initializer_list ilist): // // Inserts the elements within the initializer list `ilist`. // // NOTE: Although the STL does not specify which element may be inserted if // multiple keys compare equivalently within the initializer list, for // `flat_hash_set` we guarantee the first match is inserted. using Base::insert; // flat_hash_set::emplace() // // Inserts an element of the specified value by constructing it in-place // within the `flat_hash_set`, provided that no element with the given key // already exists. // // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. // // If rehashing occurs due to the insertion, all iterators are invalidated. using Base::emplace; // flat_hash_set::emplace_hint() // // Inserts an element of the specified value by constructing it in-place // within the `flat_hash_set`, using the position of `hint` as a non-binding // suggestion for where to begin the insertion search, and only inserts // provided that no element with the given key already exists. 
// // The element may be constructed even if there already is an element with the // key in the container, in which case the newly constructed element will be // destroyed immediately. // // If rehashing occurs due to the insertion, all iterators are invalidated. using Base::emplace_hint; // flat_hash_set::extract() // // Extracts the indicated element, erasing it in the process, and returns it // as a C++17-compatible node handle. Overloads are listed below. // // node_type extract(const_iterator position): // // Extracts the element at the indicated position and returns a node handle // owning that extracted data. // // node_type extract(const key_type& x): // // Extracts the element with the key matching the passed key value and // returns a node handle owning that extracted data. If the `flat_hash_set` // does not contain an element with a matching key, this function returns an // empty node handle. using Base::extract; // flat_hash_set::merge() // // Extracts elements from a given `source` flat hash set into this // `flat_hash_set`. If the destination `flat_hash_set` already contains an // element with an equivalent key, that element is not extracted. using Base::merge; // flat_hash_set::swap(flat_hash_set& other) // // Exchanges the contents of this `flat_hash_set` with those of the `other` // flat hash set, avoiding invocation of any move, copy, or swap operations on // individual elements. // // All iterators and references on the `flat_hash_set` remain valid, excepting // for the past-the-end iterator, which is invalidated. // // `swap()` requires that the flat hash set's hashing and key equivalence // functions be Swappable, and are exchaged using unqualified calls to // non-member `swap()`. If the set's allocator has // `std::allocator_traits::propagate_on_container_swap::value` // set to `true`, the allocators are also exchanged using an unqualified call // to non-member `swap()`; otherwise, the allocators are not swapped. 
using Base::swap; // flat_hash_set::rehash(count) // // Rehashes the `flat_hash_set`, setting the number of slots to be at least // the passed value. If the new number of slots increases the load factor more // than the current maximum load factor // (`count` < `size()` / `max_load_factor()`), then the new number of slots // will be at least `size()` / `max_load_factor()`. // // To force a rehash, pass rehash(0). // // NOTE: unlike behavior in `std::unordered_set`, references are also // invalidated upon a `rehash()`. using Base::rehash; // flat_hash_set::reserve(count) // // Sets the number of slots in the `flat_hash_set` to the number needed to // accommodate at least `count` total elements without exceeding the current // maximum load factor, and may rehash the container if needed. using Base::reserve; // flat_hash_set::contains() // // Determines whether an element comparing equal to the given `key` exists // within the `flat_hash_set`, returning `true` if so or `false` otherwise. using Base::contains; // flat_hash_set::count(const Key& key) const // // Returns the number of elements comparing equal to the given `key` within // the `flat_hash_set`. note that this function will return either `1` or `0` // since duplicate elements are not allowed within a `flat_hash_set`. using Base::count; // flat_hash_set::equal_range() // // Returns a closed range [first, last], defined by a `std::pair` of two // iterators, containing all elements with the passed key in the // `flat_hash_set`. using Base::equal_range; // flat_hash_set::find() // // Finds an element with the passed `key` within the `flat_hash_set`. using Base::find; // flat_hash_set::bucket_count() // // Returns the number of "buckets" within the `flat_hash_set`. Note that // because a flat hash set contains all elements within its internal storage, // this value simply equals the current capacity of the `flat_hash_set`. 
using Base::bucket_count; // flat_hash_set::load_factor() // // Returns the current load factor of the `flat_hash_set` (the average number // of slots occupied with a value within the hash set). using Base::load_factor; // flat_hash_set::max_load_factor() // // Manages the maximum load factor of the `flat_hash_set`. Overloads are // listed below. // // float flat_hash_set::max_load_factor() // // Returns the current maximum load factor of the `flat_hash_set`. // // void flat_hash_set::max_load_factor(float ml) // // Sets the maximum load factor of the `flat_hash_set` to the passed value. // // NOTE: This overload is provided only for API compatibility with the STL; // `flat_hash_set` will ignore any set load factor and manage its rehashing // internally as an implementation detail. using Base::max_load_factor; // flat_hash_set::get_allocator() // // Returns the allocator function associated with this `flat_hash_set`. using Base::get_allocator; // flat_hash_set::hash_function() // // Returns the hashing function used to hash the keys within this // `flat_hash_set`. using Base::hash_function; // flat_hash_set::key_eq() // // Returns the function used for comparing keys equality. using Base::key_eq; }; // erase_if(flat_hash_set<>, Pred) // // Erases all elements that satisfy the predicate `pred` from the container `c`. // Returns the number of erased elements. template typename flat_hash_set::size_type erase_if( flat_hash_set& c, Predicate pred) { return container_internal::EraseIf(pred, &c); } namespace container_internal { template struct FlatHashSetPolicy { using slot_type = T; using key_type = T; using init_type = T; using constant_iterators = std::true_type; template static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); } template static void destroy(Allocator* alloc, slot_type* slot) { absl::allocator_traits::destroy(*alloc, slot); } template static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { construct(alloc, new_slot, std::move(*old_slot)); destroy(alloc, old_slot); } static T& element(slot_type* slot) { return *slot; } template static decltype(absl::container_internal::DecomposeValue( std::declval(), std::declval()...)) apply(F&& f, Args&&... args) { return absl::container_internal::DecomposeValue( std::forward(f), std::forward(args)...); } static size_t space_used(const T*) { return 0; } }; } // namespace container_internal namespace container_algorithm_internal { // Specialization of trait in absl/algorithm/container.h template struct IsUnorderedContainer> : std::true_type {}; } // namespace container_algorithm_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_FLAT_HASH_SET_H_ abseil-20220623.1/absl/container/flat_hash_set_test.cc000066400000000000000000000130001430371345100224220ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/flat_hash_set.h" #include #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/hash_generator_testing.h" #include "absl/container/internal/unordered_set_constructor_test.h" #include "absl/container/internal/unordered_set_lookup_test.h" #include "absl/container/internal/unordered_set_members_test.h" #include "absl/container/internal/unordered_set_modifiers_test.h" #include "absl/memory/memory.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::container_internal::hash_internal::Enum; using ::absl::container_internal::hash_internal::EnumClass; using ::testing::IsEmpty; using ::testing::Pointee; using ::testing::UnorderedElementsAre; using ::testing::UnorderedElementsAreArray; // Check that absl::flat_hash_set works in a global constructor. struct BeforeMain { BeforeMain() { absl::flat_hash_set x; x.insert(1); ABSL_RAW_CHECK(!x.contains(0), "x should not contain 0"); ABSL_RAW_CHECK(x.contains(1), "x should contain 1"); } }; const BeforeMain before_main; template using Set = absl::flat_hash_set>; using SetTypes = ::testing::Types, Set, Set, Set>; INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes); INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes); TEST(FlatHashSet, EmplaceString) { std::vector v = {"a", "b"}; absl::flat_hash_set hs(v.begin(), v.end()); EXPECT_THAT(hs, UnorderedElementsAreArray(v)); } TEST(FlatHashSet, BitfieldArgument) { union { int n : 1; }; n = 0; absl::flat_hash_set s = {n}; s.insert(n); s.insert(s.end(), n); s.insert({n}); s.erase(n); s.count(n); s.prefetch(n); s.find(n); s.contains(n); s.equal_range(n); } TEST(FlatHashSet, MergeExtractInsert) { struct Hash { size_t operator()(const std::unique_ptr& p) const { return *p; } }; struct Eq { bool 
operator()(const std::unique_ptr& a, const std::unique_ptr& b) const { return *a == *b; } }; absl::flat_hash_set, Hash, Eq> set1, set2; set1.insert(absl::make_unique(7)); set1.insert(absl::make_unique(17)); set2.insert(absl::make_unique(7)); set2.insert(absl::make_unique(19)); EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17))); EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19))); set1.merge(set2); EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19))); EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); auto node = set1.extract(absl::make_unique(7)); EXPECT_TRUE(node); EXPECT_THAT(node.value(), Pointee(7)); EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19))); auto insert_result = set2.insert(std::move(node)); EXPECT_FALSE(node); EXPECT_FALSE(insert_result.inserted); EXPECT_TRUE(insert_result.node); EXPECT_THAT(insert_result.node.value(), Pointee(7)); EXPECT_EQ(**insert_result.position, 7); EXPECT_NE(insert_result.position->get(), insert_result.node.value().get()); EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7))); node = set1.extract(absl::make_unique(17)); EXPECT_TRUE(node); EXPECT_THAT(node.value(), Pointee(17)); EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19))); node.value() = absl::make_unique(23); insert_result = set2.insert(std::move(node)); EXPECT_FALSE(node); EXPECT_TRUE(insert_result.inserted); EXPECT_FALSE(insert_result.node); EXPECT_EQ(**insert_result.position, 23); EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23))); } bool IsEven(int k) { return k % 2 == 0; } TEST(FlatHashSet, EraseIf) { // Erase all elements. { flat_hash_set s = {1, 2, 3, 4, 5}; EXPECT_EQ(erase_if(s, [](int) { return true; }), 5); EXPECT_THAT(s, IsEmpty()); } // Erase no elements. { flat_hash_set s = {1, 2, 3, 4, 5}; EXPECT_EQ(erase_if(s, [](int) { return false; }), 0); EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5)); } // Erase specific elements. 
{ flat_hash_set s = {1, 2, 3, 4, 5}; EXPECT_EQ(erase_if(s, [](int k) { return k % 2 == 1; }), 3); EXPECT_THAT(s, UnorderedElementsAre(2, 4)); } // Predicate is function reference. { flat_hash_set s = {1, 2, 3, 4, 5}; EXPECT_EQ(erase_if(s, IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } // Predicate is function pointer. { flat_hash_set s = {1, 2, 3, 4, 5}; EXPECT_EQ(erase_if(s, &IsEven), 2); EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5)); } } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/inlined_vector.h000066400000000000000000001006371430371345100214420ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: inlined_vector.h // ----------------------------------------------------------------------------- // // This header file contains the declaration and definition of an "inlined // vector" which behaves in an equivalent fashion to a `std::vector`, except // that storage for small sequences of the vector are provided inline without // requiring any heap allocation. // // An `absl::InlinedVector` specifies the default capacity `N` as one of // its template parameters. Instances where `size() <= N` hold contained // elements in inline space. 
Typically `N` is very small so that sequences that // are expected to be short do not require allocations. // // An `absl::InlinedVector` does not usually require a specific allocator. If // the inlined vector grows beyond its initial constraints, it will need to // allocate (as any normal `std::vector` would). This is usually performed with // the default allocator (defined as `std::allocator`). Optionally, a custom // allocator type may be specified as `A` in `absl::InlinedVector`. #ifndef ABSL_CONTAINER_INLINED_VECTOR_H_ #define ABSL_CONTAINER_INLINED_VECTOR_H_ #include #include #include #include #include #include #include #include #include #include "absl/algorithm/algorithm.h" #include "absl/base/internal/throw_delegate.h" #include "absl/base/macros.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/inlined_vector.h" #include "absl/memory/memory.h" namespace absl { ABSL_NAMESPACE_BEGIN // ----------------------------------------------------------------------------- // InlinedVector // ----------------------------------------------------------------------------- // // An `absl::InlinedVector` is designed to be a drop-in replacement for // `std::vector` for use cases where the vector's size is sufficiently small // that it can be inlined. If the inlined vector does grow beyond its estimated // capacity, it will trigger an initial allocation on the heap, and will behave // as a `std::vector`. The API of the `absl::InlinedVector` within this file is // designed to cover the same API footprint as covered by `std::vector`. 
template > class InlinedVector { static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity."); using Storage = inlined_vector_internal::Storage; template using AllocatorTraits = inlined_vector_internal::AllocatorTraits; template using MoveIterator = inlined_vector_internal::MoveIterator; template using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk; template using IteratorValueAdapter = inlined_vector_internal::IteratorValueAdapter; template using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter; template using DefaultValueAdapter = inlined_vector_internal::DefaultValueAdapter; template using EnableIfAtLeastForwardIterator = absl::enable_if_t< inlined_vector_internal::IsAtLeastForwardIterator::value, int>; template using DisableIfAtLeastForwardIterator = absl::enable_if_t< !inlined_vector_internal::IsAtLeastForwardIterator::value, int>; public: using allocator_type = A; using value_type = inlined_vector_internal::ValueType; using pointer = inlined_vector_internal::Pointer; using const_pointer = inlined_vector_internal::ConstPointer; using size_type = inlined_vector_internal::SizeType; using difference_type = inlined_vector_internal::DifferenceType; using reference = inlined_vector_internal::Reference; using const_reference = inlined_vector_internal::ConstReference; using iterator = inlined_vector_internal::Iterator; using const_iterator = inlined_vector_internal::ConstIterator; using reverse_iterator = inlined_vector_internal::ReverseIterator; using const_reverse_iterator = inlined_vector_internal::ConstReverseIterator; // --------------------------------------------------------------------------- // InlinedVector Constructors and Destructor // --------------------------------------------------------------------------- // Creates an empty inlined vector with a value-initialized allocator. InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {} // Creates an empty inlined vector with a copy of `allocator`. 
explicit InlinedVector(const allocator_type& allocator) noexcept : storage_(allocator) {} // Creates an inlined vector with `n` copies of `value_type()`. explicit InlinedVector(size_type n, const allocator_type& allocator = allocator_type()) : storage_(allocator) { storage_.Initialize(DefaultValueAdapter(), n); } // Creates an inlined vector with `n` copies of `v`. InlinedVector(size_type n, const_reference v, const allocator_type& allocator = allocator_type()) : storage_(allocator) { storage_.Initialize(CopyValueAdapter(std::addressof(v)), n); } // Creates an inlined vector with copies of the elements of `list`. InlinedVector(std::initializer_list list, const allocator_type& allocator = allocator_type()) : InlinedVector(list.begin(), list.end(), allocator) {} // Creates an inlined vector with elements constructed from the provided // forward iterator range [`first`, `last`). // // NOTE: the `enable_if` prevents ambiguous interpretation between a call to // this constructor with two integral arguments and a call to the above // `InlinedVector(size_type, const_reference)` constructor. template = 0> InlinedVector(ForwardIterator first, ForwardIterator last, const allocator_type& allocator = allocator_type()) : storage_(allocator) { storage_.Initialize(IteratorValueAdapter(first), static_cast(std::distance(first, last))); } // Creates an inlined vector with elements constructed from the provided input // iterator range [`first`, `last`). template = 0> InlinedVector(InputIterator first, InputIterator last, const allocator_type& allocator = allocator_type()) : storage_(allocator) { std::copy(first, last, std::back_inserter(*this)); } // Creates an inlined vector by copying the contents of `other` using // `other`'s allocator. InlinedVector(const InlinedVector& other) : InlinedVector(other, other.storage_.GetAllocator()) {} // Creates an inlined vector by copying the contents of `other` using the // provided `allocator`. 
InlinedVector(const InlinedVector& other, const allocator_type& allocator) : storage_(allocator) { if (other.empty()) { // Empty; nothing to do. } else if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) { // Memcpy-able and do not need allocation. storage_.MemcpyFrom(other.storage_); } else { storage_.InitFrom(other.storage_); } } // Creates an inlined vector by moving in the contents of `other` without // allocating. If `other` contains allocated memory, the newly-created inlined // vector will take ownership of that memory. However, if `other` does not // contain allocated memory, the newly-created inlined vector will perform // element-wise move construction of the contents of `other`. // // NOTE: since no allocation is performed for the inlined vector in either // case, the `noexcept(...)` specification depends on whether moving the // underlying objects can throw. It is assumed assumed that... // a) move constructors should only throw due to allocation failure. // b) if `value_type`'s move constructor allocates, it uses the same // allocation function as the inlined vector's allocator. // Thus, the move constructor is non-throwing if the allocator is non-throwing // or `value_type`'s move constructor is specified as `noexcept`. 
InlinedVector(InlinedVector&& other) noexcept( absl::allocator_is_nothrow::value || std::is_nothrow_move_constructible::value) : storage_(other.storage_.GetAllocator()) { if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else if (other.storage_.GetIsAllocated()) { storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { IteratorValueAdapter> other_values( MoveIterator(other.storage_.GetInlinedData())); inlined_vector_internal::ConstructElements( storage_.GetAllocator(), storage_.GetInlinedData(), other_values, other.storage_.GetSize()); storage_.SetInlinedSize(other.storage_.GetSize()); } } // Creates an inlined vector by moving in the contents of `other` with a copy // of `allocator`. // // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other` // contains allocated memory, this move constructor will still allocate. Since // allocation is performed, this constructor can only be `noexcept` if the // specified allocator is also `noexcept`. 
InlinedVector( InlinedVector&& other, const allocator_type& allocator) noexcept(absl::allocator_is_nothrow::value) : storage_(allocator) { if (IsMemcpyOk::value) { storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) && other.storage_.GetIsAllocated()) { storage_.SetAllocation({other.storage_.GetAllocatedData(), other.storage_.GetAllocatedCapacity()}); storage_.SetAllocatedSize(other.storage_.GetSize()); other.storage_.SetInlinedSize(0); } else { storage_.Initialize(IteratorValueAdapter>( MoveIterator(other.data())), other.size()); } } ~InlinedVector() {} // --------------------------------------------------------------------------- // InlinedVector Member Accessors // --------------------------------------------------------------------------- // `InlinedVector::empty()` // // Returns whether the inlined vector contains no elements. bool empty() const noexcept { return !size(); } // `InlinedVector::size()` // // Returns the number of elements in the inlined vector. size_type size() const noexcept { return storage_.GetSize(); } // `InlinedVector::max_size()` // // Returns the maximum number of elements the inlined vector can hold. size_type max_size() const noexcept { // One bit of the size storage is used to indicate whether the inlined // vector contains allocated memory. As a result, the maximum size that the // inlined vector can express is half of the max for `size_type`. return (std::numeric_limits::max)() / 2; } // `InlinedVector::capacity()` // // Returns the number of elements that could be stored in the inlined vector // without requiring a reallocation. // // NOTE: for most inlined vectors, `capacity()` should be equal to the // template parameter `N`. For inlined vectors which exceed this capacity, // they will no longer be inlined and `capacity()` will equal the capactity of // the allocated memory. 
size_type capacity() const noexcept { return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity() : storage_.GetInlinedCapacity(); } // `InlinedVector::data()` // // Returns a `pointer` to the elements of the inlined vector. This pointer // can be used to access and modify the contained elements. // // NOTE: only elements within [`data()`, `data() + size()`) are valid. pointer data() noexcept { return storage_.GetIsAllocated() ? storage_.GetAllocatedData() : storage_.GetInlinedData(); } // Overload of `InlinedVector::data()` that returns a `const_pointer` to the // elements of the inlined vector. This pointer can be used to access but not // modify the contained elements. // // NOTE: only elements within [`data()`, `data() + size()`) are valid. const_pointer data() const noexcept { return storage_.GetIsAllocated() ? storage_.GetAllocatedData() : storage_.GetInlinedData(); } // `InlinedVector::operator[](...)` // // Returns a `reference` to the `i`th element of the inlined vector. reference operator[](size_type i) { ABSL_HARDENING_ASSERT(i < size()); return data()[i]; } // Overload of `InlinedVector::operator[](...)` that returns a // `const_reference` to the `i`th element of the inlined vector. const_reference operator[](size_type i) const { ABSL_HARDENING_ASSERT(i < size()); return data()[i]; } // `InlinedVector::at(...)` // // Returns a `reference` to the `i`th element of the inlined vector. // // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, // in both debug and non-debug builds, `std::out_of_range` will be thrown. reference at(size_type i) { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange( "`InlinedVector::at(size_type)` failed bounds check"); } return data()[i]; } // Overload of `InlinedVector::at(...)` that returns a `const_reference` to // the `i`th element of the inlined vector. 
// // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`, // in both debug and non-debug builds, `std::out_of_range` will be thrown. const_reference at(size_type i) const { if (ABSL_PREDICT_FALSE(i >= size())) { base_internal::ThrowStdOutOfRange( "`InlinedVector::at(size_type) const` failed bounds check"); } return data()[i]; } // `InlinedVector::front()` // // Returns a `reference` to the first element of the inlined vector. reference front() { ABSL_HARDENING_ASSERT(!empty()); return data()[0]; } // Overload of `InlinedVector::front()` that returns a `const_reference` to // the first element of the inlined vector. const_reference front() const { ABSL_HARDENING_ASSERT(!empty()); return data()[0]; } // `InlinedVector::back()` // // Returns a `reference` to the last element of the inlined vector. reference back() { ABSL_HARDENING_ASSERT(!empty()); return data()[size() - 1]; } // Overload of `InlinedVector::back()` that returns a `const_reference` to the // last element of the inlined vector. const_reference back() const { ABSL_HARDENING_ASSERT(!empty()); return data()[size() - 1]; } // `InlinedVector::begin()` // // Returns an `iterator` to the beginning of the inlined vector. iterator begin() noexcept { return data(); } // Overload of `InlinedVector::begin()` that returns a `const_iterator` to // the beginning of the inlined vector. const_iterator begin() const noexcept { return data(); } // `InlinedVector::end()` // // Returns an `iterator` to the end of the inlined vector. iterator end() noexcept { return data() + size(); } // Overload of `InlinedVector::end()` that returns a `const_iterator` to the // end of the inlined vector. const_iterator end() const noexcept { return data() + size(); } // `InlinedVector::cbegin()` // // Returns a `const_iterator` to the beginning of the inlined vector. const_iterator cbegin() const noexcept { return begin(); } // `InlinedVector::cend()` // // Returns a `const_iterator` to the end of the inlined vector. 
const_iterator cend() const noexcept { return end(); } // `InlinedVector::rbegin()` // // Returns a `reverse_iterator` from the end of the inlined vector. reverse_iterator rbegin() noexcept { return reverse_iterator(end()); } // Overload of `InlinedVector::rbegin()` that returns a // `const_reverse_iterator` from the end of the inlined vector. const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(end()); } // `InlinedVector::rend()` // // Returns a `reverse_iterator` from the beginning of the inlined vector. reverse_iterator rend() noexcept { return reverse_iterator(begin()); } // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator` // from the beginning of the inlined vector. const_reverse_iterator rend() const noexcept { return const_reverse_iterator(begin()); } // `InlinedVector::crbegin()` // // Returns a `const_reverse_iterator` from the end of the inlined vector. const_reverse_iterator crbegin() const noexcept { return rbegin(); } // `InlinedVector::crend()` // // Returns a `const_reverse_iterator` from the beginning of the inlined // vector. const_reverse_iterator crend() const noexcept { return rend(); } // `InlinedVector::get_allocator()` // // Returns a copy of the inlined vector's allocator. allocator_type get_allocator() const { return storage_.GetAllocator(); } // --------------------------------------------------------------------------- // InlinedVector Member Mutators // --------------------------------------------------------------------------- // `InlinedVector::operator=(...)` // // Replaces the elements of the inlined vector with copies of the elements of // `list`. InlinedVector& operator=(std::initializer_list list) { assign(list.begin(), list.end()); return *this; } // Overload of `InlinedVector::operator=(...)` that replaces the elements of // the inlined vector with copies of the elements of `other`. 
InlinedVector& operator=(const InlinedVector& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { const_pointer other_data = other.data(); assign(other_data, other_data + other.size()); } return *this; } // Overload of `InlinedVector::operator=(...)` that moves the elements of // `other` into the inlined vector. // // NOTE: as a result of calling this overload, `other` is left in a valid but // unspecified state. InlinedVector& operator=(InlinedVector&& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) { inlined_vector_internal::DestroyAdapter::DestroyElements( storage_.GetAllocator(), data(), size()); storage_.DeallocateIfAllocated(); storage_.MemcpyFrom(other.storage_); other.storage_.SetInlinedSize(0); } else { storage_.Assign(IteratorValueAdapter>( MoveIterator(other.storage_.GetInlinedData())), other.size()); } } return *this; } // `InlinedVector::assign(...)` // // Replaces the contents of the inlined vector with `n` copies of `v`. void assign(size_type n, const_reference v) { storage_.Assign(CopyValueAdapter(std::addressof(v)), n); } // Overload of `InlinedVector::assign(...)` that replaces the contents of the // inlined vector with copies of the elements of `list`. void assign(std::initializer_list list) { assign(list.begin(), list.end()); } // Overload of `InlinedVector::assign(...)` to replace the contents of the // inlined vector with the range [`first`, `last`). // // NOTE: this overload is for iterators that are "forward" category or better. template = 0> void assign(ForwardIterator first, ForwardIterator last) { storage_.Assign(IteratorValueAdapter(first), static_cast(std::distance(first, last))); } // Overload of `InlinedVector::assign(...)` to replace the contents of the // inlined vector with the range [`first`, `last`). // // NOTE: this overload is for iterators that are "input" category. 
template = 0> void assign(InputIterator first, InputIterator last) { size_type i = 0; for (; i < size() && first != last; ++i, static_cast(++first)) { data()[i] = *first; } erase(data() + i, data() + size()); std::copy(first, last, std::back_inserter(*this)); } // `InlinedVector::resize(...)` // // Resizes the inlined vector to contain `n` elements. // // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n` // is larger than `size()`, new elements are value-initialized. void resize(size_type n) { ABSL_HARDENING_ASSERT(n <= max_size()); storage_.Resize(DefaultValueAdapter(), n); } // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to // contain `n` elements. // // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n` // is larger than `size()`, new elements are copied-constructed from `v`. void resize(size_type n, const_reference v) { ABSL_HARDENING_ASSERT(n <= max_size()); storage_.Resize(CopyValueAdapter(std::addressof(v)), n); } // `InlinedVector::insert(...)` // // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly // inserted element. iterator insert(const_iterator pos, const_reference v) { return emplace(pos, v); } // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using // move semantics, returning an `iterator` to the newly inserted element. iterator insert(const_iterator pos, value_type&& v) { return emplace(pos, std::move(v)); } // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies // of `v` starting at `pos`, returning an `iterator` pointing to the first of // the newly inserted elements. 
iterator insert(const_iterator pos, size_type n, const_reference v) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); if (ABSL_PREDICT_TRUE(n != 0)) { value_type dealias = v; // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102329#c2 // It appears that GCC thinks that since `pos` is a const pointer and may // point to uninitialized memory at this point, a warning should be // issued. But `pos` is actually only used to compute an array index to // write to. #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wmaybe-uninitialized" #endif return storage_.Insert(pos, CopyValueAdapter(std::addressof(dealias)), n); #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif } else { return const_cast(pos); } } // Overload of `InlinedVector::insert(...)` that inserts copies of the // elements of `list` starting at `pos`, returning an `iterator` pointing to // the first of the newly inserted elements. iterator insert(const_iterator pos, std::initializer_list list) { return insert(pos, list.begin(), list.end()); } // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, // `last`) starting at `pos`, returning an `iterator` pointing to the first // of the newly inserted elements. // // NOTE: this overload is for iterators that are "forward" category or better. template = 0> iterator insert(const_iterator pos, ForwardIterator first, ForwardIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); if (ABSL_PREDICT_TRUE(first != last)) { return storage_.Insert(pos, IteratorValueAdapter(first), std::distance(first, last)); } else { return const_cast(pos); } } // Overload of `InlinedVector::insert(...)` that inserts the range [`first`, // `last`) starting at `pos`, returning an `iterator` pointing to the first // of the newly inserted elements. // // NOTE: this overload is for iterators that are "input" category. 
template = 0> iterator insert(const_iterator pos, InputIterator first, InputIterator last) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); size_type index = std::distance(cbegin(), pos); for (size_type i = index; first != last; ++i, static_cast(++first)) { insert(data() + i, *first); } return iterator(data() + index); } // `InlinedVector::emplace(...)` // // Constructs and inserts an element using `args...` in the inlined vector at // `pos`, returning an `iterator` pointing to the newly emplaced element. template iterator emplace(const_iterator pos, Args&&... args) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos <= end()); value_type dealias(std::forward(args)...); return storage_.Insert(pos, IteratorValueAdapter>( MoveIterator(std::addressof(dealias))), 1); } // `InlinedVector::emplace_back(...)` // // Constructs and inserts an element using `args...` in the inlined vector at // `end()`, returning a `reference` to the newly emplaced element. template reference emplace_back(Args&&... args) { return storage_.EmplaceBack(std::forward(args)...); } // `InlinedVector::push_back(...)` // // Inserts a copy of `v` in the inlined vector at `end()`. void push_back(const_reference v) { static_cast(emplace_back(v)); } // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()` // using move semantics. void push_back(value_type&& v) { static_cast(emplace_back(std::move(v))); } // `InlinedVector::pop_back()` // // Destroys the element at `back()`, reducing the size by `1`. void pop_back() noexcept { ABSL_HARDENING_ASSERT(!empty()); AllocatorTraits::destroy(storage_.GetAllocator(), data() + (size() - 1)); storage_.SubtractSize(1); } // `InlinedVector::erase(...)` // // Erases the element at `pos`, returning an `iterator` pointing to where the // erased element was located. // // NOTE: may return `end()`, which is not dereferencable. 
iterator erase(const_iterator pos) { ABSL_HARDENING_ASSERT(pos >= begin()); ABSL_HARDENING_ASSERT(pos < end()); return storage_.Erase(pos, pos + 1); } // Overload of `InlinedVector::erase(...)` that erases every element in the // range [`from`, `to`), returning an `iterator` pointing to where the first // erased element was located. // // NOTE: may return `end()`, which is not dereferencable. iterator erase(const_iterator from, const_iterator to) { ABSL_HARDENING_ASSERT(from >= begin()); ABSL_HARDENING_ASSERT(from <= to); ABSL_HARDENING_ASSERT(to <= end()); if (ABSL_PREDICT_TRUE(from != to)) { return storage_.Erase(from, to); } else { return const_cast(from); } } // `InlinedVector::clear()` // // Destroys all elements in the inlined vector, setting the size to `0` and // deallocating any held memory. void clear() noexcept { inlined_vector_internal::DestroyAdapter::DestroyElements( storage_.GetAllocator(), data(), size()); storage_.DeallocateIfAllocated(); storage_.SetInlinedSize(0); } // `InlinedVector::reserve(...)` // // Ensures that there is enough room for at least `n` elements. void reserve(size_type n) { storage_.Reserve(n); } // `InlinedVector::shrink_to_fit()` // // Attempts to reduce memory usage by moving elements to (or keeping elements // in) the smallest available buffer sufficient for containing `size()` // elements. // // If `size()` is sufficiently small, the elements will be moved into (or kept // in) the inlined space. void shrink_to_fit() { if (storage_.GetIsAllocated()) { storage_.ShrinkToFit(); } } // `InlinedVector::swap(...)` // // Swaps the contents of the inlined vector with `other`. 
void swap(InlinedVector& other) { if (ABSL_PREDICT_TRUE(this != std::addressof(other))) { storage_.Swap(std::addressof(other.storage_)); } } private: template friend H AbslHashValue(H h, const absl::InlinedVector& a); Storage storage_; }; // ----------------------------------------------------------------------------- // InlinedVector Non-Member Functions // ----------------------------------------------------------------------------- // `swap(...)` // // Swaps the contents of two inlined vectors. template void swap(absl::InlinedVector& a, absl::InlinedVector& b) noexcept(noexcept(a.swap(b))) { a.swap(b); } // `operator==(...)` // // Tests for value-equality of two inlined vectors. template bool operator==(const absl::InlinedVector& a, const absl::InlinedVector& b) { auto a_data = a.data(); auto b_data = b.data(); return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size()); } // `operator!=(...)` // // Tests for value-inequality of two inlined vectors. template bool operator!=(const absl::InlinedVector& a, const absl::InlinedVector& b) { return !(a == b); } // `operator<(...)` // // Tests whether the value of an inlined vector is less than the value of // another inlined vector using a lexicographical comparison algorithm. template bool operator<(const absl::InlinedVector& a, const absl::InlinedVector& b) { auto a_data = a.data(); auto b_data = b.data(); return std::lexicographical_compare(a_data, a_data + a.size(), b_data, b_data + b.size()); } // `operator>(...)` // // Tests whether the value of an inlined vector is greater than the value of // another inlined vector using a lexicographical comparison algorithm. template bool operator>(const absl::InlinedVector& a, const absl::InlinedVector& b) { return b < a; } // `operator<=(...)` // // Tests whether the value of an inlined vector is less than or equal to the // value of another inlined vector using a lexicographical comparison algorithm. 
template bool operator<=(const absl::InlinedVector& a, const absl::InlinedVector& b) { return !(b < a); } // `operator>=(...)` // // Tests whether the value of an inlined vector is greater than or equal to the // value of another inlined vector using a lexicographical comparison algorithm. template bool operator>=(const absl::InlinedVector& a, const absl::InlinedVector& b) { return !(a < b); } // `AbslHashValue(...)` // // Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to // call this directly. template H AbslHashValue(H h, const absl::InlinedVector& a) { auto size = a.size(); return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size); } ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INLINED_VECTOR_H_ abseil-20220623.1/absl/container/inlined_vector_benchmark.cc000066400000000000000000000621121430371345100236050ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include #include "benchmark/benchmark.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" namespace { void BM_InlinedVectorFill(benchmark::State& state) { const int len = state.range(0); absl::InlinedVector v; v.reserve(len); for (auto _ : state) { v.resize(0); // Use resize(0) as InlinedVector releases storage on clear(). 
for (int i = 0; i < len; ++i) { v.push_back(i); } benchmark::DoNotOptimize(v); } } BENCHMARK(BM_InlinedVectorFill)->Range(1, 256); void BM_InlinedVectorFillRange(benchmark::State& state) { const int len = state.range(0); const std::vector src(len, len); absl::InlinedVector v; v.reserve(len); for (auto _ : state) { benchmark::DoNotOptimize(src); v.assign(src.begin(), src.end()); benchmark::DoNotOptimize(v); } } BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256); void BM_StdVectorFill(benchmark::State& state) { const int len = state.range(0); std::vector v; v.reserve(len); for (auto _ : state) { v.clear(); for (int i = 0; i < len; ++i) { v.push_back(i); } benchmark::DoNotOptimize(v); } } BENCHMARK(BM_StdVectorFill)->Range(1, 256); // The purpose of the next two benchmarks is to verify that // absl::InlinedVector is efficient when moving is more efficent than // copying. To do so, we use strings that are larger than the short // string optimization. bool StringRepresentedInline(std::string s) { const char* chars = s.data(); std::string s1 = std::move(s); return s1.data() != chars; } int GetNonShortStringOptimizationSize() { for (int i = 24; i <= 192; i *= 2) { if (!StringRepresentedInline(std::string(i, 'A'))) { return i; } } ABSL_RAW_LOG( FATAL, "Failed to find a string larger than the short string optimization"); return -1; } void BM_InlinedVectorFillString(benchmark::State& state) { const int len = state.range(0); const int no_sso = GetNonShortStringOptimizationSize(); std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'), std::string(no_sso, 'C'), std::string(no_sso, 'D')}; for (auto _ : state) { absl::InlinedVector v; for (int i = 0; i < len; i++) { v.push_back(strings[i & 3]); } } state.SetItemsProcessed(static_cast(state.iterations()) * len); } BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024); void BM_StdVectorFillString(benchmark::State& state) { const int len = state.range(0); const int no_sso = 
GetNonShortStringOptimizationSize(); std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'), std::string(no_sso, 'C'), std::string(no_sso, 'D')}; for (auto _ : state) { std::vector v; for (int i = 0; i < len; i++) { v.push_back(strings[i & 3]); } } state.SetItemsProcessed(static_cast(state.iterations()) * len); } BENCHMARK(BM_StdVectorFillString)->Range(0, 1024); struct Buffer { // some arbitrary structure for benchmarking. char* base; int length; int capacity; void* user_data; }; void BM_InlinedVectorAssignments(benchmark::State& state) { const int len = state.range(0); using BufferVec = absl::InlinedVector; BufferVec src; src.resize(len); BufferVec dst; for (auto _ : state) { benchmark::DoNotOptimize(dst); benchmark::DoNotOptimize(src); dst = src; } } BENCHMARK(BM_InlinedVectorAssignments) ->Arg(0) ->Arg(1) ->Arg(2) ->Arg(3) ->Arg(4) ->Arg(20); void BM_CreateFromContainer(benchmark::State& state) { for (auto _ : state) { absl::InlinedVector src{1, 2, 3}; benchmark::DoNotOptimize(src); absl::InlinedVector dst(std::move(src)); benchmark::DoNotOptimize(dst); } } BENCHMARK(BM_CreateFromContainer); struct LargeCopyableOnly { LargeCopyableOnly() : d(1024, 17) {} LargeCopyableOnly(const LargeCopyableOnly& o) = default; LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default; std::vector d; }; struct LargeCopyableSwappable { LargeCopyableSwappable() : d(1024, 17) {} LargeCopyableSwappable(const LargeCopyableSwappable& o) = default; LargeCopyableSwappable& operator=(LargeCopyableSwappable o) { using std::swap; swap(*this, o); return *this; } friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) { using std::swap; swap(a.d, b.d); } std::vector d; }; struct LargeCopyableMovable { LargeCopyableMovable() : d(1024, 17) {} // Use implicitly defined copy and move. 
std::vector d; }; struct LargeCopyableMovableSwappable { LargeCopyableMovableSwappable() : d(1024, 17) {} LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) = default; LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default; LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) { using std::swap; swap(*this, o); return *this; } LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) = default; friend void swap(LargeCopyableMovableSwappable& a, LargeCopyableMovableSwappable& b) { using std::swap; swap(a.d, b.d); } std::vector d; }; template void BM_SwapElements(benchmark::State& state) { const int len = state.range(0); using Vec = absl::InlinedVector; Vec a(len); Vec b; for (auto _ : state) { using std::swap; benchmark::DoNotOptimize(a); benchmark::DoNotOptimize(b); swap(a, b); } } BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024); BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024); BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024); BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable) ->Range(0, 1024); // The following benchmark is meant to track the efficiency of the vector size // as a function of stored type via the benchmark label. It is not meant to // output useful sizeof operator performance. The loop is a dummy operation // to fulfill the requirement of running the benchmark. 
template void BM_Sizeof(benchmark::State& state) { int size = 0; for (auto _ : state) { VecType vec; size = sizeof(vec); } state.SetLabel(absl::StrCat("sz=", size)); } BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector); void BM_InlinedVectorIndexInlined(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v[4]); } } BENCHMARK(BM_InlinedVectorIndexInlined); void BM_InlinedVectorIndexExternal(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v[4]); } } BENCHMARK(BM_InlinedVectorIndexExternal); void BM_StdVectorIndex(benchmark::State& state) { std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v[4]); } } BENCHMARK(BM_StdVectorIndex); void BM_InlinedVectorDataInlined(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.data()); } } BENCHMARK(BM_InlinedVectorDataInlined); void BM_InlinedVectorDataExternal(benchmark::State& 
state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.data()); } state.SetItemsProcessed(16 * static_cast(state.iterations())); } BENCHMARK(BM_InlinedVectorDataExternal); void BM_StdVectorData(benchmark::State& state) { std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.data()); } state.SetItemsProcessed(16 * static_cast(state.iterations())); } BENCHMARK(BM_StdVectorData); void BM_InlinedVectorSizeInlined(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.size()); } } BENCHMARK(BM_InlinedVectorSizeInlined); void BM_InlinedVectorSizeExternal(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.size()); } } BENCHMARK(BM_InlinedVectorSizeExternal); void BM_StdVectorSize(benchmark::State& state) { std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.size()); } } BENCHMARK(BM_StdVectorSize); void BM_InlinedVectorEmptyInlined(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.empty()); } } BENCHMARK(BM_InlinedVectorEmptyInlined); void BM_InlinedVectorEmptyExternal(benchmark::State& state) { absl::InlinedVector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.empty()); } } BENCHMARK(BM_InlinedVectorEmptyExternal); void BM_StdVectorEmpty(benchmark::State& state) { std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; for (auto _ : state) { benchmark::DoNotOptimize(v); benchmark::DoNotOptimize(v.empty()); } } BENCHMARK(BM_StdVectorEmpty); constexpr size_t 
kInlinedCapacity = 4; constexpr size_t kLargeSize = kInlinedCapacity * 2; constexpr size_t kSmallSize = kInlinedCapacity / 2; constexpr size_t kBatchSize = 100; #define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize); \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize) #define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T) \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize) template using InlVec = absl::InlinedVector; struct TrivialType { size_t val; }; class NontrivialType { public: ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() { benchmark::DoNotOptimize(*this); } ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other) : val_(other.val_) { benchmark::DoNotOptimize(*this); } ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=( const NontrivialType& other) { val_ = other.val_; benchmark::DoNotOptimize(*this); return *this; } ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept { benchmark::DoNotOptimize(*this); } private: size_t val_; }; template void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec, TestVecFn test_vec) { std::array, kBatchSize> vector_batch{}; while (state.KeepRunningBatch(kBatchSize)) { // Prepare batch state.PauseTiming(); for (size_t i = 0; i < kBatchSize; ++i) { prepare_vec(vector_batch.data() + i, i); } benchmark::DoNotOptimize(vector_batch); state.ResumeTiming(); // Test batch for (size_t i = 0; i < kBatchSize; ++i) { test_vec(vector_batch.data() + i, i); } } } template void BM_ConstructFromSize(benchmark::State& state) { using VecT = InlVec; auto size = ToSize; BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, /* test_vec = */ [&](void* ptr, size_t) { 
benchmark::DoNotOptimize(size); ::new (ptr) VecT(size); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType); template void BM_ConstructFromSizeRef(benchmark::State& state) { using VecT = InlVec; auto size = ToSize; auto ref = T(); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, /* test_vec = */ [&](void* ptr, size_t) { benchmark::DoNotOptimize(size); benchmark::DoNotOptimize(ref); ::new (ptr) VecT(size, ref); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType); template void BM_ConstructFromRange(benchmark::State& state) { using VecT = InlVec; std::array arr{}; BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, /* test_vec = */ [&](void* ptr, size_t) { benchmark::DoNotOptimize(arr); ::new (ptr) VecT(arr.begin(), arr.end()); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType); template void BM_ConstructFromCopy(benchmark::State& state) { using VecT = InlVec; VecT other_vec(ToSize); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->~VecT(); }, /* test_vec = */ [&](void* ptr, size_t) { benchmark::DoNotOptimize(other_vec); ::new (ptr) VecT(other_vec); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType); template void BM_ConstructFromMove(benchmark::State& state) { using VecT = InlVec; std::array vector_batch{}; BatchedBenchmark( state, /* prepare_vec = */ [&](InlVec* vec, size_t i) { vector_batch[i].clear(); vector_batch[i].resize(ToSize); vec->~VecT(); }, /* test_vec = */ [&](void* ptr, size_t i) { benchmark::DoNotOptimize(vector_batch[i]); ::new (ptr) VecT(std::move(vector_batch[i])); }); 
} ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType); // Measure cost of copy-constructor+destructor. void BM_CopyTrivial(benchmark::State& state) { const int n = state.range(0); InlVec src(n); for (auto s : state) { InlVec copy(src); benchmark::DoNotOptimize(copy); } } BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); // Measure cost of copy-constructor+destructor. void BM_CopyNonTrivial(benchmark::State& state) { const int n = state.range(0); InlVec> src(n); for (auto s : state) { InlVec> copy(src); benchmark::DoNotOptimize(copy); } } BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize); template void BM_AssignSizeRef(benchmark::State& state) { auto size = ToSize; auto ref = T(); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t) { benchmark::DoNotOptimize(size); benchmark::DoNotOptimize(ref); vec->assign(size, ref); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType); template void BM_AssignRange(benchmark::State& state) { std::array arr{}; BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t) { benchmark::DoNotOptimize(arr); vec->assign(arr.begin(), arr.end()); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType); template void BM_AssignFromCopy(benchmark::State& state) { InlVec other_vec(ToSize); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t) { benchmark::DoNotOptimize(other_vec); *vec = other_vec; }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, 
NontrivialType); template void BM_AssignFromMove(benchmark::State& state) { using VecT = InlVec; std::array vector_batch{}; BatchedBenchmark( state, /* prepare_vec = */ [&](InlVec* vec, size_t i) { vector_batch[i].clear(); vector_batch[i].resize(ToSize); vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t i) { benchmark::DoNotOptimize(vector_batch[i]); *vec = std::move(vector_batch[i]); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType); template void BM_ResizeSize(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { vec->resize(ToSize); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType); template void BM_ResizeSizeRef(benchmark::State& state) { auto t = T(); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t) { benchmark::DoNotOptimize(t); vec->resize(ToSize, t); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType); template void BM_InsertSizeRef(benchmark::State& state) { auto t = T(); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t) { benchmark::DoNotOptimize(t); auto* pos = vec->data() + (vec->size() / 2); vec->insert(pos, t); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType); template void BM_InsertRange(benchmark::State& state) { InlVec other_vec(ToSize); BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ 
[&](InlVec* vec, size_t) { benchmark::DoNotOptimize(other_vec); auto* pos = vec->data() + (vec->size() / 2); vec->insert(pos, other_vec.begin(), other_vec.end()); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType); template void BM_EmplaceBack(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { vec->emplace_back(); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType); template void BM_PopBack(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { vec->pop_back(); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType); template void BM_EraseOne(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { auto* pos = vec->data() + (vec->size() / 2); vec->erase(pos); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType); template void BM_EraseRange(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { auto* pos = vec->data() + (vec->size() / 2); vec->erase(pos, pos + 1); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType); template void BM_Clear(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->resize(FromSize); }, /* test_vec = */ [](InlVec* 
vec, size_t) { vec->clear(); }); } ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType); ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType); template void BM_Reserve(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(FromSize); }, /* test_vec = */ [](InlVec* vec, size_t) { vec->reserve(ToCapacity); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType); template void BM_ShrinkToFit(benchmark::State& state) { BatchedBenchmark( state, /* prepare_vec = */ [](InlVec* vec, size_t) { vec->clear(); vec->resize(ToCapacity); vec->reserve(FromCapacity); }, /* test_vec = */ [](InlVec* vec, size_t) { vec->shrink_to_fit(); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType); template void BM_Swap(benchmark::State& state) { using VecT = InlVec; std::array vector_batch{}; BatchedBenchmark( state, /* prepare_vec = */ [&](InlVec* vec, size_t i) { vector_batch[i].clear(); vector_batch[i].resize(ToSize); vec->resize(FromSize); }, /* test_vec = */ [&](InlVec* vec, size_t i) { using std::swap; benchmark::DoNotOptimize(vector_batch[i]); swap(*vec, vector_batch[i]); }); } ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType); ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType); } // namespace abseil-20220623.1/absl/container/inlined_vector_exception_safety_test.cc000066400000000000000000000417561430371345100262760ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/inlined_vector.h" #include "absl/base/config.h" #if defined(ABSL_HAVE_EXCEPTIONS) #include #include #include #include #include #include "gtest/gtest.h" #include "absl/base/internal/exception_safety_testing.h" namespace { constexpr size_t kInlinedCapacity = 4; constexpr size_t kLargeSize = kInlinedCapacity * 2; constexpr size_t kSmallSize = kInlinedCapacity / 2; using Thrower = testing::ThrowingValue<>; using MovableThrower = testing::ThrowingValue; using ThrowAlloc = testing::ThrowingAllocator; using ThrowerVec = absl::InlinedVector; using MovableThrowerVec = absl::InlinedVector; using ThrowAllocThrowerVec = absl::InlinedVector; using ThrowAllocMovableThrowerVec = absl::InlinedVector; // In GCC, if an element of a `std::initializer_list` throws during construction // the elements that were constructed before it are not destroyed. This causes // incorrect exception safety test failures. Thus, `testing::nothrow_ctor` is // required. See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66139 #define ABSL_INTERNAL_MAKE_INIT_LIST(T, N) \ (N > kInlinedCapacity \ ? 
std::initializer_list{T(0, testing::nothrow_ctor), \ T(1, testing::nothrow_ctor), \ T(2, testing::nothrow_ctor), \ T(3, testing::nothrow_ctor), \ T(4, testing::nothrow_ctor), \ T(5, testing::nothrow_ctor), \ T(6, testing::nothrow_ctor), \ T(7, testing::nothrow_ctor)} \ \ : std::initializer_list{T(0, testing::nothrow_ctor), \ T(1, testing::nothrow_ctor)}) static_assert(kLargeSize == 8, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); static_assert(kSmallSize == 2, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)"); template class TestParams { public: using VecT = TheVecT; constexpr static size_t GetSizeAt(size_t i) { return kSizes[1 + i]; } private: constexpr static size_t kSizes[1 + sizeof...(TheSizes)] = {1, TheSizes...}; }; using NoSizeTestParams = ::testing::Types, TestParams, TestParams, TestParams>; using OneSizeTestParams = ::testing::Types, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams>; using TwoSizeTestParams = ::testing::Types< TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams, TestParams>; template struct NoSizeTest : ::testing::Test {}; TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams); template struct OneSizeTest : ::testing::Test {}; TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams); template struct TwoSizeTest : ::testing::Test {}; TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams); template bool InlinedVectorInvariants(VecT* vec) { if (*vec != *vec) return false; if (vec->size() > vec->capacity()) return false; if (vec->size() > vec->max_size()) return false; if (vec->capacity() > vec->max_size()) return false; if (vec->data() != std::addressof(vec->at(0))) return false; if (vec->data() != vec->begin()) return false; if (*vec->data() != *vec->begin()) return false; if (vec->begin() > vec->end()) return false; if ((vec->end() - vec->begin()) != vec->size()) return false; if 
(std::distance(vec->begin(), vec->end()) != vec->size()) return false; return true; } // Function that always returns false is correct, but refactoring is required // for clarity. It's needed to express that, as a contract, certain operations // should not throw at all. Execution of this function means an exception was // thrown and thus the test should fail. // TODO(johnsoncj): Add `testing::NoThrowGuarantee` to the framework template bool NoThrowGuarantee(VecT* /* vec */) { return false; } TYPED_TEST(NoSizeTest, DefaultConstructor) { using VecT = typename TypeParam::VecT; using allocator_type = typename VecT::allocator_type; testing::TestThrowingCtor(); testing::TestThrowingCtor(allocator_type{}); } TYPED_TEST(OneSizeTest, SizeConstructor) { using VecT = typename TypeParam::VecT; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); testing::TestThrowingCtor(size); testing::TestThrowingCtor(size, allocator_type{}); } TYPED_TEST(OneSizeTest, SizeRefConstructor) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); testing::TestThrowingCtor(size, value_type{}); testing::TestThrowingCtor(size, value_type{}, allocator_type{}); } TYPED_TEST(OneSizeTest, InitializerListConstructor) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); testing::TestThrowingCtor( ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size)); testing::TestThrowingCtor( ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size), allocator_type{}); } TYPED_TEST(OneSizeTest, RangeConstructor) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); 
std::array arr{}; testing::TestThrowingCtor(arr.begin(), arr.end()); testing::TestThrowingCtor(arr.begin(), arr.end(), allocator_type{}); } TYPED_TEST(OneSizeTest, CopyConstructor) { using VecT = typename TypeParam::VecT; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); VecT other_vec{size}; testing::TestThrowingCtor(other_vec); testing::TestThrowingCtor(other_vec, allocator_type{}); } TYPED_TEST(OneSizeTest, MoveConstructor) { using VecT = typename TypeParam::VecT; using allocator_type = typename VecT::allocator_type; constexpr static auto size = TypeParam::GetSizeAt(0); if (!absl::allocator_is_nothrow::value) { testing::TestThrowingCtor(VecT{size}); testing::TestThrowingCtor(VecT{size}, allocator_type{}); } } TYPED_TEST(TwoSizeTest, Assign) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; constexpr static auto from_size = TypeParam::GetSizeAt(0); constexpr static auto to_size = TypeParam::GetSizeAt(1); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size); })); EXPECT_TRUE(tester.Test([](VecT* vec) { VecT other_vec{to_size}; *vec = other_vec; })); EXPECT_TRUE(tester.Test([](VecT* vec) { VecT other_vec{to_size}; *vec = std::move(other_vec); })); EXPECT_TRUE(tester.Test([](VecT* vec) { value_type val{}; vec->assign(to_size, val); })); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size)); })); EXPECT_TRUE(tester.Test([](VecT* vec) { std::array arr{}; vec->assign(arr.begin(), arr.end()); })); } TYPED_TEST(TwoSizeTest, Resize) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; constexpr static auto from_size = TypeParam::GetSizeAt(0); constexpr static auto to_size = TypeParam::GetSizeAt(1); auto tester = 
testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants, testing::strong_guarantee); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->resize(to_size); // })); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->resize(to_size, value_type{}); // })); } TYPED_TEST(OneSizeTest, Insert) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; constexpr static auto from_size = TypeParam::GetSizeAt(0); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->insert(it, value_type{}); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->insert(it, value_type{}); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->end(); vec->insert(it, value_type{}); })); } TYPED_TEST(TwoSizeTest, Insert) { using VecT = typename TypeParam::VecT; using value_type = typename VecT::value_type; constexpr static auto from_size = TypeParam::GetSizeAt(0); constexpr static auto count = TypeParam::GetSizeAt(1); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->insert(it, count, value_type{}); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->insert(it, count, value_type{}); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->end(); vec->insert(it, count, value_type{}); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->end(); vec->insert(it, 
ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count)); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); std::array arr{}; vec->insert(it, arr.begin(), arr.end()); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); std::array arr{}; vec->insert(it, arr.begin(), arr.end()); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->end(); std::array arr{}; vec->insert(it, arr.begin(), arr.end()); })); } TYPED_TEST(OneSizeTest, EmplaceBack) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); // For testing calls to `emplace_back(...)` that reallocate. VecT full_vec{size}; full_vec.resize(full_vec.capacity()); // For testing calls to `emplace_back(...)` that don't reallocate. VecT nonfull_vec{size}; nonfull_vec.reserve(size + 1); auto tester = testing::MakeExceptionSafetyTester().WithContracts( InlinedVectorInvariants); EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) { vec->emplace_back(); })); EXPECT_TRUE(tester.WithInitialValue(full_vec).Test( [](VecT* vec) { vec->emplace_back(); })); } TYPED_TEST(OneSizeTest, PopBack) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{size}) .WithContracts(NoThrowGuarantee); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->pop_back(); // })); } TYPED_TEST(OneSizeTest, Erase) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->erase(it); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->erase(it); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() - 1); vec->erase(it); })); 
EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->erase(it, it); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->erase(it, it); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() - 1); vec->erase(it, it); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin(); vec->erase(it, it + 1); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() / 2); vec->erase(it, it + 1); })); EXPECT_TRUE(tester.Test([](VecT* vec) { auto it = vec->begin() + (vec->size() - 1); vec->erase(it, it + 1); })); } TYPED_TEST(OneSizeTest, Clear) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{size}) .WithContracts(NoThrowGuarantee); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->clear(); // })); } TYPED_TEST(TwoSizeTest, Reserve) { using VecT = typename TypeParam::VecT; constexpr static auto from_size = TypeParam::GetSizeAt(0); constexpr static auto to_capacity = TypeParam::GetSizeAt(1); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->reserve(to_capacity); })); } TYPED_TEST(OneSizeTest, ShrinkToFit) { using VecT = typename TypeParam::VecT; constexpr static auto size = TypeParam::GetSizeAt(0); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{size}) .WithContracts(InlinedVectorInvariants); EXPECT_TRUE(tester.Test([](VecT* vec) { vec->shrink_to_fit(); // })); } TYPED_TEST(TwoSizeTest, Swap) { using VecT = typename TypeParam::VecT; constexpr static auto from_size = TypeParam::GetSizeAt(0); constexpr static auto to_size = TypeParam::GetSizeAt(1); auto tester = testing::MakeExceptionSafetyTester() .WithInitialValue(VecT{from_size}) .WithContracts(InlinedVectorInvariants); 
EXPECT_TRUE(tester.Test([](VecT* vec) { VecT other_vec{to_size}; vec->swap(other_vec); })); EXPECT_TRUE(tester.Test([](VecT* vec) { using std::swap; VecT other_vec{to_size}; swap(*vec, other_vec); })); } } // namespace #endif // defined(ABSL_HAVE_EXCEPTIONS) abseil-20220623.1/absl/container/inlined_vector_test.cc000066400000000000000000001551031430371345100226350ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/inlined_vector.h" #include #include #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/internal/exception_testing.h" #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/base/options.h" #include "absl/container/internal/counting_allocator.h" #include "absl/container/internal/test_instance_tracker.h" #include "absl/hash/hash_testing.h" #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" namespace { using absl::container_internal::CountingAllocator; using absl::test_internal::CopyableMovableInstance; using absl::test_internal::CopyableOnlyInstance; using absl::test_internal::InstanceTracker; using testing::AllOf; using testing::Each; using testing::ElementsAre; using testing::ElementsAreArray; using testing::Eq; using testing::Gt; using testing::PrintToString; using IntVec = absl::InlinedVector; 
MATCHER_P(SizeIs, n, "") { return testing::ExplainMatchResult(n, arg.size(), result_listener); } MATCHER_P(CapacityIs, n, "") { return testing::ExplainMatchResult(n, arg.capacity(), result_listener); } MATCHER_P(ValueIs, e, "") { return testing::ExplainMatchResult(e, arg.value(), result_listener); } // TODO(bsamwel): Add support for movable-only types. // Test fixture for typed tests on BaseCountedInstance derived classes, see // test_instance_tracker.h. template class InstanceTest : public ::testing::Test {}; TYPED_TEST_SUITE_P(InstanceTest); // A simple reference counted class to make sure that the proper elements are // destroyed in the erase(begin, end) test. class RefCounted { public: RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); } RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) { Ref(); } ~RefCounted() { Unref(); count_ = nullptr; } friend void swap(RefCounted& a, RefCounted& b) { using std::swap; swap(a.value_, b.value_); swap(a.count_, b.count_); } RefCounted& operator=(RefCounted v) { using std::swap; swap(*this, v); return *this; } void Ref() const { ABSL_RAW_CHECK(count_ != nullptr, ""); ++(*count_); } void Unref() const { --(*count_); ABSL_RAW_CHECK(*count_ >= 0, ""); } int value_; int* count_; }; using RefCountedVec = absl::InlinedVector; // A class with a vtable pointer class Dynamic { public: virtual ~Dynamic() {} }; using DynamicVec = absl::InlinedVector; // Append 0..len-1 to *v template static void Fill(Container* v, int len, int offset = 0) { for (int i = 0; i < len; i++) { v->push_back(i + offset); } } static IntVec Fill(int len, int offset = 0) { IntVec v; Fill(&v, len, offset); return v; } TEST(IntVec, SimpleOps) { for (int len = 0; len < 20; len++) { IntVec v; const IntVec& cv = v; // const alias Fill(&v, len); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); for (int i = 0; i < len; i++) { EXPECT_EQ(i, v[i]); EXPECT_EQ(i, v.at(i)); } EXPECT_EQ(v.begin(), v.data()); EXPECT_EQ(cv.begin(), 
cv.data()); int counter = 0; for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) { EXPECT_EQ(counter, *iter); counter++; } EXPECT_EQ(counter, len); counter = 0; for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) { EXPECT_EQ(counter, *iter); counter++; } EXPECT_EQ(counter, len); counter = 0; for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) { EXPECT_EQ(counter, *iter); counter++; } EXPECT_EQ(counter, len); if (len > 0) { EXPECT_EQ(0, v.front()); EXPECT_EQ(len - 1, v.back()); v.pop_back(); EXPECT_EQ(len - 1, v.size()); for (int i = 0; i < v.size(); ++i) { EXPECT_EQ(i, v[i]); EXPECT_EQ(i, v.at(i)); } } } } TEST(IntVec, PopBackNoOverflow) { IntVec v = {1}; v.pop_back(); EXPECT_EQ(v.size(), 0); } TEST(IntVec, AtThrows) { IntVec v = {1, 2, 3}; EXPECT_EQ(v.at(2), 3); ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range, "failed bounds check"); } TEST(IntVec, ReverseIterator) { for (int len = 0; len < 20; len++) { IntVec v; Fill(&v, len); int counter = len; for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); counter = len; for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); counter = len; for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend(); ++iter) { counter--; EXPECT_EQ(counter, *iter); } EXPECT_EQ(counter, 0); } } TEST(IntVec, Erase) { for (int len = 1; len < 20; len++) { for (int i = 0; i < len; ++i) { IntVec v; Fill(&v, len); v.erase(v.begin() + i); EXPECT_EQ(len - 1, v.size()); for (int j = 0; j < i; ++j) { EXPECT_EQ(j, v[j]); } for (int j = i; j < len - 1; ++j) { EXPECT_EQ(j + 1, v[j]); } } } } TEST(IntVec, Hardened) { IntVec v; Fill(&v, 10); EXPECT_EQ(v[9], 9); #if !defined(NDEBUG) || ABSL_OPTION_HARDENED EXPECT_DEATH_IF_SUPPORTED(v[10], ""); EXPECT_DEATH_IF_SUPPORTED(v[-1], ""); #endif } // At the end of 
this test loop, the elements between [erase_begin, erase_end) // should have reference counts == 0, and all others elements should have // reference counts == 1. TEST(RefCountedVec, EraseBeginEnd) { for (int len = 1; len < 20; ++len) { for (int erase_begin = 0; erase_begin < len; ++erase_begin) { for (int erase_end = erase_begin; erase_end <= len; ++erase_end) { std::vector counts(len, 0); RefCountedVec v; for (int i = 0; i < len; ++i) { v.push_back(RefCounted(i, &counts[i])); } int erase_len = erase_end - erase_begin; v.erase(v.begin() + erase_begin, v.begin() + erase_end); EXPECT_EQ(len - erase_len, v.size()); // Check the elements before the first element erased. for (int i = 0; i < erase_begin; ++i) { EXPECT_EQ(i, v[i].value_); } // Check the elements after the first element erased. for (int i = erase_begin; i < v.size(); ++i) { EXPECT_EQ(i + erase_len, v[i].value_); } // Check that the elements at the beginning are preserved. for (int i = 0; i < erase_begin; ++i) { EXPECT_EQ(1, counts[i]); } // Check that the erased elements are destroyed for (int i = erase_begin; i < erase_end; ++i) { EXPECT_EQ(0, counts[i]); } // Check that the elements at the end are preserved. 
for (int i = erase_end; i < len; ++i) { EXPECT_EQ(1, counts[i]); } } } } } struct NoDefaultCtor { explicit NoDefaultCtor(int) {} }; struct NoCopy { NoCopy() {} NoCopy(const NoCopy&) = delete; }; struct NoAssign { NoAssign() {} NoAssign& operator=(const NoAssign&) = delete; }; struct MoveOnly { MoveOnly() {} MoveOnly(MoveOnly&&) = default; MoveOnly& operator=(MoveOnly&&) = default; }; TEST(InlinedVectorTest, NoDefaultCtor) { absl::InlinedVector v(10, NoDefaultCtor(2)); (void)v; } TEST(InlinedVectorTest, NoCopy) { absl::InlinedVector v(10); (void)v; } TEST(InlinedVectorTest, NoAssign) { absl::InlinedVector v(10); (void)v; } TEST(InlinedVectorTest, MoveOnly) { absl::InlinedVector v; v.push_back(MoveOnly{}); v.push_back(MoveOnly{}); v.push_back(MoveOnly{}); v.erase(v.begin()); v.push_back(MoveOnly{}); v.erase(v.begin(), v.begin() + 1); v.insert(v.begin(), MoveOnly{}); v.emplace(v.begin()); v.emplace(v.begin(), MoveOnly{}); } TEST(InlinedVectorTest, Noexcept) { EXPECT_TRUE(std::is_nothrow_move_constructible::value); EXPECT_TRUE((std::is_nothrow_move_constructible< absl::InlinedVector>::value)); struct MoveCanThrow { MoveCanThrow(MoveCanThrow&&) {} }; EXPECT_EQ(absl::default_allocator_is_nothrow::value, (std::is_nothrow_move_constructible< absl::InlinedVector>::value)); } TEST(InlinedVectorTest, EmplaceBack) { absl::InlinedVector, 1> v; auto& inlined_element = v.emplace_back("answer", 42); EXPECT_EQ(&inlined_element, &v[0]); EXPECT_EQ(inlined_element.first, "answer"); EXPECT_EQ(inlined_element.second, 42); auto& allocated_element = v.emplace_back("taxicab", 1729); EXPECT_EQ(&allocated_element, &v[1]); EXPECT_EQ(allocated_element.first, "taxicab"); EXPECT_EQ(allocated_element.second, 1729); } TEST(InlinedVectorTest, ShrinkToFitGrowingVector) { absl::InlinedVector, 1> v; v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 1); v.emplace_back("answer", 42); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 1); v.emplace_back("taxicab", 1729); EXPECT_GE(v.capacity(), 2); v.shrink_to_fit(); 
EXPECT_EQ(v.capacity(), 2); v.reserve(100); EXPECT_GE(v.capacity(), 100); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 2); } TEST(InlinedVectorTest, ShrinkToFitEdgeCases) { { absl::InlinedVector, 1> v; v.emplace_back("answer", 42); v.emplace_back("taxicab", 1729); EXPECT_GE(v.capacity(), 2); v.pop_back(); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 1); EXPECT_EQ(v[0].first, "answer"); EXPECT_EQ(v[0].second, 42); } { absl::InlinedVector v(100); v.resize(0); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 2); // inlined capacity } { absl::InlinedVector v(100); v.resize(1); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 2); // inlined capacity } { absl::InlinedVector v(100); v.resize(2); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 2); } { absl::InlinedVector v(100); v.resize(3); v.shrink_to_fit(); EXPECT_EQ(v.capacity(), 3); } } TEST(IntVec, Insert) { for (int len = 0; len < 20; len++) { for (int pos = 0; pos <= len; pos++) { { // Single element std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); std_v.insert(std_v.begin() + pos, 9999); IntVec::iterator it = v.insert(v.cbegin() + pos, 9999); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } { // n elements std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); IntVec::size_type n = 5; std_v.insert(std_v.begin() + pos, n, 9999); IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } { // Iterator range (random access iterator) std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); const std::vector input = {9999, 8888, 7777}; std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); IntVec::iterator it = v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } { // Iterator range (forward iterator) std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); const std::forward_list input = {9999, 8888, 7777}; 
std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend()); IntVec::iterator it = v.insert(v.cbegin() + pos, input.cbegin(), input.cend()); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } { // Iterator range (input iterator) std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); std_v.insert(std_v.begin() + pos, {9999, 8888, 7777}); std::istringstream input("9999 8888 7777"); IntVec::iterator it = v.insert(v.cbegin() + pos, std::istream_iterator(input), std::istream_iterator()); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } { // Initializer list std::vector std_v; Fill(&std_v, len); IntVec v; Fill(&v, len); std_v.insert(std_v.begin() + pos, {9999, 8888}); IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888}); EXPECT_THAT(v, ElementsAreArray(std_v)); EXPECT_EQ(it, v.cbegin() + pos); } } } } TEST(RefCountedVec, InsertConstructorDestructor) { // Make sure the proper construction/destruction happen during insert // operations. for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); for (int pos = 0; pos <= len; pos++) { SCOPED_TRACE(pos); std::vector counts(len, 0); int inserted_count = 0; RefCountedVec v; for (int i = 0; i < len; ++i) { SCOPED_TRACE(i); v.push_back(RefCounted(i, &counts[i])); } EXPECT_THAT(counts, Each(Eq(1))); RefCounted insert_element(9999, &inserted_count); EXPECT_EQ(1, inserted_count); v.insert(v.begin() + pos, insert_element); EXPECT_EQ(2, inserted_count); // Check that the elements at the end are preserved. 
EXPECT_THAT(counts, Each(Eq(1))); EXPECT_EQ(2, inserted_count); } } } TEST(IntVec, Resize) { for (int len = 0; len < 20; len++) { IntVec v; Fill(&v, len); // Try resizing up and down by k elements static const int kResizeElem = 1000000; for (int k = 0; k < 10; k++) { // Enlarging resize v.resize(len + k, kResizeElem); EXPECT_EQ(len + k, v.size()); EXPECT_LE(len + k, v.capacity()); for (int i = 0; i < len + k; i++) { if (i < len) { EXPECT_EQ(i, v[i]); } else { EXPECT_EQ(kResizeElem, v[i]); } } // Shrinking resize v.resize(len, kResizeElem); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); for (int i = 0; i < len; i++) { EXPECT_EQ(i, v[i]); } } } } TEST(IntVec, InitWithLength) { for (int len = 0; len < 20; len++) { IntVec v(len, 7); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); for (int i = 0; i < len; i++) { EXPECT_EQ(7, v[i]); } } } TEST(IntVec, CopyConstructorAndAssignment) { for (int len = 0; len < 20; len++) { IntVec v; Fill(&v, len); EXPECT_EQ(len, v.size()); EXPECT_LE(len, v.capacity()); IntVec v2(v); EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2); for (int start_len = 0; start_len < 20; start_len++) { IntVec v3; Fill(&v3, start_len, 99); // Add dummy elements that should go away v3 = v; EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3); } } } TEST(IntVec, AliasingCopyAssignment) { for (int len = 0; len < 20; ++len) { IntVec original; Fill(&original, len); IntVec dup = original; dup = *&dup; EXPECT_EQ(dup, original); } } TEST(IntVec, MoveConstructorAndAssignment) { for (int len = 0; len < 20; len++) { IntVec v_in; const int inlined_capacity = v_in.capacity(); Fill(&v_in, len); EXPECT_EQ(len, v_in.size()); EXPECT_LE(len, v_in.capacity()); { IntVec v_temp(v_in); auto* old_data = v_temp.data(); IntVec v_out(std::move(v_temp)); EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); if (v_in.size() > inlined_capacity) { // Allocation is moved as a whole, data stays in place. 
EXPECT_TRUE(v_out.data() == old_data); } else { EXPECT_FALSE(v_out.data() == old_data); } } for (int start_len = 0; start_len < 20; start_len++) { IntVec v_out; Fill(&v_out, start_len, 99); // Add dummy elements that should go away IntVec v_temp(v_in); auto* old_data = v_temp.data(); v_out = std::move(v_temp); EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out); if (v_in.size() > inlined_capacity) { // Allocation is moved as a whole, data stays in place. EXPECT_TRUE(v_out.data() == old_data); } else { EXPECT_FALSE(v_out.data() == old_data); } } } } class NotTriviallyDestructible { public: NotTriviallyDestructible() : p_(new int(1)) {} explicit NotTriviallyDestructible(int i) : p_(new int(i)) {} NotTriviallyDestructible(const NotTriviallyDestructible& other) : p_(new int(*other.p_)) {} NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) { p_ = absl::make_unique(*other.p_); return *this; } bool operator==(const NotTriviallyDestructible& other) const { return *p_ == *other.p_; } private: std::unique_ptr p_; }; TEST(AliasingTest, Emplace) { for (int i = 2; i < 20; ++i) { absl::InlinedVector vec; for (int j = 0; j < i; ++j) { vec.push_back(NotTriviallyDestructible(j)); } vec.emplace(vec.begin(), vec[0]); EXPECT_EQ(vec[0], vec[1]); vec.emplace(vec.begin() + i / 2, vec[i / 2]); EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]); vec.emplace(vec.end() - 1, vec.back()); EXPECT_EQ(vec[vec.size() - 2], vec.back()); } } TEST(AliasingTest, InsertWithCount) { for (int i = 1; i < 20; ++i) { absl::InlinedVector vec; for (int j = 0; j < i; ++j) { vec.push_back(NotTriviallyDestructible(j)); } for (int n = 0; n < 5; ++n) { // We use back where we can because it's guaranteed to become invalidated vec.insert(vec.begin(), n, vec.back()); auto b = vec.begin(); EXPECT_TRUE( std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) { return x == vec.back(); })); auto m_idx = vec.size() / 2; vec.insert(vec.begin() + m_idx, n, vec.back()); auto m = 
vec.begin() + m_idx; EXPECT_TRUE( std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) { return x == vec.back(); })); // We want distinct values so the equality test is meaningful, // vec[vec.size() - 1] is also almost always invalidated. auto old_e = vec.size() - 1; auto val = vec[old_e]; vec.insert(vec.end(), n, vec[old_e]); auto e = vec.begin() + old_e; EXPECT_TRUE(std::all_of( e, e + n, [&val](const NotTriviallyDestructible& x) { return x == val; })); } } } TEST(OverheadTest, Storage) { // Check for size overhead. // In particular, ensure that std::allocator doesn't cost anything to store. // The union should be absorbing some of the allocation bookkeeping overhead // in the larger vectors, leaving only the size_ field as overhead. struct T { void* val; }; size_t expected_overhead = sizeof(T); EXPECT_EQ((2 * expected_overhead), sizeof(absl::InlinedVector) - sizeof(T[1])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[2])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[3])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[4])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[5])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[6])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[7])); EXPECT_EQ(expected_overhead, sizeof(absl::InlinedVector) - sizeof(T[8])); } TEST(IntVec, Clear) { for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); IntVec v; Fill(&v, len); v.clear(); EXPECT_EQ(0, v.size()); EXPECT_EQ(v.begin(), v.end()); } } TEST(IntVec, Reserve) { for (int len = 0; len < 20; len++) { IntVec v; Fill(&v, len); for (int newlen = 0; newlen < 100; newlen++) { const int* start_rep = v.data(); v.reserve(newlen); const int* final_rep = v.data(); if (newlen <= len) { EXPECT_EQ(start_rep, final_rep); } EXPECT_LE(newlen, v.capacity()); // Filling up to newlen should not change rep while (v.size() < newlen) { v.push_back(0); } 
EXPECT_EQ(final_rep, v.data()); } } } TEST(StringVec, SelfRefPushBack) { std::vector std_v; absl::InlinedVector v; const std::string s = "A quite long string to ensure heap."; std_v.push_back(s); v.push_back(s); for (int i = 0; i < 20; ++i) { EXPECT_THAT(v, ElementsAreArray(std_v)); v.push_back(v.back()); std_v.push_back(std_v.back()); } EXPECT_THAT(v, ElementsAreArray(std_v)); } TEST(StringVec, SelfRefPushBackWithMove) { std::vector std_v; absl::InlinedVector v; const std::string s = "A quite long string to ensure heap."; std_v.push_back(s); v.push_back(s); for (int i = 0; i < 20; ++i) { EXPECT_EQ(v.back(), std_v.back()); v.push_back(std::move(v.back())); std_v.push_back(std::move(std_v.back())); } EXPECT_EQ(v.back(), std_v.back()); } TEST(StringVec, SelfMove) { const std::string s = "A quite long string to ensure heap."; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); absl::InlinedVector v; for (int i = 0; i < len; ++i) { SCOPED_TRACE(i); v.push_back(s); } // Indirection necessary to avoid compiler warning. v = std::move(*(&v)); // Ensure that the inlined vector is still in a valid state by copying it. // We don't expect specific contents since a self-move results in an // unspecified valid state. 
std::vector copy(v.begin(), v.end()); } } TEST(IntVec, Swap) { for (int l1 = 0; l1 < 20; l1++) { SCOPED_TRACE(l1); for (int l2 = 0; l2 < 20; l2++) { SCOPED_TRACE(l2); IntVec a = Fill(l1, 0); IntVec b = Fill(l2, 100); { using std::swap; swap(a, b); } EXPECT_EQ(l1, b.size()); EXPECT_EQ(l2, a.size()); for (int i = 0; i < l1; i++) { SCOPED_TRACE(i); EXPECT_EQ(i, b[i]); } for (int i = 0; i < l2; i++) { SCOPED_TRACE(i); EXPECT_EQ(100 + i, a[i]); } } } } TYPED_TEST_P(InstanceTest, Swap) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; for (int l1 = 0; l1 < 20; l1++) { SCOPED_TRACE(l1); for (int l2 = 0; l2 < 20; l2++) { SCOPED_TRACE(l2); InstanceTracker tracker; InstanceVec a, b; const size_t inlined_capacity = a.capacity(); auto min_len = std::min(l1, l2); auto max_len = std::max(l1, l2); for (int i = 0; i < l1; i++) a.push_back(Instance(i)); for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i)); EXPECT_EQ(tracker.instances(), l1 + l2); tracker.ResetCopiesMovesSwaps(); { using std::swap; swap(a, b); } EXPECT_EQ(tracker.instances(), l1 + l2); if (a.size() > inlined_capacity && b.size() > inlined_capacity) { EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped. EXPECT_EQ(tracker.moves(), 0); } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) { EXPECT_EQ(tracker.swaps(), min_len); EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()), max_len - min_len); } else { // One is allocated and the other isn't. The allocation is transferred // without copying elements, and the inlined instances are copied/moved. EXPECT_EQ(tracker.swaps(), 0); EXPECT_EQ((tracker.moves() ? 
tracker.moves() : tracker.copies()), min_len); } EXPECT_EQ(l1, b.size()); EXPECT_EQ(l2, a.size()); for (int i = 0; i < l1; i++) { EXPECT_EQ(i, b[i].value()); } for (int i = 0; i < l2; i++) { EXPECT_EQ(100 + i, a[i].value()); } } } } TEST(IntVec, EqualAndNotEqual) { IntVec a, b; EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); a.push_back(3); EXPECT_FALSE(a == b); EXPECT_TRUE(a != b); b.push_back(3); EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); b.push_back(7); EXPECT_FALSE(a == b); EXPECT_TRUE(a != b); a.push_back(6); EXPECT_FALSE(a == b); EXPECT_TRUE(a != b); a.clear(); b.clear(); for (int i = 0; i < 100; i++) { a.push_back(i); b.push_back(i); EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); b[i] = b[i] + 1; EXPECT_FALSE(a == b); EXPECT_TRUE(a != b); b[i] = b[i] - 1; // Back to before EXPECT_TRUE(a == b); EXPECT_FALSE(a != b); } } TEST(IntVec, RelationalOps) { IntVec a, b; EXPECT_FALSE(a < b); EXPECT_FALSE(b < a); EXPECT_FALSE(a > b); EXPECT_FALSE(b > a); EXPECT_TRUE(a <= b); EXPECT_TRUE(b <= a); EXPECT_TRUE(a >= b); EXPECT_TRUE(b >= a); b.push_back(3); EXPECT_TRUE(a < b); EXPECT_FALSE(b < a); EXPECT_FALSE(a > b); EXPECT_TRUE(b > a); EXPECT_TRUE(a <= b); EXPECT_FALSE(b <= a); EXPECT_FALSE(a >= b); EXPECT_TRUE(b >= a); } TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); tracker.ResetCopiesMovesSwaps(); InstanceVec v; const size_t inlined_capacity = v.capacity(); for (int i = 0; i < len; i++) { v.push_back(Instance(i)); } EXPECT_EQ(tracker.instances(), len); EXPECT_GE(tracker.copies() + tracker.moves(), len); // More due to reallocation. 
tracker.ResetCopiesMovesSwaps(); // Enlarging resize() must construct some objects tracker.ResetCopiesMovesSwaps(); v.resize(len + 10, Instance(100)); EXPECT_EQ(tracker.instances(), len + 10); if (len <= inlined_capacity && len + 10 > inlined_capacity) { EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len); } else { // Only specify a minimum number of copies + moves. We don't want to // depend on the reallocation policy here. EXPECT_GE(tracker.copies() + tracker.moves(), 10); // More due to reallocation. } // Shrinking resize() must destroy some objects tracker.ResetCopiesMovesSwaps(); v.resize(len, Instance(100)); EXPECT_EQ(tracker.instances(), len); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); // reserve() must not increase the number of initialized objects SCOPED_TRACE("reserve"); v.reserve(len + 1000); EXPECT_EQ(tracker.instances(), len); EXPECT_EQ(tracker.copies() + tracker.moves(), len); // pop_back() and erase() must destroy one object if (len > 0) { tracker.ResetCopiesMovesSwaps(); v.pop_back(); EXPECT_EQ(tracker.instances(), len - 1); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); if (!v.empty()) { tracker.ResetCopiesMovesSwaps(); v.erase(v.begin()); EXPECT_EQ(tracker.instances(), len - 2); EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2); } } tracker.ResetCopiesMovesSwaps(); int instances_before_empty_erase = tracker.instances(); v.erase(v.begin(), v.begin()); EXPECT_EQ(tracker.instances(), instances_before_empty_erase); EXPECT_EQ(tracker.copies() + tracker.moves(), 0); } } TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); tracker.ResetCopiesMovesSwaps(); InstanceVec v; for (int i = 0; i < len; i++) { v.push_back(Instance(i)); } EXPECT_EQ(tracker.instances(), len); EXPECT_GE(tracker.copies() + tracker.moves(), len); // More due to 
reallocation. tracker.ResetCopiesMovesSwaps(); { // Copy constructor should create 'len' more instances. InstanceVec v_copy(v); EXPECT_EQ(tracker.instances(), len + len); EXPECT_EQ(tracker.copies(), len); EXPECT_EQ(tracker.moves(), 0); } EXPECT_EQ(tracker.instances(), len); } } TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); tracker.ResetCopiesMovesSwaps(); InstanceVec v; const size_t inlined_capacity = v.capacity(); for (int i = 0; i < len; i++) { v.push_back(Instance(i)); } EXPECT_EQ(tracker.instances(), len); EXPECT_GE(tracker.copies() + tracker.moves(), len); // More due to reallocation. tracker.ResetCopiesMovesSwaps(); { InstanceVec v_copy(std::move(v)); if (len > inlined_capacity) { // Allocation is moved as a whole. EXPECT_EQ(tracker.instances(), len); EXPECT_EQ(tracker.live_instances(), len); // Tests an implementation detail, don't rely on this in your code. 
EXPECT_EQ(v.size(), 0); // NOLINT misc-use-after-move EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); } else { EXPECT_EQ(tracker.instances(), len + len); if (Instance::supports_move()) { EXPECT_EQ(tracker.live_instances(), len); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), len); } else { EXPECT_EQ(tracker.live_instances(), len + len); EXPECT_EQ(tracker.copies(), len); EXPECT_EQ(tracker.moves(), 0); } } EXPECT_EQ(tracker.swaps(), 0); } } } TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); for (int longorshort = 0; longorshort <= 1; ++longorshort) { SCOPED_TRACE(longorshort); tracker.ResetCopiesMovesSwaps(); InstanceVec longer, shorter; for (int i = 0; i < len; i++) { longer.push_back(Instance(i)); shorter.push_back(Instance(i)); } longer.push_back(Instance(len)); EXPECT_EQ(tracker.instances(), len + len + 1); EXPECT_GE(tracker.copies() + tracker.moves(), len + len + 1); // More due to reallocation. tracker.ResetCopiesMovesSwaps(); if (longorshort) { shorter = longer; EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1)); EXPECT_GE(tracker.copies() + tracker.moves(), len + 1); // More due to reallocation. 
} else { longer = shorter; EXPECT_EQ(tracker.instances(), len + len); EXPECT_EQ(tracker.copies() + tracker.moves(), len); } } } } TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) { using Instance = TypeParam; using InstanceVec = absl::InlinedVector; InstanceTracker tracker; for (int len = 0; len < 20; len++) { SCOPED_TRACE(len); for (int longorshort = 0; longorshort <= 1; ++longorshort) { SCOPED_TRACE(longorshort); tracker.ResetCopiesMovesSwaps(); InstanceVec longer, shorter; const int inlined_capacity = longer.capacity(); for (int i = 0; i < len; i++) { longer.push_back(Instance(i)); shorter.push_back(Instance(i)); } longer.push_back(Instance(len)); EXPECT_EQ(tracker.instances(), len + len + 1); EXPECT_GE(tracker.copies() + tracker.moves(), len + len + 1); // More due to reallocation. tracker.ResetCopiesMovesSwaps(); int src_len; if (longorshort) { src_len = len + 1; shorter = std::move(longer); } else { src_len = len; longer = std::move(shorter); } if (src_len > inlined_capacity) { // Allocation moved as a whole. EXPECT_EQ(tracker.instances(), src_len); EXPECT_EQ(tracker.live_instances(), src_len); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 0); } else { // Elements are all copied. EXPECT_EQ(tracker.instances(), src_len + src_len); if (Instance::supports_move()) { EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), src_len); EXPECT_EQ(tracker.live_instances(), src_len); } else { EXPECT_EQ(tracker.copies(), src_len); EXPECT_EQ(tracker.moves(), 0); EXPECT_EQ(tracker.live_instances(), src_len + src_len); } } EXPECT_EQ(tracker.swaps(), 0); } } } TEST(CountElemAssign, SimpleTypeWithInlineBacking) { for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] 
std::vector original_contents(original_size, 12345); absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(2, 123); EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123))); if (original_size <= 2) { // If the original had inline backing, it should stay inline. EXPECT_EQ(2, v.capacity()); } } } TEST(CountElemAssign, SimpleTypeWithAllocation) { for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] std::vector original_contents(original_size, 12345); absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(3, 123); EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123))); EXPECT_LE(v.size(), v.capacity()); } } TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) { using Instance = TypeParam; for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] std::vector original_contents(original_size, Instance(12345)); absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(2, Instance(123)); EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123)))); if (original_size <= 2) { // If the original had inline backing, it should stay inline. EXPECT_EQ(2, v.capacity()); } } } template void InstanceCountElemAssignWithAllocationTest() { for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] 
std::vector original_contents(original_size, Instance(12345)); absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(3, Instance(123)); EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123), ValueIs(123)))); EXPECT_LE(v.size(), v.capacity()); } } TEST(CountElemAssign, WithAllocationCopyableInstance) { InstanceCountElemAssignWithAllocationTest(); } TEST(CountElemAssign, WithAllocationCopyableMovableInstance) { InstanceCountElemAssignWithAllocationTest(); } TEST(RangedConstructor, SimpleType) { std::vector source_v = {4, 5, 6}; // First try to fit in inline backing absl::InlinedVector v(source_v.begin(), source_v.end()); EXPECT_EQ(3, v.size()); EXPECT_EQ(4, v.capacity()); // Indication that we're still on inlined storage EXPECT_EQ(4, v[0]); EXPECT_EQ(5, v[1]); EXPECT_EQ(6, v[2]); // Now, force a re-allocate absl::InlinedVector realloc_v(source_v.begin(), source_v.end()); EXPECT_EQ(3, realloc_v.size()); EXPECT_LT(2, realloc_v.capacity()); EXPECT_EQ(4, realloc_v[0]); EXPECT_EQ(5, realloc_v[1]); EXPECT_EQ(6, realloc_v[2]); } // Test for ranged constructors using Instance as the element type and // SourceContainer as the source container type. template void InstanceRangedConstructorTestForContainer() { InstanceTracker tracker; SourceContainer source_v = {Instance(0), Instance(1)}; tracker.ResetCopiesMovesSwaps(); absl::InlinedVector v(source_v.begin(), source_v.end()); EXPECT_EQ(2, v.size()); EXPECT_LT(1, v.capacity()); EXPECT_EQ(0, v[0].value()); EXPECT_EQ(1, v[1].value()); EXPECT_EQ(tracker.copies(), 2); EXPECT_EQ(tracker.moves(), 0); } template void InstanceRangedConstructorTestWithCapacity() { // Test with const and non-const, random access and non-random-access sources. // TODO(bsamwel): Test with an input iterator source. 
{ SCOPED_TRACE("std::list"); InstanceRangedConstructorTestForContainer, inlined_capacity>(); { SCOPED_TRACE("const std::list"); InstanceRangedConstructorTestForContainer< Instance, const std::list, inlined_capacity>(); } { SCOPED_TRACE("std::vector"); InstanceRangedConstructorTestForContainer, inlined_capacity>(); } { SCOPED_TRACE("const std::vector"); InstanceRangedConstructorTestForContainer< Instance, const std::vector, inlined_capacity>(); } } } TYPED_TEST_P(InstanceTest, RangedConstructor) { using Instance = TypeParam; SCOPED_TRACE("capacity=1"); InstanceRangedConstructorTestWithCapacity(); SCOPED_TRACE("capacity=2"); InstanceRangedConstructorTestWithCapacity(); } TEST(RangedConstructor, ElementsAreConstructed) { std::vector source_v = {"cat", "dog"}; // Force expansion and re-allocation of v. Ensures that when the vector is // expanded that new elements are constructed. absl::InlinedVector v(source_v.begin(), source_v.end()); EXPECT_EQ("cat", v[0]); EXPECT_EQ("dog", v[1]); } TEST(RangedAssign, SimpleType) { // Test for all combinations of original sizes (empty and non-empty inline, // and out of line) and target sizes. for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] std::vector original_contents(original_size, 12345); for (size_t target_size = 0; target_size <= 5; ++target_size) { SCOPED_TRACE(target_size); // New contents are [3, 4, ...] std::vector new_contents; for (size_t i = 0; i < target_size; ++i) { new_contents.push_back(i + 3); } absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(new_contents.begin(), new_contents.end()); EXPECT_EQ(new_contents.size(), v.size()); EXPECT_LE(new_contents.size(), v.capacity()); if (target_size <= 3 && original_size <= 3) { // Storage should stay inline when target size is small. 
EXPECT_EQ(3, v.capacity()); } EXPECT_THAT(v, ElementsAreArray(new_contents)); } } } // Returns true if lhs and rhs have the same value. template static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) { return lhs.value() == rhs.value(); } // Test for ranged assign() using Instance as the element type and // SourceContainer as the source container type. template void InstanceRangedAssignTestForContainer() { // Test for all combinations of original sizes (empty and non-empty inline, // and out of line) and target sizes. for (size_t original_size = 0; original_size <= 5; ++original_size) { SCOPED_TRACE(original_size); // Original contents are [12345, 12345, ...] std::vector original_contents(original_size, Instance(12345)); for (size_t target_size = 0; target_size <= 5; ++target_size) { SCOPED_TRACE(target_size); // New contents are [3, 4, ...] // Generate data using a non-const container, because SourceContainer // itself may be const. // TODO(bsamwel): Test with an input iterator. std::vector new_contents_in; for (size_t i = 0; i < target_size; ++i) { new_contents_in.push_back(Instance(i + 3)); } SourceContainer new_contents(new_contents_in.begin(), new_contents_in.end()); absl::InlinedVector v(original_contents.begin(), original_contents.end()); v.assign(new_contents.begin(), new_contents.end()); EXPECT_EQ(new_contents.size(), v.size()); EXPECT_LE(new_contents.size(), v.capacity()); if (target_size <= 3 && original_size <= 3) { // Storage should stay inline when target size is small. EXPECT_EQ(3, v.capacity()); } EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(), InstanceValuesEqual)); } } } TYPED_TEST_P(InstanceTest, RangedAssign) { using Instance = TypeParam; // Test with const and non-const, random access and non-random-access sources. // TODO(bsamwel): Test with an input iterator source. 
SCOPED_TRACE("std::list"); InstanceRangedAssignTestForContainer>(); SCOPED_TRACE("const std::list"); InstanceRangedAssignTestForContainer>(); SCOPED_TRACE("std::vector"); InstanceRangedAssignTestForContainer>(); SCOPED_TRACE("const std::vector"); InstanceRangedAssignTestForContainer>(); } TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) { EXPECT_THAT((absl::InlinedVector{4, 5, 6}), AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6))); } TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) { EXPECT_THAT((absl::InlinedVector{4, 5, 6}), AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6))); } TEST(InitializerListConstructor, DisparateTypesInList) { EXPECT_THAT((absl::InlinedVector{-7, 8ULL}), ElementsAre(-7, 8)); EXPECT_THAT((absl::InlinedVector{"foo", std::string("bar")}), ElementsAre("foo", "bar")); } TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) { EXPECT_THAT((absl::InlinedVector{ CopyableMovableInstance(0)}), AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0)))); } TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) { EXPECT_THAT( (absl::InlinedVector{ CopyableMovableInstance(0), CopyableMovableInstance(1)}), AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1)))); } TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) { for (size_t original_size = 0; original_size <= 4; ++original_size) { SCOPED_TRACE(original_size); absl::InlinedVector v1(original_size, 12345); const size_t original_capacity_v1 = v1.capacity(); v1.assign({3}); EXPECT_THAT( v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3))); absl::InlinedVector v2(original_size, 12345); const size_t original_capacity_v2 = v2.capacity(); v2 = {3}; EXPECT_THAT( v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3))); } } TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) { for (size_t original_size = 0; original_size <= 4; ++original_size) { SCOPED_TRACE(original_size); 
absl::InlinedVector v1(original_size, 12345); v1.assign({3, 4, 5}); EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5))); EXPECT_LE(3, v1.capacity()); absl::InlinedVector v2(original_size, 12345); v2 = {3, 4, 5}; EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5))); EXPECT_LE(3, v2.capacity()); } } TEST(InitializerListAssign, DisparateTypesInList) { absl::InlinedVector v_int1; v_int1.assign({-7, 8ULL}); EXPECT_THAT(v_int1, ElementsAre(-7, 8)); absl::InlinedVector v_int2; v_int2 = {-7, 8ULL}; EXPECT_THAT(v_int2, ElementsAre(-7, 8)); absl::InlinedVector v_string1; v_string1.assign({"foo", std::string("bar")}); EXPECT_THAT(v_string1, ElementsAre("foo", "bar")); absl::InlinedVector v_string2; v_string2 = {"foo", std::string("bar")}; EXPECT_THAT(v_string2, ElementsAre("foo", "bar")); } TYPED_TEST_P(InstanceTest, InitializerListAssign) { using Instance = TypeParam; for (size_t original_size = 0; original_size <= 4; ++original_size) { SCOPED_TRACE(original_size); absl::InlinedVector v(original_size, Instance(12345)); const size_t original_capacity = v.capacity(); v.assign({Instance(3)}); EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity), ElementsAre(ValueIs(3)))); } for (size_t original_size = 0; original_size <= 4; ++original_size) { SCOPED_TRACE(original_size); absl::InlinedVector v(original_size, Instance(12345)); v.assign({Instance(3), Instance(4), Instance(5)}); EXPECT_THAT( v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5)))); EXPECT_LE(3, v.capacity()); } } REGISTER_TYPED_TEST_SUITE_P(InstanceTest, Swap, CountConstructorsDestructors, CountConstructorsDestructorsOnCopyConstruction, CountConstructorsDestructorsOnMoveConstruction, CountConstructorsDestructorsOnAssignment, CountConstructorsDestructorsOnMoveAssignment, CountElemAssignInlineBacking, RangedConstructor, RangedAssign, InitializerListAssign); using InstanceTypes = ::testing::Types; INSTANTIATE_TYPED_TEST_SUITE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes); 
TEST(DynamicVec, DynamicVecCompiles) { DynamicVec v; (void)v; } TEST(AllocatorSupportTest, Constructors) { using MyAlloc = CountingAllocator; using AllocVec = absl::InlinedVector; const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; int64_t allocated = 0; MyAlloc alloc(&allocated); { AllocVec ABSL_ATTRIBUTE_UNUSED v; } { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); } { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); } { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); } AllocVec v2; { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); } { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); } } TEST(AllocatorSupportTest, CountAllocations) { using MyAlloc = CountingAllocator; using AllocVec = absl::InlinedVector; const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7}; int64_t allocated = 0; MyAlloc alloc(&allocated); { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc); EXPECT_THAT(allocated, 0); } EXPECT_THAT(allocated, 0); { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); EXPECT_THAT(allocated, v.size() * sizeof(int)); } EXPECT_THAT(allocated, 0); { AllocVec v(4, 1, alloc); EXPECT_THAT(allocated, 0); int64_t allocated2 = 0; MyAlloc alloc2(&allocated2); AllocVec v2(v, alloc2); EXPECT_THAT(allocated2, 0); int64_t allocated3 = 0; MyAlloc alloc3(&allocated3); AllocVec v3(std::move(v), alloc3); EXPECT_THAT(allocated3, 0); } EXPECT_THAT(allocated, 0); { AllocVec v(8, 2, alloc); EXPECT_THAT(allocated, v.size() * sizeof(int)); int64_t allocated2 = 0; MyAlloc alloc2(&allocated2); AllocVec v2(v, alloc2); EXPECT_THAT(allocated2, v2.size() * sizeof(int)); int64_t allocated3 = 0; MyAlloc alloc3(&allocated3); AllocVec v3(std::move(v), alloc3); EXPECT_THAT(allocated3, v3.size() * sizeof(int)); } EXPECT_EQ(allocated, 0); { // Test shrink_to_fit deallocations. 
AllocVec v(8, 2, alloc); EXPECT_EQ(allocated, 8 * sizeof(int)); v.resize(5); EXPECT_EQ(allocated, 8 * sizeof(int)); v.shrink_to_fit(); EXPECT_EQ(allocated, 5 * sizeof(int)); v.resize(4); EXPECT_EQ(allocated, 5 * sizeof(int)); v.shrink_to_fit(); EXPECT_EQ(allocated, 0); } } TEST(AllocatorSupportTest, SwapBothAllocated) { using MyAlloc = CountingAllocator; using AllocVec = absl::InlinedVector; int64_t allocated1 = 0; int64_t allocated2 = 0; { const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8}; MyAlloc a1(&allocated1); MyAlloc a2(&allocated2); AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); EXPECT_LT(v1.capacity(), v2.capacity()); EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); EXPECT_THAT(allocated2, v2.capacity() * sizeof(int)); v1.swap(v2); EXPECT_THAT(v1, ElementsAreArray(ia2)); EXPECT_THAT(v2, ElementsAreArray(ia1)); EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); EXPECT_THAT(allocated2, v1.capacity() * sizeof(int)); } EXPECT_THAT(allocated1, 0); EXPECT_THAT(allocated2, 0); } TEST(AllocatorSupportTest, SwapOneAllocated) { using MyAlloc = CountingAllocator; using AllocVec = absl::InlinedVector; int64_t allocated1 = 0; int64_t allocated2 = 0; { const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7}; const int ia2[] = {0, 1, 2, 3}; MyAlloc a1(&allocated1); MyAlloc a2(&allocated2); AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1); AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2); EXPECT_THAT(allocated1, v1.capacity() * sizeof(int)); EXPECT_THAT(allocated2, 0); v1.swap(v2); EXPECT_THAT(v1, ElementsAreArray(ia2)); EXPECT_THAT(v2, ElementsAreArray(ia1)); EXPECT_THAT(allocated1, v2.capacity() * sizeof(int)); EXPECT_THAT(allocated2, 0); EXPECT_TRUE(v2.get_allocator() == a1); EXPECT_TRUE(v1.get_allocator() == a2); } EXPECT_THAT(allocated1, 0); EXPECT_THAT(allocated2, 0); } TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) { using StdVector = std::vector>; using Alloc = 
CountingAllocator; using ScopedAlloc = std::scoped_allocator_adaptor; using AllocVec = absl::InlinedVector; int64_t total_allocated_byte_count = 0; AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); // Called only once to remain inlined inlined_case.emplace_back(); int64_t absl_responsible_for_count = total_allocated_byte_count; // MSVC's allocator preemptively allocates in debug mode #if !defined(_MSC_VER) EXPECT_EQ(absl_responsible_for_count, 0); #endif // !defined(_MSC_VER) inlined_case[0].emplace_back(); EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); inlined_case.clear(); inlined_case.shrink_to_fit(); EXPECT_EQ(total_allocated_byte_count, 0); } TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) { using StdVector = std::vector>; using Alloc = CountingAllocator; using ScopedAlloc = std::scoped_allocator_adaptor; using AllocVec = absl::InlinedVector; int64_t total_allocated_byte_count = 0; AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count))); // Called twice to force into being allocated allocated_case.emplace_back(); allocated_case.emplace_back(); int64_t absl_responsible_for_count = total_allocated_byte_count; EXPECT_GT(absl_responsible_for_count, 0); allocated_case[1].emplace_back(); EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count); allocated_case.clear(); allocated_case.shrink_to_fit(); EXPECT_EQ(total_allocated_byte_count, 0); } TEST(AllocatorSupportTest, SizeAllocConstructor) { constexpr int inlined_size = 4; using Alloc = CountingAllocator; using AllocVec = absl::InlinedVector; { auto len = inlined_size / 2; int64_t allocated = 0; auto v = AllocVec(len, Alloc(&allocated)); // Inline storage used; allocator should not be invoked EXPECT_THAT(allocated, 0); EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); } { auto len = inlined_size * 2; int64_t allocated = 0; auto v = AllocVec(len, Alloc(&allocated)); // Out of line storage used; allocation of 8 elements expected 
EXPECT_THAT(allocated, len * sizeof(int)); EXPECT_THAT(v, AllOf(SizeIs(len), Each(0))); } } TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) { using T = int; using A = std::allocator; using ATraits = absl::allocator_traits; struct MinimumAllocator { using value_type = T; value_type* allocate(size_t n) { A a; return ATraits::allocate(a, n); } void deallocate(value_type* p, size_t n) { A a; ATraits::deallocate(a, p, n); } }; absl::InlinedVector vec; vec.emplace_back(); vec.resize(0); } TEST(InlinedVectorTest, AbslHashValueWorks) { using V = absl::InlinedVector; std::vector cases; // Generate a variety of vectors some of these are small enough for the inline // space but are stored out of line. for (int i = 0; i < 10; ++i) { V v; for (int j = 0; j < i; ++j) { v.push_back(j); } cases.push_back(v); v.resize(i % 4); cases.push_back(v); } EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases)); } } // anonymous namespace abseil-20220623.1/absl/container/internal/000077500000000000000000000000001430371345100200725ustar00rootroot00000000000000abseil-20220623.1/absl/container/internal/btree.h000066400000000000000000003213041430371345100213470ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // A btree implementation of the STL set and map interfaces. A btree is smaller // and generally also faster than STL set/map (refer to the benchmarks below). 
// The red-black tree implementation of STL set/map has an overhead of 3 // pointers (left, right and parent) plus the node color information for each // stored value. So a set consumes 40 bytes for each value stored in // 64-bit mode. This btree implementation stores multiple values on fixed // size nodes (usually 256 bytes) and doesn't store child pointers for leaf // nodes. The result is that a btree_set may use much less memory per // stored value. For the random insertion benchmark in btree_bench.cc, a // btree_set with node-size of 256 uses 5.1 bytes per stored value. // // The packing of multiple values on to each node of a btree has another effect // besides better space utilization: better cache locality due to fewer cache // lines being accessed. Better cache locality translates into faster // operations. // // CAVEATS // // Insertions and deletions on a btree can cause splitting, merging or // rebalancing of btree nodes. And even without these operations, insertions // and deletions on a btree will move values around within a node. In both // cases, the result is that insertions and deletions can invalidate iterators // pointing to values other than the one being inserted/deleted. Therefore, this // container does not provide pointer stability. This is notably different from // STL set/map which takes care to not invalidate iterators on insert/erase // except, of course, for iterators pointing to the value being erased. A // partial workaround when erasing is available: erase() returns an iterator // pointing to the item just after the one that was erased (or end() if none // exists). 
#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_ #define ABSL_CONTAINER_INTERNAL_BTREE_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include "absl/base/internal/raw_logging.h" #include "absl/base/macros.h" #include "absl/container/internal/common.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/layout.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" #include "absl/types/compare.h" #include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { #ifdef ABSL_BTREE_ENABLE_GENERATIONS #error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set #elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \ defined(ABSL_HAVE_MEMORY_SANITIZER) // When compiled in sanitizer mode, we add generation integers to the nodes and // iterators. When iterators are used, we validate that the container has not // been mutated since the iterator was constructed. #define ABSL_BTREE_ENABLE_GENERATIONS #endif template using compare_result_t = absl::result_of_t; // A helper class that indicates if the Compare parameter is a key-compare-to // comparator. template using btree_is_key_compare_to = std::is_convertible, absl::weak_ordering>; struct StringBtreeDefaultLess { using is_transparent = void; StringBtreeDefaultLess() = default; // Compatibility constructor. StringBtreeDefaultLess(std::less) {} // NOLINT StringBtreeDefaultLess(std::less) {} // NOLINT // Allow converting to std::less for use in key_comp()/value_comp(). 
explicit operator std::less() const { return {}; } explicit operator std::less() const { return {}; } explicit operator std::less() const { return {}; } absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(lhs.compare(rhs)); } StringBtreeDefaultLess(std::less) {} // NOLINT absl::weak_ordering operator()(const absl::Cord &lhs, const absl::Cord &rhs) const { return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); } absl::weak_ordering operator()(const absl::Cord &lhs, absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(lhs.Compare(rhs)); } absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord &rhs) const { return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs)); } }; struct StringBtreeDefaultGreater { using is_transparent = void; StringBtreeDefaultGreater() = default; StringBtreeDefaultGreater(std::greater) {} // NOLINT StringBtreeDefaultGreater(std::greater) {} // NOLINT // Allow converting to std::greater for use in key_comp()/value_comp(). 
explicit operator std::greater() const { return {}; } explicit operator std::greater() const { return {}; } explicit operator std::greater() const { return {}; } absl::weak_ordering operator()(absl::string_view lhs, absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(rhs.compare(lhs)); } StringBtreeDefaultGreater(std::greater) {} // NOLINT absl::weak_ordering operator()(const absl::Cord &lhs, const absl::Cord &rhs) const { return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); } absl::weak_ordering operator()(const absl::Cord &lhs, absl::string_view rhs) const { return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs)); } absl::weak_ordering operator()(absl::string_view lhs, const absl::Cord &rhs) const { return compare_internal::compare_result_as_ordering(rhs.Compare(lhs)); } }; // See below comments for checked_compare. template ::value> struct checked_compare_base : Compare { using Compare::Compare; explicit checked_compare_base(Compare c) : Compare(std::move(c)) {} const Compare &comp() const { return *this; } }; template struct checked_compare_base { explicit checked_compare_base(Compare c) : compare(std::move(c)) {} const Compare &comp() const { return compare; } Compare compare; }; // A mechanism for opting out of checked_compare for use only in btree_test.cc. struct BtreeTestOnlyCheckedCompareOptOutBase {}; // A helper class to adapt the specified comparator for two use cases: // (1) When using common Abseil string types with common comparison functors, // convert a boolean comparison into a three-way comparison that returns an // `absl::weak_ordering`. This helper class is specialized for // less, greater, less, // greater, less, and greater. // (2) Adapt the comparator to diagnose cases of non-strict-weak-ordering (see // https://en.cppreference.com/w/cpp/named_req/Compare) in debug mode. Whenever // a comparison is made, we will make assertions to verify that the comparator // is valid. 
template struct key_compare_adapter { // Inherit from checked_compare_base to support function pointers and also // keep empty-base-optimization (EBO) support for classes. // Note: we can't use CompressedTuple here because that would interfere // with the EBO for `btree::rightmost_`. `btree::rightmost_` is itself a // CompressedTuple and nested `CompressedTuple`s don't support EBO. // TODO(b/214288561): use CompressedTuple instead once it supports EBO for // nested `CompressedTuple`s. struct checked_compare : checked_compare_base { private: using Base = typename checked_compare::checked_compare_base; using Base::comp; // If possible, returns whether `t` is equivalent to itself. We can only do // this for `Key`s because we can't be sure that it's safe to call // `comp()(k, k)` otherwise. Even if SFINAE allows it, there could be a // compilation failure inside the implementation of the comparison operator. bool is_self_equivalent(const Key &k) const { // Note: this works for both boolean and three-way comparators. return comp()(k, k) == 0; } // If we can't compare `t` with itself, returns true unconditionally. template bool is_self_equivalent(const T &) const { return true; } public: using Base::Base; checked_compare(Compare comp) : Base(std::move(comp)) {} // NOLINT // Allow converting to Compare for use in key_comp()/value_comp(). explicit operator Compare() const { return comp(); } template >::value, int> = 0> bool operator()(const T &lhs, const U &rhs) const { // NOTE: if any of these assertions fail, then the comparator does not // establish a strict-weak-ordering (see // https://en.cppreference.com/w/cpp/named_req/Compare). 
assert(is_self_equivalent(lhs)); assert(is_self_equivalent(rhs)); const bool lhs_comp_rhs = comp()(lhs, rhs); assert(!lhs_comp_rhs || !comp()(rhs, lhs)); return lhs_comp_rhs; } template < typename T, typename U, absl::enable_if_t, absl::weak_ordering>::value, int> = 0> absl::weak_ordering operator()(const T &lhs, const U &rhs) const { // NOTE: if any of these assertions fail, then the comparator does not // establish a strict-weak-ordering (see // https://en.cppreference.com/w/cpp/named_req/Compare). assert(is_self_equivalent(lhs)); assert(is_self_equivalent(rhs)); const absl::weak_ordering lhs_comp_rhs = comp()(lhs, rhs); #ifndef NDEBUG const absl::weak_ordering rhs_comp_lhs = comp()(rhs, lhs); if (lhs_comp_rhs > 0) { assert(rhs_comp_lhs < 0 && "lhs_comp_rhs > 0 -> rhs_comp_lhs < 0"); } else if (lhs_comp_rhs == 0) { assert(rhs_comp_lhs == 0 && "lhs_comp_rhs == 0 -> rhs_comp_lhs == 0"); } else { assert(rhs_comp_lhs > 0 && "lhs_comp_rhs < 0 -> rhs_comp_lhs > 0"); } #endif return lhs_comp_rhs; } }; using type = absl::conditional_t< std::is_base_of::value, Compare, checked_compare>; }; template <> struct key_compare_adapter, std::string> { using type = StringBtreeDefaultLess; }; template <> struct key_compare_adapter, std::string> { using type = StringBtreeDefaultGreater; }; template <> struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultLess; }; template <> struct key_compare_adapter, absl::string_view> { using type = StringBtreeDefaultGreater; }; template <> struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultLess; }; template <> struct key_compare_adapter, absl::Cord> { using type = StringBtreeDefaultGreater; }; // Detects an 'absl_btree_prefer_linear_node_search' member. This is // a protocol used as an opt-in or opt-out of linear search. // // For example, this would be useful for key types that wrap an integer // and define their own cheap operator<(). 
For example: // // class K { // public: // using absl_btree_prefer_linear_node_search = std::true_type; // ... // private: // friend bool operator<(K a, K b) { return a.k_ < b.k_; } // int k_; // }; // // btree_map m; // Uses linear search // // If T has the preference tag, then it has a preference. // Btree will use the tag's truth value. template struct has_linear_node_search_preference : std::false_type {}; template struct prefers_linear_node_search : std::false_type {}; template struct has_linear_node_search_preference< T, absl::void_t> : std::true_type {}; template struct prefers_linear_node_search< T, absl::void_t> : T::absl_btree_prefer_linear_node_search {}; template constexpr bool compare_has_valid_result_type() { using compare_result_type = compare_result_t; return std::is_same::value || std::is_convertible::value; } template class map_value_compare { template friend class btree; // Note: this `protected` is part of the API of std::map::value_compare. See // https://en.cppreference.com/w/cpp/container/map/value_compare. protected: explicit map_value_compare(original_key_compare c) : comp(std::move(c)) {} original_key_compare comp; // NOLINT public: auto operator()(const value_type &lhs, const value_type &rhs) const -> decltype(comp(lhs.first, rhs.first)) { return comp(lhs.first, rhs.first); } }; template struct common_params { using original_key_compare = Compare; // If Compare is a common comparator for a string-like type, then we adapt it // to use heterogeneous lookup and to be a key-compare-to comparator. // We also adapt the comparator to diagnose invalid comparators in debug mode. // We disable this when `Compare` is invalid in a way that will cause // adaptation to fail (having invalid return type) so that we can give a // better compilation failure in static_assert_validation. If we don't do // this, then there will be cascading compilation failures that are confusing // for users. 
using key_compare = absl::conditional_t(), Compare, typename key_compare_adapter::type>; static constexpr bool kIsKeyCompareStringAdapted = std::is_same::value || std::is_same::value; static constexpr bool kIsKeyCompareTransparent = IsTransparent::value || kIsKeyCompareStringAdapted; static constexpr bool kEnableGenerations = #ifdef ABSL_BTREE_ENABLE_GENERATIONS true; #else false; #endif // A type which indicates if we have a key-compare-to functor or a plain old // key-compare functor. using is_key_compare_to = btree_is_key_compare_to; using allocator_type = Alloc; using key_type = Key; using size_type = size_t; using difference_type = ptrdiff_t; using slot_policy = SlotPolicy; using slot_type = typename slot_policy::slot_type; using value_type = typename slot_policy::value_type; using init_type = typename slot_policy::mutable_value_type; using pointer = value_type *; using const_pointer = const value_type *; using reference = value_type &; using const_reference = const value_type &; using value_compare = absl::conditional_t, original_key_compare>; using is_map_container = std::integral_constant; // For the given lookup key type, returns whether we can have multiple // equivalent keys in the btree. If this is a multi-container, then we can. // Otherwise, we can have multiple equivalent keys only if all of the // following conditions are met: // - The comparator is transparent. // - The lookup key type is not the same as key_type. // - The comparator is not a StringBtreeDefault{Less,Greater} comparator // that we know has the same equivalence classes for all lookup types. template constexpr static bool can_have_multiple_equivalent_keys() { return IsMulti || (IsTransparent::value && !std::is_same::value && !kIsKeyCompareStringAdapted); } enum { kTargetNodeSize = TargetNodeSize, // Upper bound for the available space for slots. This is largest for leaf // nodes, which have overhead of at least a pointer + 4 bytes (for storing // 3 field_types and an enum). 
kNodeSlotSpace = TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4), }; // This is an integral type large enough to hold as many slots as will fit a // node of TargetNodeSize bytes. using node_count_type = absl::conditional_t<(kNodeSlotSpace / sizeof(slot_type) > (std::numeric_limits::max)()), uint16_t, uint8_t>; // NOLINT // The following methods are necessary for passing this struct as PolicyTraits // for node_handle and/or are used within btree. static value_type &element(slot_type *slot) { return slot_policy::element(slot); } static const value_type &element(const slot_type *slot) { return slot_policy::element(slot); } template static void construct(Alloc *alloc, slot_type *slot, Args &&... args) { slot_policy::construct(alloc, slot, std::forward(args)...); } static void construct(Alloc *alloc, slot_type *slot, slot_type *other) { slot_policy::construct(alloc, slot, other); } static void destroy(Alloc *alloc, slot_type *slot) { slot_policy::destroy(alloc, slot); } static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) { slot_policy::transfer(alloc, new_slot, old_slot); } }; // An adapter class that converts a lower-bound compare into an upper-bound // compare. Note: there is no need to make a version of this adapter specialized // for key-compare-to functors because the upper-bound (the first value greater // than the input) is never an exact match. template struct upper_bound_adapter { explicit upper_bound_adapter(const Compare &c) : comp(c) {} template bool operator()(const K1 &a, const K2 &b) const { // Returns true when a is not greater than b. return !compare_internal::compare_result_as_less_than(comp(b, a)); } private: Compare comp; }; enum class MatchKind : uint8_t { kEq, kNe }; template struct SearchResult { V value; MatchKind match; static constexpr bool HasMatch() { return true; } bool IsEq() const { return match == MatchKind::kEq; } }; // When we don't use CompareTo, `match` is not present. 
// This ensures that callers can't use it accidentally when it provides no // useful information. template struct SearchResult { SearchResult() {} explicit SearchResult(V v) : value(v) {} SearchResult(V v, MatchKind /*match*/) : value(v) {} V value; static constexpr bool HasMatch() { return false; } static constexpr bool IsEq() { return false; } }; // A node in the btree holding. The same node type is used for both internal // and leaf nodes in the btree, though the nodes are allocated in such a way // that the children array is only valid in internal nodes. template class btree_node { using is_key_compare_to = typename Params::is_key_compare_to; using field_type = typename Params::node_count_type; using allocator_type = typename Params::allocator_type; using slot_type = typename Params::slot_type; using original_key_compare = typename Params::original_key_compare; public: using params_type = Params; using key_type = typename Params::key_type; using value_type = typename Params::value_type; using pointer = typename Params::pointer; using const_pointer = typename Params::const_pointer; using reference = typename Params::reference; using const_reference = typename Params::const_reference; using key_compare = typename Params::key_compare; using size_type = typename Params::size_type; using difference_type = typename Params::difference_type; // Btree decides whether to use linear node search as follows: // - If the comparator expresses a preference, use that. // - If the key expresses a preference, use that. // - If the key is arithmetic and the comparator is std::less or // std::greater, choose linear. // - Otherwise, choose binary. // TODO(ezb): Might make sense to add condition(s) based on node-size. using use_linear_search = std::integral_constant< bool, has_linear_node_search_preference::value ? prefers_linear_node_search::value : has_linear_node_search_preference::value ? 
prefers_linear_node_search::value : std::is_arithmetic::value && (std::is_same, original_key_compare>::value || std::is_same, original_key_compare>::value)>; // This class is organized by absl::container_internal::Layout as if it had // the following structure: // // A pointer to the node's parent. // btree_node *parent; // // // When ABSL_BTREE_ENABLE_GENERATIONS is defined, we also have a // // generation integer in order to check that when iterators are // // used, they haven't been invalidated already. Only the generation on // // the root is used, but we have one on each node because whether a node // // is root or not can change. // uint32_t generation; // // // The position of the node in the node's parent. // field_type position; // // The index of the first populated value in `values`. // // TODO(ezb): right now, `start` is always 0. Update insertion/merge // // logic to allow for floating storage within nodes. // field_type start; // // The index after the last populated value in `values`. Currently, this // // is the same as the count of values. // field_type finish; // // The maximum number of values the node can hold. This is an integer in // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal // // nodes (even though there are still kNodeSlots values in the node). // // TODO(ezb): make max_count use only 4 bits and record log2(capacity) // // to free extra bits for is_root, etc. // field_type max_count; // // // The array of values. The capacity is `max_count` for leaf nodes and // // kNodeSlots for internal nodes. Only the values in // // [start, finish) have been initialized and are valid. // slot_type values[max_count]; // // // The array of child pointers. The keys in children[i] are all less // // than key(i). The keys in children[i + 1] are all greater than key(i). 
// // There are 0 children for leaf nodes and kNodeSlots + 1 children for // // internal nodes. // btree_node *children[kNodeSlots + 1]; // // This class is only constructed by EmptyNodeType. Normally, pointers to the // layout above are allocated, cast to btree_node*, and de-allocated within // the btree implementation. ~btree_node() = default; btree_node(btree_node const &) = delete; btree_node &operator=(btree_node const &) = delete; // Public for EmptyNodeType. constexpr static size_type Alignment() { static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(), "Alignment of all nodes must be equal."); return InternalLayout().Alignment(); } protected: btree_node() = default; private: using layout_type = absl::container_internal::Layout; constexpr static size_type SizeWithNSlots(size_type n) { return layout_type( /*parent*/ 1, /*generation*/ params_type::kEnableGenerations ? 1 : 0, /*position, start, finish, max_count*/ 4, /*slots*/ n, /*children*/ 0) .AllocSize(); } // A lower bound for the overhead of fields other than slots in a leaf node. constexpr static size_type MinimumOverhead() { return SizeWithNSlots(1) - sizeof(slot_type); } // Compute how many values we can fit onto a leaf node taking into account // padding. constexpr static size_type NodeTargetSlots(const size_type begin, const size_type end) { return begin == end ? begin : SizeWithNSlots((begin + end) / 2 + 1) > params_type::kTargetNodeSize ? NodeTargetSlots(begin, (begin + end) / 2) : NodeTargetSlots((begin + end) / 2 + 1, end); } enum { kTargetNodeSize = params_type::kTargetNodeSize, kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize), // We need a minimum of 3 slots per internal node in order to perform // splitting (1 value for the two nodes involved in the split and 1 value // propagated to the parent as the delimiter for the split). For performance // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy // of 1/3 (for a node, not a b-tree). 
kMinNodeSlots = 4, kNodeSlots = kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots, // The node is internal (i.e. is not a leaf node) if and only if `max_count` // has this value. kInternalNodeMaxCount = 0, }; // Leaves can have less than kNodeSlots values. constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) { return layout_type( /*parent*/ 1, /*generation*/ params_type::kEnableGenerations ? 1 : 0, /*position, start, finish, max_count*/ 4, /*slots*/ slot_count, /*children*/ 0); } constexpr static layout_type InternalLayout() { return layout_type( /*parent*/ 1, /*generation*/ params_type::kEnableGenerations ? 1 : 0, /*position, start, finish, max_count*/ 4, /*slots*/ kNodeSlots, /*children*/ kNodeSlots + 1); } constexpr static size_type LeafSize(const int slot_count = kNodeSlots) { return LeafLayout(slot_count).AllocSize(); } constexpr static size_type InternalSize() { return InternalLayout().AllocSize(); } // N is the index of the type in the Layout definition. // ElementType is the Nth type in the Layout definition. template inline typename layout_type::template ElementType *GetField() { // We assert that we don't read from values that aren't there. 
assert(N < 4 || is_internal()); return InternalLayout().template Pointer(reinterpret_cast(this)); } template inline const typename layout_type::template ElementType *GetField() const { assert(N < 4 || is_internal()); return InternalLayout().template Pointer( reinterpret_cast(this)); } void set_parent(btree_node *p) { *GetField<0>() = p; } field_type &mutable_finish() { return GetField<2>()[2]; } slot_type *slot(int i) { return &GetField<3>()[i]; } slot_type *start_slot() { return slot(start()); } slot_type *finish_slot() { return slot(finish()); } const slot_type *slot(int i) const { return &GetField<3>()[i]; } void set_position(field_type v) { GetField<2>()[0] = v; } void set_start(field_type v) { GetField<2>()[1] = v; } void set_finish(field_type v) { GetField<2>()[2] = v; } // This method is only called by the node init methods. void set_max_count(field_type v) { GetField<2>()[3] = v; } public: // Whether this is a leaf node or not. This value doesn't change after the // node is created. bool is_leaf() const { return GetField<2>()[3] != kInternalNodeMaxCount; } // Whether this is an internal node or not. This value doesn't change after // the node is created. bool is_internal() const { return !is_leaf(); } // Getter for the position of this node in its parent. field_type position() const { return GetField<2>()[0]; } // Getter for the offset of the first value in the `values` array. field_type start() const { // TODO(ezb): when floating storage is implemented, return GetField<2>()[1]; assert(GetField<2>()[1] == 0); return 0; } // Getter for the offset after the last value in the `values` array. field_type finish() const { return GetField<2>()[2]; } // Getters for the number of values stored in this node. field_type count() const { assert(finish() >= start()); return finish() - start(); } field_type max_count() const { // Internal nodes have max_count==kInternalNodeMaxCount. // Leaf nodes have max_count in [1, kNodeSlots]. 
const field_type max_count = GetField<2>()[3]; return max_count == field_type{kInternalNodeMaxCount} ? field_type{kNodeSlots} : max_count; } // Getter for the parent of this node. btree_node *parent() const { return *GetField<0>(); } // Getter for whether the node is the root of the tree. The parent of the // root of the tree is the leftmost node in the tree which is guaranteed to // be a leaf. bool is_root() const { return parent()->is_leaf(); } void make_root() { assert(parent()->is_root()); set_generation(parent()->generation()); set_parent(parent()->parent()); } // Gets the root node's generation integer, which is the one used by the tree. uint32_t *get_root_generation() const { assert(params_type::kEnableGenerations); const btree_node *curr = this; for (; !curr->is_root(); curr = curr->parent()) continue; return const_cast(&curr->GetField<1>()[0]); } // Returns the generation for iterator validation. uint32_t generation() const { return params_type::kEnableGenerations ? *get_root_generation() : 0; } // Updates generation. Should only be called on a root node or during node // initialization. void set_generation(uint32_t generation) { if (params_type::kEnableGenerations) GetField<1>()[0] = generation; } // Updates the generation. We do this whenever the node is mutated. void next_generation() { if (params_type::kEnableGenerations) ++*get_root_generation(); } // Getters for the key/value at position i in the node. const key_type &key(int i) const { return params_type::key(slot(i)); } reference value(int i) { return params_type::element(slot(i)); } const_reference value(int i) const { return params_type::element(slot(i)); } // Getters/setter for the child at position i in the node. 
btree_node *child(int i) const { return GetField<4>()[i]; } btree_node *start_child() const { return child(start()); } btree_node *&mutable_child(int i) { return GetField<4>()[i]; } void clear_child(int i) { absl::container_internal::SanitizerPoisonObject(&mutable_child(i)); } void set_child(int i, btree_node *c) { absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i)); mutable_child(i) = c; c->set_position(i); } void init_child(int i, btree_node *c) { set_child(i, c); c->set_parent(this); } // Returns the position of the first value whose key is not less than k. template SearchResult lower_bound( const K &k, const key_compare &comp) const { return use_linear_search::value ? linear_search(k, comp) : binary_search(k, comp); } // Returns the position of the first value whose key is greater than k. template int upper_bound(const K &k, const key_compare &comp) const { auto upper_compare = upper_bound_adapter(comp); return use_linear_search::value ? linear_search(k, upper_compare).value : binary_search(k, upper_compare).value; } template SearchResult::value> linear_search(const K &k, const Compare &comp) const { return linear_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); } template SearchResult::value> binary_search(const K &k, const Compare &comp) const { return binary_search_impl(k, start(), finish(), comp, btree_is_key_compare_to()); } // Returns the position of the first value whose key is not less than k using // linear search performed using plain compare. template SearchResult linear_search_impl( const K &k, int s, const int e, const Compare &comp, std::false_type /* IsCompareTo */) const { while (s < e) { if (!comp(key(s), k)) { break; } ++s; } return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using // linear search performed using compare-to. 
template SearchResult linear_search_impl( const K &k, int s, const int e, const Compare &comp, std::true_type /* IsCompareTo */) const { while (s < e) { const absl::weak_ordering c = comp(key(s), k); if (c == 0) { return {s, MatchKind::kEq}; } else if (c > 0) { break; } ++s; } return {s, MatchKind::kNe}; } // Returns the position of the first value whose key is not less than k using // binary search performed using plain compare. template SearchResult binary_search_impl( const K &k, int s, int e, const Compare &comp, std::false_type /* IsCompareTo */) const { while (s != e) { const int mid = (s + e) >> 1; if (comp(key(mid), k)) { s = mid + 1; } else { e = mid; } } return SearchResult{s}; } // Returns the position of the first value whose key is not less than k using // binary search performed using compare-to. template SearchResult binary_search_impl( const K &k, int s, int e, const CompareTo &comp, std::true_type /* IsCompareTo */) const { if (params_type::template can_have_multiple_equivalent_keys()) { MatchKind exact_match = MatchKind::kNe; while (s != e) { const int mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); if (c < 0) { s = mid + 1; } else { e = mid; if (c == 0) { // Need to return the first value whose key is not less than k, // which requires continuing the binary search if there could be // multiple equivalent keys. exact_match = MatchKind::kEq; } } } return {s, exact_match}; } else { // Can't have multiple equivalent keys. while (s != e) { const int mid = (s + e) >> 1; const absl::weak_ordering c = comp(key(mid), k); if (c < 0) { s = mid + 1; } else if (c > 0) { e = mid; } else { return {mid, MatchKind::kEq}; } } return {s, MatchKind::kNe}; } } // Emplaces a value at position i, shifting all existing values and // children at positions >= i to the right by 1. template void emplace_value(size_type i, allocator_type *alloc, Args &&... 
args); // Removes the values at positions [i, i + to_erase), shifting all existing // values and children after that range to the left by to_erase. Clears all // children between [i, i + to_erase). void remove_values(field_type i, field_type to_erase, allocator_type *alloc); // Rebalances a node with its right sibling. void rebalance_right_to_left(int to_move, btree_node *right, allocator_type *alloc); void rebalance_left_to_right(int to_move, btree_node *right, allocator_type *alloc); // Splits a node, moving a portion of the node's values to its right sibling. void split(int insert_position, btree_node *dest, allocator_type *alloc); // Merges a node with its right sibling, moving all of the values and the // delimiting key in the parent node onto itself, and deleting the src node. void merge(btree_node *src, allocator_type *alloc); // Node allocation/deletion routines. void init_leaf(int max_count, btree_node *parent) { set_generation(0); set_parent(parent); set_position(0); set_start(0); set_finish(0); set_max_count(max_count); absl::container_internal::SanitizerPoisonMemoryRegion( start_slot(), max_count * sizeof(slot_type)); } void init_internal(btree_node *parent) { init_leaf(kNodeSlots, parent); // Set `max_count` to a sentinel value to indicate that this node is // internal. set_max_count(kInternalNodeMaxCount); absl::container_internal::SanitizerPoisonMemoryRegion( &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *)); } static void deallocate(const size_type size, btree_node *node, allocator_type *alloc) { absl::container_internal::Deallocate(alloc, node, size); } // Deletes a node and all of its children. static void clear_and_delete(btree_node *node, allocator_type *alloc); private: template void value_init(const field_type i, allocator_type *alloc, Args &&... 
args) { next_generation(); absl::container_internal::SanitizerUnpoisonObject(slot(i)); params_type::construct(alloc, slot(i), std::forward(args)...); } void value_destroy(const field_type i, allocator_type *alloc) { next_generation(); params_type::destroy(alloc, slot(i)); absl::container_internal::SanitizerPoisonObject(slot(i)); } void value_destroy_n(const field_type i, const field_type n, allocator_type *alloc) { next_generation(); for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) { params_type::destroy(alloc, s); absl::container_internal::SanitizerPoisonObject(s); } } static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) { absl::container_internal::SanitizerUnpoisonObject(dest); params_type::transfer(alloc, dest, src); absl::container_internal::SanitizerPoisonObject(src); } // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`. void transfer(const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { next_generation(); transfer(slot(dest_i), src_node->slot(src_i), alloc); } // Transfers `n` values starting at value `src_i` in `src_node` into the // values starting at value `dest_i` in `this`. void transfer_n(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { next_generation(); for (slot_type *src = src_node->slot(src_i), *end = src + n, *dest = slot(dest_i); src != end; ++src, ++dest) { transfer(dest, src, alloc); } } // Same as above, except that we start at the end and work our way to the // beginning. 
void transfer_n_backward(const size_type n, const size_type dest_i, const size_type src_i, btree_node *src_node, allocator_type *alloc) { next_generation(); for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n, *dest = slot(dest_i + n - 1); src != end; --src, --dest) { transfer(dest, src, alloc); } } template friend class btree; template friend class btree_iterator; friend class BtreeNodePeer; friend struct btree_access; }; template class btree_iterator { using key_type = typename Node::key_type; using size_type = typename Node::size_type; using params_type = typename Node::params_type; using is_map_container = typename params_type::is_map_container; using node_type = Node; using normal_node = typename std::remove_const::type; using const_node = const Node; using normal_pointer = typename params_type::pointer; using normal_reference = typename params_type::reference; using const_pointer = typename params_type::const_pointer; using const_reference = typename params_type::const_reference; using slot_type = typename params_type::slot_type; using iterator = btree_iterator; using const_iterator = btree_iterator; public: // These aliases are public for std::iterator_traits. using difference_type = typename Node::difference_type; using value_type = typename params_type::value_type; using pointer = Pointer; using reference = Reference; using iterator_category = std::bidirectional_iterator_tag; btree_iterator() : btree_iterator(nullptr, -1) {} explicit btree_iterator(Node *n) : btree_iterator(n, n->start()) {} btree_iterator(Node *n, int p) : node_(n), position_(p) { #ifdef ABSL_BTREE_ENABLE_GENERATIONS // Use `~uint32_t{}` as a sentinel value for iterator generations so it // doesn't match the initial value for the actual generation. generation_ = n != nullptr ? 
n->generation() : ~uint32_t{}; #endif } // NOTE: this SFINAE allows for implicit conversions from iterator to // const_iterator, but it specifically avoids hiding the copy constructor so // that the trivial one will be used when possible. template , iterator>::value && std::is_same::value, int> = 0> btree_iterator(const btree_iterator other) // NOLINT : node_(other.node_), position_(other.position_) { #ifdef ABSL_BTREE_ENABLE_GENERATIONS generation_ = other.generation_; #endif } bool operator==(const iterator &other) const { return node_ == other.node_ && position_ == other.position_; } bool operator==(const const_iterator &other) const { return node_ == other.node_ && position_ == other.position_; } bool operator!=(const iterator &other) const { return node_ != other.node_ || position_ != other.position_; } bool operator!=(const const_iterator &other) const { return node_ != other.node_ || position_ != other.position_; } // Accessors for the key/value the iterator is pointing at. reference operator*() const { ABSL_HARDENING_ASSERT(node_ != nullptr); ABSL_HARDENING_ASSERT(node_->start() <= position_); ABSL_HARDENING_ASSERT(node_->finish() > position_); assert_valid_generation(); return node_->value(position_); } pointer operator->() const { return &operator*(); } btree_iterator &operator++() { increment(); return *this; } btree_iterator &operator--() { decrement(); return *this; } btree_iterator operator++(int) { btree_iterator tmp = *this; ++*this; return tmp; } btree_iterator operator--(int) { btree_iterator tmp = *this; --*this; return tmp; } private: friend iterator; friend const_iterator; template friend class btree; template friend class btree_container; template friend class btree_set_container; template friend class btree_map_container; template friend class btree_multiset_container; template friend class base_checker; friend struct btree_access; // This SFINAE allows explicit conversions from const_iterator to // iterator, but also avoids hiding the copy 
constructor. // NOTE: the const_cast is safe because this constructor is only called by // non-const methods and the container owns the nodes. template , const_iterator>::value && std::is_same::value, int> = 0> explicit btree_iterator(const btree_iterator other) : node_(const_cast(other.node_)), position_(other.position_) { #ifdef ABSL_BTREE_ENABLE_GENERATIONS generation_ = other.generation_; #endif } // Increment/decrement the iterator. void increment() { assert_valid_generation(); if (node_->is_leaf() && ++position_ < node_->finish()) { return; } increment_slow(); } void increment_slow(); void decrement() { assert_valid_generation(); if (node_->is_leaf() && --position_ >= node_->start()) { return; } decrement_slow(); } void decrement_slow(); // Updates the generation. For use internally right before we return an // iterator to the user. void update_generation() { #ifdef ABSL_BTREE_ENABLE_GENERATIONS if (node_ != nullptr) generation_ = node_->generation(); #endif } const key_type &key() const { return node_->key(position_); } decltype(std::declval()->slot(0)) slot() { return node_->slot(position_); } void assert_valid_generation() const { #ifdef ABSL_BTREE_ENABLE_GENERATIONS if (node_ != nullptr && node_->generation() != generation_) { ABSL_INTERNAL_LOG( FATAL, "Attempting to use an invalidated iterator. The corresponding b-tree " "container has been mutated since this iterator was constructed."); } #endif } // The node in the tree the iterator is pointing at. Node *node_; // The position within the node of the tree the iterator is pointing at. // NOTE: this is an int rather than a field_type because iterators can point // to invalid positions (such as -1) in certain circumstances. int position_; #ifdef ABSL_BTREE_ENABLE_GENERATIONS // Used to check that the iterator hasn't been invalidated. 
uint32_t generation_; #endif }; template class btree { using node_type = btree_node; using is_key_compare_to = typename Params::is_key_compare_to; using field_type = typename node_type::field_type; // We use a static empty node for the root/leftmost/rightmost of empty btrees // in order to avoid branching in begin()/end(). struct alignas(node_type::Alignment()) EmptyNodeType : node_type { using field_type = typename node_type::field_type; node_type *parent; #ifdef ABSL_BTREE_ENABLE_GENERATIONS uint32_t generation = 0; #endif field_type position = 0; field_type start = 0; field_type finish = 0; // max_count must be != kInternalNodeMaxCount (so that this node is regarded // as a leaf node). max_count() is never called when the tree is empty. field_type max_count = node_type::kInternalNodeMaxCount + 1; #ifdef _MSC_VER // MSVC has constexpr code generations bugs here. EmptyNodeType() : parent(this) {} #else constexpr EmptyNodeType(node_type *p) : parent(p) {} #endif }; static node_type *EmptyNode() { #ifdef _MSC_VER static EmptyNodeType *empty_node = new EmptyNodeType; // This assert fails on some other construction methods. 
assert(empty_node->parent == empty_node); return empty_node; #else static constexpr EmptyNodeType empty_node( const_cast(&empty_node)); return const_cast(&empty_node); #endif } enum : uint32_t { kNodeSlots = node_type::kNodeSlots, kMinNodeValues = kNodeSlots / 2, }; struct node_stats { using size_type = typename Params::size_type; node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {} node_stats &operator+=(const node_stats &other) { leaf_nodes += other.leaf_nodes; internal_nodes += other.internal_nodes; return *this; } size_type leaf_nodes; size_type internal_nodes; }; public: using key_type = typename Params::key_type; using value_type = typename Params::value_type; using size_type = typename Params::size_type; using difference_type = typename Params::difference_type; using key_compare = typename Params::key_compare; using original_key_compare = typename Params::original_key_compare; using value_compare = typename Params::value_compare; using allocator_type = typename Params::allocator_type; using reference = typename Params::reference; using const_reference = typename Params::const_reference; using pointer = typename Params::pointer; using const_pointer = typename Params::const_pointer; using iterator = typename btree_iterator::iterator; using const_iterator = typename iterator::const_iterator; using reverse_iterator = std::reverse_iterator; using const_reverse_iterator = std::reverse_iterator; using node_handle_type = node_handle; // Internal types made public for use by btree_container types. using params_type = Params; using slot_type = typename Params::slot_type; private: // Copies or moves (depending on the template parameter) the values in // other into this btree in their order in other. This btree must be empty // before this method is called. This method is used in copy construction, // copy assignment, and move assignment. 
template void copy_or_move_values_in_order(Btree &other); // Validates that various assumptions/requirements are true at compile time. constexpr static bool static_assert_validation(); public: btree(const key_compare &comp, const allocator_type &alloc) : root_(EmptyNode()), rightmost_(comp, alloc, EmptyNode()), size_(0) {} btree(const btree &other) : btree(other, other.allocator()) {} btree(const btree &other, const allocator_type &alloc) : btree(other.key_comp(), alloc) { copy_or_move_values_in_order(other); } btree(btree &&other) noexcept : root_(absl::exchange(other.root_, EmptyNode())), rightmost_(std::move(other.rightmost_)), size_(absl::exchange(other.size_, 0)) { other.mutable_rightmost() = EmptyNode(); } btree(btree &&other, const allocator_type &alloc) : btree(other.key_comp(), alloc) { if (alloc == other.allocator()) { swap(other); } else { // Move values from `other` one at a time when allocators are different. copy_or_move_values_in_order(other); } } ~btree() { // Put static_asserts in destructor to avoid triggering them before the type // is complete. static_assert(static_assert_validation(), "This call must be elided."); clear(); } // Assign the contents of other to *this. btree &operator=(const btree &other); btree &operator=(btree &&other) noexcept; iterator begin() { return iterator(leftmost()); } const_iterator begin() const { return const_iterator(leftmost()); } iterator end() { return iterator(rightmost(), rightmost()->finish()); } const_iterator end() const { return const_iterator(rightmost(), rightmost()->finish()); } reverse_iterator rbegin() { return reverse_iterator(end()); } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } reverse_iterator rend() { return reverse_iterator(begin()); } const_reverse_iterator rend() const { return const_reverse_iterator(begin()); } // Finds the first element whose key is not less than `key`. 
template iterator lower_bound(const K &key) { return internal_end(internal_lower_bound(key).value); } template const_iterator lower_bound(const K &key) const { return internal_end(internal_lower_bound(key).value); } // Finds the first element whose key is not less than `key` and also returns // whether that element is equal to `key`. template std::pair lower_bound_equal(const K &key) const; // Finds the first element whose key is greater than `key`. template iterator upper_bound(const K &key) { return internal_end(internal_upper_bound(key)); } template const_iterator upper_bound(const K &key) const { return internal_end(internal_upper_bound(key)); } // Finds the range of values which compare equal to key. The first member of // the returned pair is equal to lower_bound(key). The second member of the // pair is equal to upper_bound(key). template std::pair equal_range(const K &key); template std::pair equal_range(const K &key) const { return const_cast(this)->equal_range(key); } // Inserts a value into the btree only if it does not already exist. The // boolean return value indicates whether insertion succeeded or failed. // Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. template std::pair insert_unique(const K &key, Args &&... args); // Inserts with hint. Checks to see if the value should be placed immediately // before `position` in the tree. If so, then the insertion will take // amortized constant time. If not, the insertion will take amortized // logarithmic time as if a call to insert_unique() were made. // Requirement: if `key` already exists in the btree, does not consume `args`. // Requirement: `key` is never referenced after consuming `args`. template std::pair insert_hint_unique(iterator position, const K &key, Args &&... args); // Insert a range of values into the btree. 
// Note: the first overload avoids constructing a value_type if the key // already exists in the btree. template ()( params_type::key(*std::declval()), std::declval()))> void insert_iterator_unique(InputIterator b, InputIterator e, int); // We need the second overload for cases in which we need to construct a // value_type in order to compare it with the keys already in the btree. template void insert_iterator_unique(InputIterator b, InputIterator e, char); // Inserts a value into the btree. template iterator insert_multi(const key_type &key, ValueType &&v); // Inserts a value into the btree. template iterator insert_multi(ValueType &&v) { return insert_multi(params_type::key(v), std::forward(v)); } // Insert with hint. Check to see if the value should be placed immediately // before position in the tree. If it does, then the insertion will take // amortized constant time. If not, the insertion will take amortized // logarithmic time as if a call to insert_multi(v) were made. template iterator insert_hint_multi(iterator position, ValueType &&v); // Insert a range of values into the btree. template void insert_iterator_multi(InputIterator b, InputIterator e); // Erase the specified iterator from the btree. The iterator must be valid // (i.e. not equal to end()). Return an iterator pointing to the node after // the one that was erased (or end() if none exists). // Requirement: does not read the value at `*iter`. iterator erase(iterator iter); // Erases range. Returns the number of keys erased and an iterator pointing // to the element after the last erased element. std::pair erase_range(iterator begin, iterator end); // Finds an element with key equivalent to `key` or returns `end()` if `key` // is not present. template iterator find(const K &key) { return internal_end(internal_find(key)); } template const_iterator find(const K &key) const { return internal_end(internal_find(key)); } // Clear the btree, deleting all of the values it contains. 
void clear(); // Swaps the contents of `this` and `other`. void swap(btree &other); const key_compare &key_comp() const noexcept { return rightmost_.template get<0>(); } template bool compare_keys(const K1 &a, const K2 &b) const { return compare_internal::compare_result_as_less_than(key_comp()(a, b)); } value_compare value_comp() const { return value_compare(original_key_compare(key_comp())); } // Verifies the structure of the btree. void verify() const; // Size routines. size_type size() const { return size_; } size_type max_size() const { return (std::numeric_limits::max)(); } bool empty() const { return size_ == 0; } // The height of the btree. An empty tree will have height 0. size_type height() const { size_type h = 0; if (!empty()) { // Count the length of the chain from the leftmost node up to the // root. We actually count from the root back around to the level below // the root, but the calculation is the same because of the circularity // of that traversal. const node_type *n = root(); do { ++h; n = n->parent(); } while (n != root()); } return h; } // The number of internal, leaf and total nodes used by the btree. size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; } size_type internal_nodes() const { return internal_stats(root()).internal_nodes; } size_type nodes() const { node_stats stats = internal_stats(root()); return stats.leaf_nodes + stats.internal_nodes; } // The total number of bytes used by the btree. // TODO(b/169338300): update to support node_btree_*. size_type bytes_used() const { node_stats stats = internal_stats(root()); if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) { return sizeof(*this) + node_type::LeafSize(root()->max_count()); } else { return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() + stats.internal_nodes * node_type::InternalSize(); } } // The average number of bytes used per value stored in the btree assuming // random insertion order. 
static double average_bytes_per_value() { // The expected number of values per node with random insertion order is the // average of the maximum and minimum numbers of values per node. const double expected_values_per_node = (kNodeSlots + kMinNodeValues) / 2.0; return node_type::LeafSize() / expected_values_per_node; } // The fullness of the btree. Computed as the number of elements in the btree // divided by the maximum number of elements a tree with the current number // of nodes could hold. A value of 1 indicates perfect space // utilization. Smaller values indicate space wastage. // Returns 0 for empty trees. double fullness() const { if (empty()) return 0.0; return static_cast(size()) / (nodes() * kNodeSlots); } // The overhead of the btree structure in bytes per node. Computed as the // total number of bytes used by the btree minus the number of bytes used for // storing elements divided by the number of elements. // Returns 0 for empty trees. double overhead() const { if (empty()) return 0.0; return (bytes_used() - size() * sizeof(value_type)) / static_cast(size()); } // The allocator used by the btree. allocator_type get_allocator() const { return allocator(); } private: friend struct btree_access; // Internal accessor routines. node_type *root() { return root_; } const node_type *root() const { return root_; } node_type *&mutable_root() noexcept { return root_; } node_type *rightmost() { return rightmost_.template get<2>(); } const node_type *rightmost() const { return rightmost_.template get<2>(); } node_type *&mutable_rightmost() noexcept { return rightmost_.template get<2>(); } key_compare *mutable_key_comp() noexcept { return &rightmost_.template get<0>(); } // The leftmost node is stored as the parent of the root node. node_type *leftmost() { return root()->parent(); } const node_type *leftmost() const { return root()->parent(); } // Allocator routines. 
allocator_type *mutable_allocator() noexcept { return &rightmost_.template get<1>(); } const allocator_type &allocator() const noexcept { return rightmost_.template get<1>(); } // Allocates a correctly aligned node of at least size bytes using the // allocator. node_type *allocate(const size_type size) { return reinterpret_cast( absl::container_internal::Allocate( mutable_allocator(), size)); } // Node creation/deletion routines. node_type *new_internal_node(node_type *parent) { node_type *n = allocate(node_type::InternalSize()); n->init_internal(parent); return n; } node_type *new_leaf_node(node_type *parent) { node_type *n = allocate(node_type::LeafSize()); n->init_leaf(kNodeSlots, parent); return n; } node_type *new_leaf_root_node(const int max_count) { node_type *n = allocate(node_type::LeafSize(max_count)); n->init_leaf(max_count, /*parent=*/n); return n; } // Deletion helper routines. iterator rebalance_after_delete(iterator iter); // Rebalances or splits the node iter points to. void rebalance_or_split(iterator *iter); // Merges the values of left, right and the delimiting key on their parent // onto left, removing the delimiting key and deleting right. void merge_nodes(node_type *left, node_type *right); // Tries to merge node with its left or right sibling, and failing that, // rebalance with its left or right sibling. Returns true if a merge // occurred, at which point it is no longer valid to access node. Returns // false if no merging took place. bool try_merge_or_rebalance(iterator *iter); // Tries to shrink the height of the tree by 1. void try_shrink(); iterator internal_end(iterator iter) { return iter.node_ != nullptr ? iter : end(); } const_iterator internal_end(const_iterator iter) const { return iter.node_ != nullptr ? iter : end(); } // Emplaces a value into the btree immediately before iter. Requires that // key(v) <= iter.key() and (--iter).key() <= key(v). template iterator internal_emplace(iterator iter, Args &&... 
args); // Returns an iterator pointing to the first value >= the value "iter" is // pointing at. Note that "iter" might be pointing to an invalid location such // as iter.position_ == iter.node_->finish(). This routine simply moves iter // up in the tree to a valid location. Requires: iter.node_ is non-null. template static IterType internal_last(IterType iter); // Returns an iterator pointing to the leaf position at which key would // reside in the tree, unless there is an exact match - in which case, the // result may not be on a leaf. When there's a three-way comparator, we can // return whether there was an exact match. This allows the caller to avoid a // subsequent comparison to determine if an exact match was made, which is // important for keys with expensive comparison, such as strings. template SearchResult internal_locate( const K &key) const; // Internal routine which implements lower_bound(). template SearchResult internal_lower_bound( const K &key) const; // Internal routine which implements upper_bound(). template iterator internal_upper_bound(const K &key) const; // Internal routine which implements find(). template iterator internal_find(const K &key) const; // Verifies the tree structure of node. int internal_verify(const node_type *node, const key_type *lo, const key_type *hi) const; node_stats internal_stats(const node_type *node) const { // The root can be a static empty node. if (node == nullptr || (node == root() && empty())) { return node_stats(0, 0); } if (node->is_leaf()) { return node_stats(1, 0); } node_stats res(0, 1); for (int i = node->start(); i <= node->finish(); ++i) { res += internal_stats(node->child(i)); } return res; } node_type *root_; // A pointer to the rightmost node. Note that the leftmost node is stored as // the root's parent. We use compressed tuple in order to save space because // key_compare and allocator_type are usually empty. absl::container_internal::CompressedTuple rightmost_; // Number of values. 
size_type size_; }; //// // btree_node methods template template inline void btree_node

::emplace_value(const size_type i, allocator_type *alloc, Args &&... args) { assert(i >= start()); assert(i <= finish()); // Shift old values to create space for new value and then construct it in // place. if (i < finish()) { transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this, alloc); } value_init(i, alloc, std::forward(args)...); set_finish(finish() + 1); if (is_internal() && finish() > i + 1) { for (field_type j = finish(); j > i + 1; --j) { set_child(j, child(j - 1)); } clear_child(i + 1); } } template inline void btree_node

::remove_values(const field_type i, const field_type to_erase, allocator_type *alloc) { // Transfer values after the removed range into their new places. value_destroy_n(i, to_erase, alloc); const field_type orig_finish = finish(); const field_type src_i = i + to_erase; transfer_n(orig_finish - src_i, i, src_i, this, alloc); if (is_internal()) { // Delete all children between begin and end. for (int j = 0; j < to_erase; ++j) { clear_and_delete(child(i + j + 1), alloc); } // Rotate children after end into new positions. for (int j = i + to_erase + 1; j <= orig_finish; ++j) { set_child(j - to_erase, child(j)); clear_child(j); } } set_finish(orig_finish - to_erase); } template void btree_node

::rebalance_right_to_left(const int to_move, btree_node *right, allocator_type *alloc) { assert(parent() == right->parent()); assert(position() + 1 == right->position()); assert(right->count() >= count()); assert(to_move >= 1); assert(to_move <= right->count()); // 1) Move the delimiting value in the parent to the left node. transfer(finish(), position(), parent(), alloc); // 2) Move the (to_move - 1) values from the right node to the left node. transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc); // 3) Move the new delimiting value to the parent from the right node. parent()->transfer(position(), right->start() + to_move - 1, right, alloc); // 4) Shift the values in the right node to their correct positions. right->transfer_n(right->count() - to_move, right->start(), right->start() + to_move, right, alloc); if (is_internal()) { // Move the child pointers from the right to the left node. for (int i = 0; i < to_move; ++i) { init_child(finish() + i + 1, right->child(i)); } for (int i = right->start(); i <= right->finish() - to_move; ++i) { assert(i + to_move <= right->max_count()); right->init_child(i, right->child(i + to_move)); right->clear_child(i + to_move); } } // Fixup `finish` on the left and right nodes. set_finish(finish() + to_move); right->set_finish(right->finish() - to_move); } template void btree_node

::rebalance_left_to_right(const int to_move, btree_node *right, allocator_type *alloc) { assert(parent() == right->parent()); assert(position() + 1 == right->position()); assert(count() >= right->count()); assert(to_move >= 1); assert(to_move <= count()); // Values in the right node are shifted to the right to make room for the // new to_move values. Then, the delimiting value in the parent and the // other (to_move - 1) values in the left node are moved into the right node. // Lastly, a new delimiting value is moved from the left node into the // parent, and the remaining empty left node entries are destroyed. // 1) Shift existing values in the right node to their correct positions. right->transfer_n_backward(right->count(), right->start() + to_move, right->start(), right, alloc); // 2) Move the delimiting value in the parent to the right node. right->transfer(right->start() + to_move - 1, position(), parent(), alloc); // 3) Move the (to_move - 1) values from the left node to the right node. right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this, alloc); // 4) Move the new delimiting value to the parent from the left node. parent()->transfer(position(), finish() - to_move, this, alloc); if (is_internal()) { // Move the child pointers from the left to the right node. for (int i = right->finish(); i >= right->start(); --i) { right->init_child(i + to_move, right->child(i)); right->clear_child(i); } for (int i = 1; i <= to_move; ++i) { right->init_child(i - 1, child(finish() - to_move + i)); clear_child(finish() - to_move + i); } } // Fixup the counts on the left and right nodes. set_finish(finish() - to_move); right->set_finish(right->finish() + to_move); } template void btree_node

::split(const int insert_position, btree_node *dest, allocator_type *alloc) { assert(dest->count() == 0); assert(max_count() == kNodeSlots); // We bias the split based on the position being inserted. If we're // inserting at the beginning of the left node then bias the split to put // more values on the right node. If we're inserting at the end of the // right node then bias the split to put more values on the left node. if (insert_position == start()) { dest->set_finish(dest->start() + finish() - 1); } else if (insert_position == kNodeSlots) { dest->set_finish(dest->start()); } else { dest->set_finish(dest->start() + count() / 2); } set_finish(finish() - dest->count()); assert(count() >= 1); // Move values from the left sibling to the right sibling. dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc); // The split key is the largest value in the left sibling. --mutable_finish(); parent()->emplace_value(position(), alloc, finish_slot()); value_destroy(finish(), alloc); parent()->init_child(position() + 1, dest); if (is_internal()) { for (int i = dest->start(), j = finish() + 1; i <= dest->finish(); ++i, ++j) { assert(child(j) != nullptr); dest->init_child(i, child(j)); clear_child(j); } } } template void btree_node

::merge(btree_node *src, allocator_type *alloc) { assert(parent() == src->parent()); assert(position() + 1 == src->position()); // Move the delimiting value to the left node. value_init(finish(), alloc, parent()->slot(position())); // Move the values from the right to the left node. transfer_n(src->count(), finish() + 1, src->start(), src, alloc); if (is_internal()) { // Move the child pointers from the right to the left node. for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) { init_child(j, src->child(i)); src->clear_child(i); } } // Fixup `finish` on the src and dest nodes. set_finish(start() + 1 + count() + src->count()); src->set_finish(src->start()); // Remove the value on the parent node and delete the src node. parent()->remove_values(position(), /*to_erase=*/1, alloc); } template void btree_node

::clear_and_delete(btree_node *node, allocator_type *alloc) { if (node->is_leaf()) { node->value_destroy_n(node->start(), node->count(), alloc); deallocate(LeafSize(node->max_count()), node, alloc); return; } if (node->count() == 0) { deallocate(InternalSize(), node, alloc); return; } // The parent of the root of the subtree we are deleting. btree_node *delete_root_parent = node->parent(); // Navigate to the leftmost leaf under node, and then delete upwards. while (node->is_internal()) node = node->start_child(); #ifdef ABSL_BTREE_ENABLE_GENERATIONS // When generations are enabled, we delete the leftmost leaf last in case it's // the parent of the root and we need to check whether it's a leaf before we // can update the root's generation. // TODO(ezb): if we change btree_node::is_root to check a bool inside the node // instead of checking whether the parent is a leaf, we can remove this logic. btree_node *leftmost_leaf = node; #endif // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which // isn't guaranteed to be a valid `field_type`. int pos = node->position(); btree_node *parent = node->parent(); for (;;) { // In each iteration of the next loop, we delete one leaf node and go right. assert(pos <= parent->finish()); do { node = parent->child(pos); if (node->is_internal()) { // Navigate to the leftmost leaf under node. while (node->is_internal()) node = node->start_child(); pos = node->position(); parent = node->parent(); } node->value_destroy_n(node->start(), node->count(), alloc); #ifdef ABSL_BTREE_ENABLE_GENERATIONS if (leftmost_leaf != node) #endif deallocate(LeafSize(node->max_count()), node, alloc); ++pos; } while (pos <= parent->finish()); // Once we've deleted all children of parent, delete parent and go up/right. 
assert(pos > parent->finish()); do { node = parent; pos = node->position(); parent = node->parent(); node->value_destroy_n(node->start(), node->count(), alloc); deallocate(InternalSize(), node, alloc); if (parent == delete_root_parent) { #ifdef ABSL_BTREE_ENABLE_GENERATIONS deallocate(LeafSize(leftmost_leaf->max_count()), leftmost_leaf, alloc); #endif return; } ++pos; } while (pos > parent->finish()); } } //// // btree_iterator methods template void btree_iterator::increment_slow() { if (node_->is_leaf()) { assert(position_ >= node_->finish()); btree_iterator save(*this); while (position_ == node_->finish() && !node_->is_root()) { assert(node_->parent()->child(node_->position()) == node_); position_ = node_->position(); node_ = node_->parent(); } // TODO(ezb): assert we aren't incrementing end() instead of handling. if (position_ == node_->finish()) { *this = save; } } else { assert(position_ < node_->finish()); node_ = node_->child(position_ + 1); while (node_->is_internal()) { node_ = node_->start_child(); } position_ = node_->start(); } } template void btree_iterator::decrement_slow() { if (node_->is_leaf()) { assert(position_ <= -1); btree_iterator save(*this); while (position_ < node_->start() && !node_->is_root()) { assert(node_->parent()->child(node_->position()) == node_); position_ = node_->position() - 1; node_ = node_->parent(); } // TODO(ezb): assert we aren't decrementing begin() instead of handling. if (position_ < node_->start()) { *this = save; } } else { assert(position_ >= node_->start()); node_ = node_->child(position_); while (node_->is_internal()) { node_ = node_->child(node_->finish()); } position_ = node_->finish() - 1; } } //// // btree methods template template void btree

::copy_or_move_values_in_order(Btree &other) { static_assert(std::is_same::value || std::is_same::value, "Btree type must be same or const."); assert(empty()); // We can avoid key comparisons because we know the order of the // values is the same order we'll store them in. auto iter = other.begin(); if (iter == other.end()) return; insert_multi(iter.slot()); ++iter; for (; iter != other.end(); ++iter) { // If the btree is not empty, we can just insert the new value at the end // of the tree. internal_emplace(end(), iter.slot()); } } template constexpr bool btree

::static_assert_validation() { static_assert(std::is_nothrow_copy_constructible::value, "Key comparison must be nothrow copy constructible"); static_assert(std::is_nothrow_copy_constructible::value, "Allocator must be nothrow copy constructible"); static_assert(type_traits_internal::is_trivially_copyable::value, "iterator not trivially copyable."); // Note: We assert that kTargetValues, which is computed from // Params::kTargetNodeSize, must fit the node_type::field_type. static_assert( kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))), "target node size too large"); // Verify that key_compare returns an absl::{weak,strong}_ordering or bool. static_assert( compare_has_valid_result_type(), "key comparison function must return absl::{weak,strong}_ordering or " "bool."); // Test the assumption made in setting kNodeSlotSpace. static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4, "node space assumption incorrect"); return true; } template template auto btree

::lower_bound_equal(const K &key) const -> std::pair { const SearchResult res = internal_lower_bound(key); const iterator lower = iterator(internal_end(res.value)); const bool equal = res.HasMatch() ? res.IsEq() : lower != end() && !compare_keys(key, lower.key()); return {lower, equal}; } template template auto btree

::equal_range(const K &key) -> std::pair { const std::pair lower_and_equal = lower_bound_equal(key); const iterator lower = lower_and_equal.first; if (!lower_and_equal.second) { return {lower, lower}; } const iterator next = std::next(lower); if (!params_type::template can_have_multiple_equivalent_keys()) { // The next iterator after lower must point to a key greater than `key`. // Note: if this assert fails, then it may indicate that the comparator does // not meet the equivalence requirements for Compare // (see https://en.cppreference.com/w/cpp/named_req/Compare). assert(next == end() || compare_keys(key, next.key())); return {lower, next}; } // Try once more to avoid the call to upper_bound() if there's only one // equivalent key. This should prevent all calls to upper_bound() in cases of // unique-containers with heterogeneous comparators in which all comparison // operators have the same equivalence classes. if (next == end() || compare_keys(key, next.key())) return {lower, next}; // In this case, we need to call upper_bound() to avoid worst case O(N) // behavior if we were to iterate over equal keys. return {lower, upper_bound(key)}; } template template auto btree

::insert_unique(const K &key, Args &&... args) -> std::pair { if (empty()) { mutable_root() = mutable_rightmost() = new_leaf_root_node(1); } SearchResult res = internal_locate(key); iterator iter = res.value; if (res.HasMatch()) { if (res.IsEq()) { // The key already exists in the tree, do nothing. return {iter, false}; } } else { iterator last = internal_last(iter); if (last.node_ && !compare_keys(key, last.key())) { // The key already exists in the tree, do nothing. return {last, false}; } } return {internal_emplace(iter, std::forward(args)...), true}; } template template inline auto btree

::insert_hint_unique(iterator position, const K &key, Args &&... args) -> std::pair { if (!empty()) { if (position == end() || compare_keys(key, position.key())) { if (position == begin() || compare_keys(std::prev(position).key(), key)) { // prev.key() < key < position.key() return {internal_emplace(position, std::forward(args)...), true}; } } else if (compare_keys(position.key(), key)) { ++position; if (position == end() || compare_keys(key, position.key())) { // {original `position`}.key() < key < {current `position`}.key() return {internal_emplace(position, std::forward(args)...), true}; } } else { // position.key() == key return {position, false}; } } return insert_unique(key, std::forward(args)...); } template template void btree

::insert_iterator_unique(InputIterator b, InputIterator e, int) { for (; b != e; ++b) { insert_hint_unique(end(), params_type::key(*b), *b); } } template template void btree

::insert_iterator_unique(InputIterator b, InputIterator e, char) { for (; b != e; ++b) { // Use a node handle to manage a temp slot. auto node_handle = CommonAccess::Construct(get_allocator(), *b); slot_type *slot = CommonAccess::GetSlot(node_handle); insert_hint_unique(end(), params_type::key(slot), slot); } } template template auto btree

::insert_multi(const key_type &key, ValueType &&v) -> iterator { if (empty()) { mutable_root() = mutable_rightmost() = new_leaf_root_node(1); } iterator iter = internal_upper_bound(key); if (iter.node_ == nullptr) { iter = end(); } return internal_emplace(iter, std::forward(v)); } template template auto btree

::insert_hint_multi(iterator position, ValueType &&v) -> iterator { if (!empty()) { const key_type &key = params_type::key(v); if (position == end() || !compare_keys(position.key(), key)) { if (position == begin() || !compare_keys(key, std::prev(position).key())) { // prev.key() <= key <= position.key() return internal_emplace(position, std::forward(v)); } } else { ++position; if (position == end() || !compare_keys(position.key(), key)) { // {original `position`}.key() < key < {current `position`}.key() return internal_emplace(position, std::forward(v)); } } } return insert_multi(std::forward(v)); } template template void btree

::insert_iterator_multi(InputIterator b, InputIterator e) { for (; b != e; ++b) { insert_hint_multi(end(), *b); } } template auto btree

::operator=(const btree &other) -> btree & { if (this != &other) { clear(); *mutable_key_comp() = other.key_comp(); if (absl::allocator_traits< allocator_type>::propagate_on_container_copy_assignment::value) { *mutable_allocator() = other.allocator(); } copy_or_move_values_in_order(other); } return *this; } template auto btree

::operator=(btree &&other) noexcept -> btree & { if (this != &other) { clear(); using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_copy_assignment::value) { swap(root_, other.root_); // Note: `rightmost_` also contains the allocator and the key comparator. swap(rightmost_, other.rightmost_); swap(size_, other.size_); } else { if (allocator() == other.allocator()) { swap(mutable_root(), other.mutable_root()); swap(*mutable_key_comp(), *other.mutable_key_comp()); swap(mutable_rightmost(), other.mutable_rightmost()); swap(size_, other.size_); } else { // We aren't allowed to propagate the allocator and the allocator is // different so we can't take over its memory. We must move each element // individually. We need both `other` and `this` to have `other`s key // comparator while moving the values so we can't swap the key // comparators. *mutable_key_comp() = other.key_comp(); copy_or_move_values_in_order(other); } } } return *this; } template auto btree

::erase(iterator iter) -> iterator { iter.node_->value_destroy(iter.position_, mutable_allocator()); iter.update_generation(); const bool internal_delete = iter.node_->is_internal(); if (internal_delete) { // Deletion of a value on an internal node. First, transfer the largest // value from our left child here, then erase/rebalance from that position. // We can get to the largest value from our left child by decrementing iter. iterator internal_iter(iter); --iter; assert(iter.node_->is_leaf()); internal_iter.node_->transfer(internal_iter.position_, iter.position_, iter.node_, mutable_allocator()); } else { // Shift values after erased position in leaf. In the internal case, we // don't need to do this because the leaf position is the end of the node. const field_type transfer_from = iter.position_ + 1; const field_type num_to_transfer = iter.node_->finish() - transfer_from; iter.node_->transfer_n(num_to_transfer, iter.position_, transfer_from, iter.node_, mutable_allocator()); } // Update node finish and container size. iter.node_->set_finish(iter.node_->finish() - 1); --size_; // We want to return the next value after the one we just erased. If we // erased from an internal node (internal_delete == true), then the next // value is ++(++iter). If we erased from a leaf node (internal_delete == // false) then the next value is ++iter. Note that ++iter may point to an // internal node and the value in the internal node may move to a leaf node // (iter.node_) when rebalancing is performed at the leaf level. iterator res = rebalance_after_delete(iter); // If we erased from an internal node, advance the iterator. if (internal_delete) { ++res; } return res; } template auto btree

::rebalance_after_delete(iterator iter) -> iterator { // Merge/rebalance as we walk back up the tree. iterator res(iter); bool first_iteration = true; for (;;) { if (iter.node_ == root()) { try_shrink(); if (empty()) { return end(); } break; } if (iter.node_->count() >= kMinNodeValues) { break; } bool merged = try_merge_or_rebalance(&iter); // On the first iteration, we should update `res` with `iter` because `res` // may have been invalidated. if (first_iteration) { res = iter; first_iteration = false; } if (!merged) { break; } iter.position_ = iter.node_->position(); iter.node_ = iter.node_->parent(); } res.update_generation(); // Adjust our return value. If we're pointing at the end of a node, advance // the iterator. if (res.position_ == res.node_->finish()) { res.position_ = res.node_->finish() - 1; ++res; } return res; } template auto btree

::erase_range(iterator begin, iterator end) -> std::pair { difference_type count = std::distance(begin, end); assert(count >= 0); if (count == 0) { return {0, begin}; } if (static_cast(count) == size_) { clear(); return {count, this->end()}; } if (begin.node_ == end.node_) { assert(end.position_ > begin.position_); begin.node_->remove_values(begin.position_, end.position_ - begin.position_, mutable_allocator()); size_ -= count; return {count, rebalance_after_delete(begin)}; } const size_type target_size = size_ - count; while (size_ > target_size) { if (begin.node_->is_leaf()) { const size_type remaining_to_erase = size_ - target_size; const size_type remaining_in_node = begin.node_->finish() - begin.position_; const size_type to_erase = (std::min)(remaining_to_erase, remaining_in_node); begin.node_->remove_values(begin.position_, to_erase, mutable_allocator()); size_ -= to_erase; begin = rebalance_after_delete(begin); } else { begin = erase(begin); } } begin.update_generation(); return {count, begin}; } template void btree

::clear() { if (!empty()) { node_type::clear_and_delete(root(), mutable_allocator()); } mutable_root() = mutable_rightmost() = EmptyNode(); size_ = 0; } template void btree

::swap(btree &other) { using std::swap; if (absl::allocator_traits< allocator_type>::propagate_on_container_swap::value) { // Note: `rightmost_` also contains the allocator and the key comparator. swap(rightmost_, other.rightmost_); } else { // It's undefined behavior if the allocators are unequal here. assert(allocator() == other.allocator()); swap(mutable_rightmost(), other.mutable_rightmost()); swap(*mutable_key_comp(), *other.mutable_key_comp()); } swap(mutable_root(), other.mutable_root()); swap(size_, other.size_); } template void btree

::verify() const { assert(root() != nullptr); assert(leftmost() != nullptr); assert(rightmost() != nullptr); assert(empty() || size() == internal_verify(root(), nullptr, nullptr)); assert(leftmost() == (++const_iterator(root(), -1)).node_); assert(rightmost() == (--const_iterator(root(), root()->finish())).node_); assert(leftmost()->is_leaf()); assert(rightmost()->is_leaf()); } template void btree

::rebalance_or_split(iterator *iter) { node_type *&node = iter->node_; int &insert_position = iter->position_; assert(node->count() == node->max_count()); assert(kNodeSlots == node->max_count()); // First try to make room on the node by rebalancing. node_type *parent = node->parent(); if (node != root()) { if (node->position() > parent->start()) { // Try rebalancing with our left sibling. node_type *left = parent->child(node->position() - 1); assert(left->max_count() == kNodeSlots); if (left->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the end of the right node then we bias rebalancing to // fill up the left node. int to_move = (kNodeSlots - left->count()) / (1 + (insert_position < static_cast(kNodeSlots))); to_move = (std::max)(1, to_move); if (insert_position - to_move >= node->start() || left->count() + to_move < static_cast(kNodeSlots)) { left->rebalance_right_to_left(to_move, node, mutable_allocator()); assert(node->max_count() - node->count() == to_move); insert_position = insert_position - to_move; if (insert_position < node->start()) { insert_position = insert_position + left->count() + 1; node = left; } assert(node->count() < node->max_count()); return; } } } if (node->position() < parent->finish()) { // Try rebalancing with our right sibling. node_type *right = parent->child(node->position() + 1); assert(right->max_count() == kNodeSlots); if (right->count() < kNodeSlots) { // We bias rebalancing based on the position being inserted. If we're // inserting at the beginning of the left node then we bias rebalancing // to fill up the right node. 
int to_move = (static_cast(kNodeSlots) - right->count()) / (1 + (insert_position > node->start())); to_move = (std::max)(1, to_move); if (insert_position <= node->finish() - to_move || right->count() + to_move < static_cast(kNodeSlots)) { node->rebalance_left_to_right(to_move, right, mutable_allocator()); if (insert_position > node->finish()) { insert_position = insert_position - node->count() - 1; node = right; } assert(node->count() < node->max_count()); return; } } } // Rebalancing failed, make sure there is room on the parent node for a new // value. assert(parent->max_count() == kNodeSlots); if (parent->count() == kNodeSlots) { iterator parent_iter(node->parent(), node->position()); rebalance_or_split(&parent_iter); } } else { // Rebalancing not possible because this is the root node. // Create a new root node and set the current root node as the child of the // new root. parent = new_internal_node(parent); parent->set_generation(root()->generation()); parent->init_child(parent->start(), root()); mutable_root() = parent; // If the former root was a leaf node, then it's now the rightmost node. assert(parent->start_child()->is_internal() || parent->start_child() == rightmost()); } // Split the node. node_type *split_node; if (node->is_leaf()) { split_node = new_leaf_node(parent); node->split(insert_position, split_node, mutable_allocator()); if (rightmost() == node) mutable_rightmost() = split_node; } else { split_node = new_internal_node(parent); node->split(insert_position, split_node, mutable_allocator()); } if (insert_position > node->finish()) { insert_position = insert_position - node->count() - 1; node = split_node; } } template void btree

::merge_nodes(node_type *left, node_type *right) { left->merge(right, mutable_allocator()); if (rightmost() == right) mutable_rightmost() = left; } template bool btree

::try_merge_or_rebalance(iterator *iter) { node_type *parent = iter->node_->parent(); if (iter->node_->position() > parent->start()) { // Try merging with our left sibling. node_type *left = parent->child(iter->node_->position() - 1); assert(left->max_count() == kNodeSlots); if (1U + left->count() + iter->node_->count() <= kNodeSlots) { iter->position_ += 1 + left->count(); merge_nodes(left, iter->node_); iter->node_ = left; return true; } } if (iter->node_->position() < parent->finish()) { // Try merging with our right sibling. node_type *right = parent->child(iter->node_->position() + 1); assert(right->max_count() == kNodeSlots); if (1U + iter->node_->count() + right->count() <= kNodeSlots) { merge_nodes(iter->node_, right); return true; } // Try rebalancing with our right sibling. We don't perform rebalancing if // we deleted the first element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the front of the tree. if (right->count() > kMinNodeValues && (iter->node_->count() == 0 || iter->position_ > iter->node_->start())) { int to_move = (right->count() - iter->node_->count()) / 2; to_move = (std::min)(to_move, right->count() - 1); iter->node_->rebalance_right_to_left(to_move, right, mutable_allocator()); return false; } } if (iter->node_->position() > parent->start()) { // Try rebalancing with our left sibling. We don't perform rebalancing if // we deleted the last element from iter->node_ and the node is not // empty. This is a small optimization for the common pattern of deleting // from the back of the tree. 
node_type *left = parent->child(iter->node_->position() - 1); if (left->count() > kMinNodeValues && (iter->node_->count() == 0 || iter->position_ < iter->node_->finish())) { int to_move = (left->count() - iter->node_->count()) / 2; to_move = (std::min)(to_move, left->count() - 1); left->rebalance_left_to_right(to_move, iter->node_, mutable_allocator()); iter->position_ += to_move; return false; } } return false; } template void btree

::try_shrink() { node_type *orig_root = root(); if (orig_root->count() > 0) { return; } // Deleted the last item on the root node, shrink the height of the tree. if (orig_root->is_leaf()) { assert(size() == 0); mutable_root() = mutable_rightmost() = EmptyNode(); } else { node_type *child = orig_root->start_child(); child->make_root(); mutable_root() = child; } node_type::clear_and_delete(orig_root, mutable_allocator()); } template template inline IterType btree

::internal_last(IterType iter) { assert(iter.node_ != nullptr); while (iter.position_ == iter.node_->finish()) { iter.position_ = iter.node_->position(); iter.node_ = iter.node_->parent(); if (iter.node_->is_leaf()) { iter.node_ = nullptr; break; } } iter.update_generation(); return iter; } template template inline auto btree

::internal_emplace(iterator iter, Args &&... args) -> iterator { if (iter.node_->is_internal()) { // We can't insert on an internal node. Instead, we'll insert after the // previous value which is guaranteed to be on a leaf node. --iter; ++iter.position_; } const field_type max_count = iter.node_->max_count(); allocator_type *alloc = mutable_allocator(); if (iter.node_->count() == max_count) { // Make room in the leaf for the new item. if (max_count < kNodeSlots) { // Insertion into the root where the root is smaller than the full node // size. Simply grow the size of the root node. assert(iter.node_ == root()); iter.node_ = new_leaf_root_node((std::min)(kNodeSlots, 2 * max_count)); // Transfer the values from the old root to the new root. node_type *old_root = root(); node_type *new_root = iter.node_; new_root->transfer_n(old_root->count(), new_root->start(), old_root->start(), old_root, alloc); new_root->set_finish(old_root->finish()); old_root->set_finish(old_root->start()); new_root->set_generation(old_root->generation()); node_type::clear_and_delete(old_root, alloc); mutable_root() = mutable_rightmost() = new_root; } else { rebalance_or_split(&iter); } } iter.node_->emplace_value(iter.position_, alloc, std::forward(args)...); ++size_; iter.update_generation(); return iter; } template template inline auto btree

::internal_locate(const K &key) const -> SearchResult { iterator iter(const_cast(root())); for (;;) { SearchResult res = iter.node_->lower_bound(key, key_comp()); iter.position_ = res.value; if (res.IsEq()) { return {iter, MatchKind::kEq}; } // Note: in the non-key-compare-to case, we don't need to walk all the way // down the tree if the keys are equal, but determining equality would // require doing an extra comparison on each node on the way down, and we // will need to go all the way to the leaf node in the expected case. if (iter.node_->is_leaf()) { break; } iter.node_ = iter.node_->child(iter.position_); } // Note: in the non-key-compare-to case, the key may actually be equivalent // here (and the MatchKind::kNe is ignored). return {iter, MatchKind::kNe}; } template template auto btree

::internal_lower_bound(const K &key) const -> SearchResult { if (!params_type::template can_have_multiple_equivalent_keys()) { SearchResult ret = internal_locate(key); ret.value = internal_last(ret.value); return ret; } iterator iter(const_cast(root())); SearchResult res; bool seen_eq = false; for (;;) { res = iter.node_->lower_bound(key, key_comp()); iter.position_ = res.value; if (iter.node_->is_leaf()) { break; } seen_eq = seen_eq || res.IsEq(); iter.node_ = iter.node_->child(iter.position_); } if (res.IsEq()) return {iter, MatchKind::kEq}; return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe}; } template template auto btree

::internal_upper_bound(const K &key) const -> iterator { iterator iter(const_cast(root())); for (;;) { iter.position_ = iter.node_->upper_bound(key, key_comp()); if (iter.node_->is_leaf()) { break; } iter.node_ = iter.node_->child(iter.position_); } return internal_last(iter); } template template auto btree

::internal_find(const K &key) const -> iterator { SearchResult res = internal_locate(key); if (res.HasMatch()) { if (res.IsEq()) { return res.value; } } else { const iterator iter = internal_last(res.value); if (iter.node_ != nullptr && !compare_keys(key, iter.key())) { return iter; } } return {nullptr, 0}; } template int btree

::internal_verify(const node_type *node, const key_type *lo, const key_type *hi) const { assert(node->count() > 0); assert(node->count() <= node->max_count()); if (lo) { assert(!compare_keys(node->key(node->start()), *lo)); } if (hi) { assert(!compare_keys(*hi, node->key(node->finish() - 1))); } for (int i = node->start() + 1; i < node->finish(); ++i) { assert(!compare_keys(node->key(i), node->key(i - 1))); } int count = node->count(); if (node->is_internal()) { for (int i = node->start(); i <= node->finish(); ++i) { assert(node->child(i) != nullptr); assert(node->child(i)->parent() == node); assert(node->child(i)->position() == i); count += internal_verify(node->child(i), i == node->start() ? lo : &node->key(i - 1), i == node->finish() ? hi : &node->key(i)); } } return count; } struct btree_access { template static auto erase_if(BtreeContainer &container, Pred pred) -> typename BtreeContainer::size_type { const auto initial_size = container.size(); auto &tree = container.tree_; auto *alloc = tree.mutable_allocator(); for (auto it = container.begin(); it != container.end();) { if (!pred(*it)) { ++it; continue; } auto *node = it.node_; if (node->is_internal()) { // Handle internal nodes normally. it = container.erase(it); continue; } // If this is a leaf node, then we do all the erases from this node // at once before doing rebalancing. // The current position to transfer slots to. 
int to_pos = it.position_; node->value_destroy(it.position_, alloc); while (++it.position_ < node->finish()) { it.update_generation(); if (pred(*it)) { node->value_destroy(it.position_, alloc); } else { node->transfer(node->slot(to_pos++), node->slot(it.position_), alloc); } } const int num_deleted = node->finish() - to_pos; tree.size_ -= num_deleted; node->set_finish(to_pos); it.position_ = to_pos; it = tree.rebalance_after_delete(it); } return initial_size - container.size(); } }; #undef ABSL_BTREE_ENABLE_GENERATIONS } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_BTREE_H_ abseil-20220623.1/absl/container/internal/btree_container.h000066400000000000000000000652171430371345100234210ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ #define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/btree.h" // IWYU pragma: export #include "absl/container/internal/common.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // A common base class for btree_set, btree_map, btree_multiset, and // btree_multimap. 
template class btree_container { using params_type = typename Tree::params_type; protected: // Alias used for heterogeneous lookup functions. // `key_arg` evaluates to `K` when the functors are transparent and to // `key_type` otherwise. It permits template argument deduction on `K` for the // transparent case. template using key_arg = typename KeyArg::template type< K, typename Tree::key_type>; public: using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; using difference_type = typename Tree::difference_type; using key_compare = typename Tree::original_key_compare; using value_compare = typename Tree::value_compare; using allocator_type = typename Tree::allocator_type; using reference = typename Tree::reference; using const_reference = typename Tree::const_reference; using pointer = typename Tree::pointer; using const_pointer = typename Tree::const_pointer; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; using reverse_iterator = typename Tree::reverse_iterator; using const_reverse_iterator = typename Tree::const_reverse_iterator; using node_type = typename Tree::node_handle_type; // Constructors/assignments. 
btree_container() : tree_(key_compare(), allocator_type()) {} explicit btree_container(const key_compare &comp, const allocator_type &alloc = allocator_type()) : tree_(comp, alloc) {} explicit btree_container(const allocator_type &alloc) : tree_(key_compare(), alloc) {} btree_container(const btree_container &other) : btree_container(other, absl::allocator_traits:: select_on_container_copy_construction( other.get_allocator())) {} btree_container(const btree_container &other, const allocator_type &alloc) : tree_(other.tree_, alloc) {} btree_container(btree_container &&other) noexcept( std::is_nothrow_move_constructible::value) = default; btree_container(btree_container &&other, const allocator_type &alloc) : tree_(std::move(other.tree_), alloc) {} btree_container &operator=(const btree_container &other) = default; btree_container &operator=(btree_container &&other) noexcept( std::is_nothrow_move_assignable::value) = default; // Iterator routines. iterator begin() { return tree_.begin(); } const_iterator begin() const { return tree_.begin(); } const_iterator cbegin() const { return tree_.begin(); } iterator end() { return tree_.end(); } const_iterator end() const { return tree_.end(); } const_iterator cend() const { return tree_.end(); } reverse_iterator rbegin() { return tree_.rbegin(); } const_reverse_iterator rbegin() const { return tree_.rbegin(); } const_reverse_iterator crbegin() const { return tree_.rbegin(); } reverse_iterator rend() { return tree_.rend(); } const_reverse_iterator rend() const { return tree_.rend(); } const_reverse_iterator crend() const { return tree_.rend(); } // Lookup routines. 
template size_type count(const key_arg &key) const { auto equal_range = this->equal_range(key); return std::distance(equal_range.first, equal_range.second); } template iterator find(const key_arg &key) { return tree_.find(key); } template const_iterator find(const key_arg &key) const { return tree_.find(key); } template bool contains(const key_arg &key) const { return find(key) != end(); } template iterator lower_bound(const key_arg &key) { return tree_.lower_bound(key); } template const_iterator lower_bound(const key_arg &key) const { return tree_.lower_bound(key); } template iterator upper_bound(const key_arg &key) { return tree_.upper_bound(key); } template const_iterator upper_bound(const key_arg &key) const { return tree_.upper_bound(key); } template std::pair equal_range(const key_arg &key) { return tree_.equal_range(key); } template std::pair equal_range( const key_arg &key) const { return tree_.equal_range(key); } // Deletion routines. Note that there is also a deletion routine that is // specific to btree_set_container/btree_multiset_container. // Erase the specified iterator from the btree. The iterator must be valid // (i.e. not equal to end()). Return an iterator pointing to the node after // the one that was erased (or end() if none exists). iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); } iterator erase(iterator iter) { return tree_.erase(iter); } iterator erase(const_iterator first, const_iterator last) { return tree_.erase_range(iterator(first), iterator(last)).second; } template size_type erase(const key_arg &key) { auto equal_range = this->equal_range(key); return tree_.erase_range(equal_range.first, equal_range.second).first; } // Extract routines. node_type extract(iterator position) { // Use Construct instead of Transfer because the rebalancing code will // destroy the slot later. 
auto node = CommonAccess::Construct(get_allocator(), position.slot()); erase(position); return node; } node_type extract(const_iterator position) { return extract(iterator(position)); } // Utility routines. ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); } void swap(btree_container &other) { tree_.swap(other.tree_); } void verify() const { tree_.verify(); } // Size routines. size_type size() const { return tree_.size(); } size_type max_size() const { return tree_.max_size(); } bool empty() const { return tree_.empty(); } friend bool operator==(const btree_container &x, const btree_container &y) { if (x.size() != y.size()) return false; return std::equal(x.begin(), x.end(), y.begin()); } friend bool operator!=(const btree_container &x, const btree_container &y) { return !(x == y); } friend bool operator<(const btree_container &x, const btree_container &y) { return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); } friend bool operator>(const btree_container &x, const btree_container &y) { return y < x; } friend bool operator<=(const btree_container &x, const btree_container &y) { return !(y < x); } friend bool operator>=(const btree_container &x, const btree_container &y) { return !(x < y); } // The allocator used by the btree. allocator_type get_allocator() const { return tree_.get_allocator(); } // The key comparator used by the btree. key_compare key_comp() const { return key_compare(tree_.key_comp()); } value_compare value_comp() const { return tree_.value_comp(); } // Support absl::Hash. template friend State AbslHashValue(State h, const btree_container &b) { for (const auto &v : b) { h = State::combine(std::move(h), v); } return State::combine(std::move(h), b.size()); } protected: friend struct btree_access; Tree tree_; }; // A common base class for btree_set and btree_map. 
template class btree_set_container : public btree_container { using super_type = btree_container; using params_type = typename Tree::params_type; using init_type = typename params_type::init_type; using is_key_compare_to = typename params_type::is_key_compare_to; friend class BtreeNodePeer; protected: template using key_arg = typename super_type::template key_arg; public: using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; using node_type = typename super_type::node_type; using insert_return_type = InsertReturnType; // Inherit constructors. using super_type::super_type; btree_set_container() {} // Range constructors. template btree_set_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : super_type(comp, alloc) { insert(b, e); } template btree_set_container(InputIterator b, InputIterator e, const allocator_type &alloc) : btree_set_container(b, e, key_compare(), alloc) {} // Initializer list constructors. btree_set_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_set_container(init.begin(), init.end(), comp, alloc) {} btree_set_container(std::initializer_list init, const allocator_type &alloc) : btree_set_container(init.begin(), init.end(), alloc) {} // Insertion routines. std::pair insert(const value_type &v) { return this->tree_.insert_unique(params_type::key(v), v); } std::pair insert(value_type &&v) { return this->tree_.insert_unique(params_type::key(v), std::move(v)); } template std::pair emplace(Args &&... args) { // Use a node handle to manage a temp slot. 
auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); auto *slot = CommonAccess::GetSlot(node); return this->tree_.insert_unique(params_type::key(slot), slot); } iterator insert(const_iterator hint, const value_type &v) { return this->tree_ .insert_hint_unique(iterator(hint), params_type::key(v), v) .first; } iterator insert(const_iterator hint, value_type &&v) { return this->tree_ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v)) .first; } template iterator emplace_hint(const_iterator hint, Args &&... args) { // Use a node handle to manage a temp slot. auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); auto *slot = CommonAccess::GetSlot(node); return this->tree_ .insert_hint_unique(iterator(hint), params_type::key(slot), slot) .first; } template void insert(InputIterator b, InputIterator e) { this->tree_.insert_iterator_unique(b, e, 0); } void insert(std::initializer_list init) { this->tree_.insert_iterator_unique(init.begin(), init.end(), 0); } insert_return_type insert(node_type &&node) { if (!node) return {this->end(), false, node_type()}; std::pair res = this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); if (res.second) { CommonAccess::Destroy(&node); return {res.first, true, node_type()}; } else { return {res.first, false, std::move(node)}; } } iterator insert(const_iterator hint, node_type &&node) { if (!node) return this->end(); std::pair res = this->tree_.insert_hint_unique( iterator(hint), params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); if (res.second) CommonAccess::Destroy(&node); return res.first; } // Node extraction routines. template node_type extract(const key_arg &key) { const std::pair lower_and_equal = this->tree_.lower_bound_equal(key); return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); } using super_type::extract; // Merge routines. 
// Moves elements from `src` into `this`. If the element already exists in // `this`, it is left unmodified in `src`. template < typename T, typename absl::enable_if_t< absl::conjunction< std::is_same, std::is_same, std::is_same>::value, int> = 0> void merge(btree_container &src) { // NOLINT for (auto src_it = src.begin(); src_it != src.end();) { if (insert(std::move(params_type::element(src_it.slot()))).second) { src_it = src.erase(src_it); } else { ++src_it; } } } template < typename T, typename absl::enable_if_t< absl::conjunction< std::is_same, std::is_same, std::is_same>::value, int> = 0> void merge(btree_container &&src) { merge(src); } }; // Base class for btree_map. template class btree_map_container : public btree_set_container { using super_type = btree_set_container; using params_type = typename Tree::params_type; friend class BtreeNodePeer; private: template using key_arg = typename super_type::template key_arg; public: using key_type = typename Tree::key_type; using mapped_type = typename params_type::mapped_type; using value_type = typename Tree::value_type; using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; // Inherit constructors. using super_type::super_type; btree_map_container() {} // Insertion routines. // Note: the nullptr template arguments and extra `const M&` overloads allow // for supporting bitfield arguments. 
template std::pair insert_or_assign(const key_arg &k, const M &obj) { return insert_or_assign_impl(k, obj); } template std::pair insert_or_assign(key_arg &&k, const M &obj) { return insert_or_assign_impl(std::forward(k), obj); } template std::pair insert_or_assign(const key_arg &k, M &&obj) { return insert_or_assign_impl(k, std::forward(obj)); } template std::pair insert_or_assign(key_arg &&k, M &&obj) { return insert_or_assign_impl(std::forward(k), std::forward(obj)); } template iterator insert_or_assign(const_iterator hint, const key_arg &k, const M &obj) { return insert_or_assign_hint_impl(hint, k, obj); } template iterator insert_or_assign(const_iterator hint, key_arg &&k, const M &obj) { return insert_or_assign_hint_impl(hint, std::forward(k), obj); } template iterator insert_or_assign(const_iterator hint, const key_arg &k, M &&obj) { return insert_or_assign_hint_impl(hint, k, std::forward(obj)); } template iterator insert_or_assign(const_iterator hint, key_arg &&k, M &&obj) { return insert_or_assign_hint_impl(hint, std::forward(k), std::forward(obj)); } template ::value, int> = 0> std::pair try_emplace(const key_arg &k, Args &&... args) { return try_emplace_impl(k, std::forward(args)...); } template ::value, int> = 0> std::pair try_emplace(key_arg &&k, Args &&... args) { return try_emplace_impl(std::forward(k), std::forward(args)...); } template iterator try_emplace(const_iterator hint, const key_arg &k, Args &&... args) { return try_emplace_hint_impl(hint, k, std::forward(args)...); } template iterator try_emplace(const_iterator hint, key_arg &&k, Args &&... 
args) { return try_emplace_hint_impl(hint, std::forward(k), std::forward(args)...); } template mapped_type &operator[](const key_arg &k) { return try_emplace(k).first->second; } template mapped_type &operator[](key_arg &&k) { return try_emplace(std::forward(k)).first->second; } template mapped_type &at(const key_arg &key) { auto it = this->find(key); if (it == this->end()) base_internal::ThrowStdOutOfRange("absl::btree_map::at"); return it->second; } template const mapped_type &at(const key_arg &key) const { auto it = this->find(key); if (it == this->end()) base_internal::ThrowStdOutOfRange("absl::btree_map::at"); return it->second; } private: // Note: when we call `std::forward(obj)` twice, it's safe because // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when // `ret.second` is false. template std::pair insert_or_assign_impl(K &&k, M &&obj) { const std::pair ret = this->tree_.insert_unique(k, std::forward(k), std::forward(obj)); if (!ret.second) ret.first->second = std::forward(obj); return ret; } template iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) { const std::pair ret = this->tree_.insert_hint_unique( iterator(hint), k, std::forward(k), std::forward(obj)); if (!ret.second) ret.first->second = std::forward(obj); return ret.first; } template std::pair try_emplace_impl(K &&k, Args &&... args) { return this->tree_.insert_unique( k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)); } template iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) { return this->tree_ .insert_hint_unique(iterator(hint), k, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)) .first; } }; // A common base class for btree_multiset and btree_multimap. 
template class btree_multiset_container : public btree_container { using super_type = btree_container; using params_type = typename Tree::params_type; using init_type = typename params_type::init_type; using is_key_compare_to = typename params_type::is_key_compare_to; friend class BtreeNodePeer; template using key_arg = typename super_type::template key_arg; public: using key_type = typename Tree::key_type; using value_type = typename Tree::value_type; using size_type = typename Tree::size_type; using key_compare = typename Tree::original_key_compare; using allocator_type = typename Tree::allocator_type; using iterator = typename Tree::iterator; using const_iterator = typename Tree::const_iterator; using node_type = typename super_type::node_type; // Inherit constructors. using super_type::super_type; btree_multiset_container() {} // Range constructors. template btree_multiset_container(InputIterator b, InputIterator e, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : super_type(comp, alloc) { insert(b, e); } template btree_multiset_container(InputIterator b, InputIterator e, const allocator_type &alloc) : btree_multiset_container(b, e, key_compare(), alloc) {} // Initializer list constructors. btree_multiset_container(std::initializer_list init, const key_compare &comp = key_compare(), const allocator_type &alloc = allocator_type()) : btree_multiset_container(init.begin(), init.end(), comp, alloc) {} btree_multiset_container(std::initializer_list init, const allocator_type &alloc) : btree_multiset_container(init.begin(), init.end(), alloc) {} // Insertion routines. 
iterator insert(const value_type &v) { return this->tree_.insert_multi(v); } iterator insert(value_type &&v) { return this->tree_.insert_multi(std::move(v)); } iterator insert(const_iterator hint, const value_type &v) { return this->tree_.insert_hint_multi(iterator(hint), v); } iterator insert(const_iterator hint, value_type &&v) { return this->tree_.insert_hint_multi(iterator(hint), std::move(v)); } template void insert(InputIterator b, InputIterator e) { this->tree_.insert_iterator_multi(b, e); } void insert(std::initializer_list init) { this->tree_.insert_iterator_multi(init.begin(), init.end()); } template iterator emplace(Args &&... args) { // Use a node handle to manage a temp slot. auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); return this->tree_.insert_multi(CommonAccess::GetSlot(node)); } template iterator emplace_hint(const_iterator hint, Args &&... args) { // Use a node handle to manage a temp slot. auto node = CommonAccess::Construct(this->get_allocator(), std::forward(args)...); return this->tree_.insert_hint_multi(iterator(hint), CommonAccess::GetSlot(node)); } iterator insert(node_type &&node) { if (!node) return this->end(); iterator res = this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)), CommonAccess::GetSlot(node)); CommonAccess::Destroy(&node); return res; } iterator insert(const_iterator hint, node_type &&node) { if (!node) return this->end(); iterator res = this->tree_.insert_hint_multi( iterator(hint), std::move(params_type::element(CommonAccess::GetSlot(node)))); CommonAccess::Destroy(&node); return res; } // Node extraction routines. template node_type extract(const key_arg &key) { const std::pair lower_and_equal = this->tree_.lower_bound_equal(key); return lower_and_equal.second ? extract(lower_and_equal.first) : node_type(); } using super_type::extract; // Merge routines. // Moves all elements from `src` into `this`. 
template < typename T, typename absl::enable_if_t< absl::conjunction< std::is_same, std::is_same, std::is_same>::value, int> = 0> void merge(btree_container &src) { // NOLINT for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) { insert(std::move(params_type::element(src_it.slot()))); } src.clear(); } template < typename T, typename absl::enable_if_t< absl::conjunction< std::is_same, std::is_same, std::is_same>::value, int> = 0> void merge(btree_container &&src) { merge(src); } }; // A base class for btree_multimap. template class btree_multimap_container : public btree_multiset_container { using super_type = btree_multiset_container; using params_type = typename Tree::params_type; friend class BtreeNodePeer; public: using mapped_type = typename params_type::mapped_type; // Inherit constructors. using super_type::super_type; btree_multimap_container() {} }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_ abseil-20220623.1/absl/container/internal/common.h000066400000000000000000000130201430371345100215270ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_ #define ABSL_CONTAINER_INTERNAL_CONTAINER_H_ #include #include #include "absl/meta/type_traits.h" #include "absl/types/optional.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct IsTransparent : std::false_type {}; template struct IsTransparent> : std::true_type {}; template struct KeyArg { // Transparent. Forward `K`. template using type = K; }; template <> struct KeyArg { // Not transparent. Always use `key_type`. template using type = key_type; }; // The node_handle concept from C++17. // We specialize node_handle for sets and maps. node_handle_base holds the // common API of both. template class node_handle_base { protected: using slot_type = typename PolicyTraits::slot_type; public: using allocator_type = Alloc; constexpr node_handle_base() = default; node_handle_base(node_handle_base&& other) noexcept { *this = std::move(other); } ~node_handle_base() { destroy(); } node_handle_base& operator=(node_handle_base&& other) noexcept { destroy(); if (!other.empty()) { alloc_ = other.alloc_; PolicyTraits::transfer(alloc(), slot(), other.slot()); other.reset(); } return *this; } bool empty() const noexcept { return !alloc_; } explicit operator bool() const noexcept { return !empty(); } allocator_type get_allocator() const { return *alloc_; } protected: friend struct CommonAccess; struct transfer_tag_t {}; node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s) : alloc_(a) { PolicyTraits::transfer(alloc(), slot(), s); } struct construct_tag_t {}; template node_handle_base(construct_tag_t, const allocator_type& a, Args&&... 
args) : alloc_(a) { PolicyTraits::construct(alloc(), slot(), std::forward(args)...); } void destroy() { if (!empty()) { PolicyTraits::destroy(alloc(), slot()); reset(); } } void reset() { assert(alloc_.has_value()); alloc_ = absl::nullopt; } slot_type* slot() const { assert(!empty()); return reinterpret_cast(std::addressof(slot_space_)); } allocator_type* alloc() { return std::addressof(*alloc_); } private: absl::optional alloc_ = {}; alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {}; }; // For sets. template class node_handle : public node_handle_base { using Base = node_handle_base; public: using value_type = typename PolicyTraits::value_type; constexpr node_handle() {} value_type& value() const { return PolicyTraits::element(this->slot()); } private: friend struct CommonAccess; using Base::Base; }; // For maps. template class node_handle> : public node_handle_base { using Base = node_handle_base; using slot_type = typename PolicyTraits::slot_type; public: using key_type = typename Policy::key_type; using mapped_type = typename Policy::mapped_type; constexpr node_handle() {} // When C++17 is available, we can use std::launder to provide mutable // access to the key. Otherwise, we provide const access. auto key() const -> decltype(PolicyTraits::mutable_key(std::declval())) { return PolicyTraits::mutable_key(this->slot()); } mapped_type& mapped() const { return PolicyTraits::value(&PolicyTraits::element(this->slot())); } private: friend struct CommonAccess; using Base::Base; }; // Provide access to non-public node-handle functions. struct CommonAccess { template static auto GetSlot(const Node& node) -> decltype(node.slot()) { return node.slot(); } template static void Destroy(Node* node) { node->destroy(); } template static void Reset(Node* node) { node->reset(); } template static T Transfer(Args&&... args) { return T(typename T::transfer_tag_t{}, std::forward(args)...); } template static T Construct(Args&&... 
args) { return T(typename T::construct_tag_t{}, std::forward(args)...); } }; // Implement the insert_return_type<> concept of C++17. template struct InsertReturnType { Iterator position; bool inserted; NodeType node; }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_ abseil-20220623.1/absl/container/internal/compressed_tuple.h000066400000000000000000000246101430371345100236230ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Helper class to perform the Empty Base Optimization. // Ts can contain classes and non-classes, empty or not. For the ones that // are empty classes, we perform the optimization. If all types in Ts are empty // classes, then CompressedTuple is itself an empty class. // // To access the members, use member get() function. // // Eg: // absl::container_internal::CompressedTuple value(7, t1, t2, // t3); // assert(value.get<0>() == 7); // T1& t1 = value.get<1>(); // const T2& t2 = value.get<2>(); // ... // // https://en.cppreference.com/w/cpp/language/ebo #ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ #define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ #include #include #include #include #include "absl/utility/utility.h" #if defined(_MSC_VER) && !defined(__NVCC__) // We need to mark these classes with this declspec to ensure that // CompressedTuple happens. 
#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases) #else #define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template class CompressedTuple; namespace internal_compressed_tuple { template struct Elem; template struct Elem, I> : std::tuple_element> {}; template using ElemT = typename Elem::type; // Use the __is_final intrinsic if available. Where it's not available, classes // declared with the 'final' specifier cannot be used as CompressedTuple // elements. // TODO(sbenza): Replace this with std::is_final in C++14. template constexpr bool IsFinal() { #if defined(__clang__) || defined(__GNUC__) return __is_final(T); #else return false; #endif } // We can't use EBCO on other CompressedTuples because that would mean that we // derive from multiple Storage<> instantiations with the same I parameter, // and potentially from multiple identical Storage<> instantiations. So anytime // we use type inheritance rather than encapsulation, we mark // CompressedTupleImpl, to make this easy to detect. struct uses_inheritance {}; template constexpr bool ShouldUseBase() { return std::is_class::value && std::is_empty::value && !IsFinal() && !std::is_base_of::value; } // The storage class provides two specializations: // - For empty classes, it stores T as a base class. // - For everything else, it stores T as a member. 
template ::type>()> #else bool UseBase = ShouldUseBase()> #endif struct Storage { T value; constexpr Storage() = default; template explicit constexpr Storage(absl::in_place_t, V&& v) : value(absl::forward(v)) {} constexpr const T& get() const& { return value; } T& get() & { return value; } constexpr const T&& get() const&& { return absl::move(*this).value; } T&& get() && { return std::move(*this).value; } }; template struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage : T { constexpr Storage() = default; template explicit constexpr Storage(absl::in_place_t, V&& v) : T(absl::forward(v)) {} constexpr const T& get() const& { return *this; } T& get() & { return *this; } constexpr const T&& get() const&& { return absl::move(*this); } T&& get() && { return std::move(*this); } }; template struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl; template struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< CompressedTuple, absl::index_sequence, ShouldAnyUseBase> // We use the dummy identity function through std::integral_constant to // convince MSVC of accepting and expanding I in that context. Without it // you would get: // error C3548: 'I': parameter pack cannot be used in this context : uses_inheritance, Storage::value>... { constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : Storage(absl::in_place, absl::forward(args))... {} friend CompressedTuple; }; template struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl< CompressedTuple, absl::index_sequence, false> // We use the dummy identity function as above... : Storage::value, false>... { constexpr CompressedTupleImpl() = default; template explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args) : Storage(absl::in_place, absl::forward(args))... 
{} friend CompressedTuple; }; std::false_type Or(std::initializer_list); std::true_type Or(std::initializer_list); // MSVC requires this to be done separately rather than within the declaration // of CompressedTuple below. template constexpr bool ShouldAnyUseBase() { return decltype( Or({std::integral_constant()>()...})){}; } template using TupleElementMoveConstructible = typename std::conditional::value, std::is_convertible, std::is_constructible>::type; template struct TupleMoveConstructible : std::false_type {}; template struct TupleMoveConstructible, Vs...> : std::integral_constant< bool, absl::conjunction< TupleElementMoveConstructible...>::value> {}; template struct compressed_tuple_size; template struct compressed_tuple_size> : public std::integral_constant {}; template struct TupleItemsMoveConstructible : std::integral_constant< bool, TupleMoveConstructible::value == sizeof...(Vs), T, Vs...>::value> {}; } // namespace internal_compressed_tuple // Helper class to perform the Empty Base Class Optimization. // Ts can contain classes and non-classes, empty or not. For the ones that // are empty classes, we perform the CompressedTuple. If all types in Ts are // empty classes, then CompressedTuple is itself an empty class. (This // does not apply when one or more of those empty classes is itself an empty // CompressedTuple.) // // To access the members, use member .get() function. // // Eg: // absl::container_internal::CompressedTuple value(7, t1, t2, // t3); // assert(value.get<0>() == 7); // T1& t1 = value.get<1>(); // const T2& t2 = value.get<2>(); // ... 
// // https://en.cppreference.com/w/cpp/language/ebo template class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple : private internal_compressed_tuple::CompressedTupleImpl< CompressedTuple, absl::index_sequence_for, internal_compressed_tuple::ShouldAnyUseBase()> { private: template using ElemT = internal_compressed_tuple::ElemT; template using StorageT = internal_compressed_tuple::Storage, I>; public: // There seems to be a bug in MSVC dealing in which using '=default' here will // cause the compiler to ignore the body of other constructors. The work- // around is to explicitly implement the default constructor. #if defined(_MSC_VER) constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {} #else constexpr CompressedTuple() = default; #endif explicit constexpr CompressedTuple(const Ts&... base) : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {} template )>>, internal_compressed_tuple::TupleItemsMoveConstructible< CompressedTuple, First, Vs...>>::value, bool> = true> explicit constexpr CompressedTuple(First&& first, Vs&&... base) : CompressedTuple::CompressedTupleImpl(absl::in_place, absl::forward(first), absl::forward(base)...) {} template ElemT& get() & { return StorageT::get(); } template constexpr const ElemT& get() const& { return StorageT::get(); } template ElemT&& get() && { return std::move(*this).StorageT::get(); } template constexpr const ElemT&& get() const&& { return absl::move(*this).StorageT::get(); } }; // Explicit specialization for a zero-element tuple // (needed to avoid ambiguous overloads for the default constructor). 
template <> class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {}; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC #endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_ abseil-20220623.1/absl/container/internal/compressed_tuple_test.cc000066400000000000000000000321771430371345100250270ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/compressed_tuple.h" #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/internal/test_instance_tracker.h" #include "absl/memory/memory.h" #include "absl/types/any.h" #include "absl/types/optional.h" #include "absl/utility/utility.h" // These are declared at global scope purely so that error messages // are smaller and easier to understand. 
enum class CallType { kConstRef, kConstMove }; template struct Empty { constexpr CallType value() const& { return CallType::kConstRef; } constexpr CallType value() const&& { return CallType::kConstMove; } }; template struct NotEmpty { T value; }; template struct TwoValues { T value1; U value2; }; namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using absl::test_internal::CopyableMovableInstance; using absl::test_internal::InstanceTracker; TEST(CompressedTupleTest, Sizeof) { EXPECT_EQ(sizeof(int), sizeof(CompressedTuple)); EXPECT_EQ(sizeof(int), sizeof(CompressedTuple>)); EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>>)); EXPECT_EQ(sizeof(int), sizeof(CompressedTuple, Empty<1>, Empty<2>>)); EXPECT_EQ(sizeof(TwoValues), sizeof(CompressedTuple>)); EXPECT_EQ(sizeof(TwoValues), sizeof(CompressedTuple, NotEmpty>)); EXPECT_EQ(sizeof(TwoValues), sizeof(CompressedTuple, NotEmpty, Empty<1>>)); } TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) { InstanceTracker tracker; CompressedTuple x1(CopyableMovableInstance(1)); EXPECT_EQ(tracker.instances(), 1); EXPECT_EQ(tracker.copies(), 0); EXPECT_LE(tracker.moves(), 1); EXPECT_EQ(x1.get<0>().value(), 1); } TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) { InstanceTracker tracker; CopyableMovableInstance i1(1); CompressedTuple x1(std::move(i1)); EXPECT_EQ(tracker.instances(), 2); EXPECT_EQ(tracker.copies(), 0); EXPECT_LE(tracker.moves(), 1); EXPECT_EQ(x1.get<0>().value(), 1); } TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) { InstanceTracker tracker; CopyableMovableInstance i1(1); CopyableMovableInstance i2(2); Empty<0> empty; CompressedTuple> x1(std::move(i1), i2, empty); EXPECT_EQ(x1.get<0>().value(), 1); EXPECT_EQ(x1.get<1>().value(), 2); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 1); } struct IncompleteType; CompressedTuple> MakeWithIncomplete(CopyableMovableInstance i1, IncompleteType& t, // NOLINT Empty<0> empty) { return 
CompressedTuple>{ std::move(i1), t, empty}; } struct IncompleteType {}; TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) { InstanceTracker tracker; CopyableMovableInstance i1(1); Empty<0> empty; struct DerivedType : IncompleteType {int value = 0;}; DerivedType fd; fd.value = 7; CompressedTuple> x1 = MakeWithIncomplete(std::move(i1), fd, empty); EXPECT_EQ(x1.get<0>().value(), 1); EXPECT_EQ(static_cast(x1.get<1>()).value, 7); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 2); } TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) { InstanceTracker tracker; CopyableMovableInstance i1(1); CopyableMovableInstance i2(2); CompressedTuple> x1(std::move(i1), i2, {}); // NOLINT EXPECT_EQ(x1.get<0>().value(), 1); EXPECT_EQ(x1.get<1>().value(), 2); EXPECT_EQ(tracker.instances(), 3); // We are forced into the `const Ts&...` constructor (invoking copies) // because we need it to deduce the type of `{}`. // std::tuple also has this behavior. // Note, this test is proof that this is expected behavior, but it is not // _desired_ behavior. 
EXPECT_EQ(tracker.copies(), 1); EXPECT_EQ(tracker.moves(), 0); } TEST(CompressedTupleTest, OneCopyOnLValueConstruction) { InstanceTracker tracker; CopyableMovableInstance i1(1); CompressedTuple x1(i1); EXPECT_EQ(tracker.copies(), 1); EXPECT_EQ(tracker.moves(), 0); tracker.ResetCopiesMovesSwaps(); CopyableMovableInstance i2(2); const CopyableMovableInstance& i2_ref = i2; CompressedTuple x2(i2_ref); EXPECT_EQ(tracker.copies(), 1); EXPECT_EQ(tracker.moves(), 0); } TEST(CompressedTupleTest, OneMoveOnRValueAccess) { InstanceTracker tracker; CopyableMovableInstance i1(1); CompressedTuple x(std::move(i1)); tracker.ResetCopiesMovesSwaps(); CopyableMovableInstance i2 = std::move(x).get<0>(); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 1); } TEST(CompressedTupleTest, OneCopyOnLValueAccess) { InstanceTracker tracker; CompressedTuple x(CopyableMovableInstance(0)); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 1); CopyableMovableInstance t = x.get<0>(); EXPECT_EQ(tracker.copies(), 1); EXPECT_EQ(tracker.moves(), 1); } TEST(CompressedTupleTest, ZeroCopyOnRefAccess) { InstanceTracker tracker; CompressedTuple x(CopyableMovableInstance(0)); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 1); CopyableMovableInstance& t1 = x.get<0>(); const CopyableMovableInstance& t2 = x.get<0>(); EXPECT_EQ(tracker.copies(), 0); EXPECT_EQ(tracker.moves(), 1); EXPECT_EQ(t1.value(), 0); EXPECT_EQ(t2.value(), 0); } TEST(CompressedTupleTest, Access) { struct S { std::string x; }; CompressedTuple, S> x(7, {}, S{"ABC"}); EXPECT_EQ(sizeof(x), sizeof(TwoValues)); EXPECT_EQ(7, x.get<0>()); EXPECT_EQ("ABC", x.get<2>().x); } TEST(CompressedTupleTest, NonClasses) { CompressedTuple x(7, "ABC"); EXPECT_EQ(7, x.get<0>()); EXPECT_STREQ("ABC", x.get<1>()); } TEST(CompressedTupleTest, MixClassAndNonClass) { CompressedTuple, NotEmpty> x(7, "ABC", {}, {1.25}); struct Mock { int v; const char* p; double d; }; EXPECT_EQ(sizeof(x), sizeof(Mock)); EXPECT_EQ(7, x.get<0>()); 
EXPECT_STREQ("ABC", x.get<1>()); EXPECT_EQ(1.25, x.get<3>().value); } TEST(CompressedTupleTest, Nested) { CompressedTuple, CompressedTuple>> x(1, CompressedTuple(2), CompressedTuple>(3, CompressedTuple(4))); EXPECT_EQ(1, x.get<0>()); EXPECT_EQ(2, x.get<1>().get<0>()); EXPECT_EQ(3, x.get<2>().get<0>()); EXPECT_EQ(4, x.get<2>().get<1>().get<0>()); CompressedTuple, Empty<0>, CompressedTuple, CompressedTuple>>> y; std::set*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(), &y.get<2>().get<1>().get<0>()}; #ifdef _MSC_VER // MSVC has a bug where many instances of the same base class are layed out in // the same address when using __declspec(empty_bases). // This will be fixed in a future version of MSVC. int expected = 1; #else int expected = 4; #endif EXPECT_EQ(expected, sizeof(y)); EXPECT_EQ(expected, empties.size()); EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size()); EXPECT_EQ(4 * sizeof(char), sizeof(CompressedTuple, CompressedTuple>)); EXPECT_TRUE((std::is_empty, Empty<1>>>::value)); // Make sure everything still works when things are nested. struct CT_Empty : CompressedTuple> {}; CompressedTuple, CT_Empty> nested_empty; auto contained = nested_empty.get<0>(); auto nested = nested_empty.get<1>().get<0>(); EXPECT_TRUE((std::is_same::value)); } TEST(CompressedTupleTest, Reference) { int i = 7; std::string s = "Very long string that goes in the heap"; CompressedTuple x(i, i, s, s); // Sanity check. We should have not moved from `s` EXPECT_EQ(s, "Very long string that goes in the heap"); EXPECT_EQ(x.get<0>(), x.get<1>()); EXPECT_NE(&x.get<0>(), &x.get<1>()); EXPECT_EQ(&x.get<1>(), &i); EXPECT_EQ(x.get<2>(), x.get<3>()); EXPECT_NE(&x.get<2>(), &x.get<3>()); EXPECT_EQ(&x.get<3>(), &s); } TEST(CompressedTupleTest, NoElements) { CompressedTuple<> x; static_cast(x); // Silence -Wunused-variable. 
EXPECT_TRUE(std::is_empty>::value); } TEST(CompressedTupleTest, MoveOnlyElements) { CompressedTuple> str_tup( absl::make_unique("str")); CompressedTuple>, std::unique_ptr> x(std::move(str_tup), absl::make_unique(5)); EXPECT_EQ(*x.get<0>().get<0>(), "str"); EXPECT_EQ(*x.get<1>(), 5); std::unique_ptr x0 = std::move(x.get<0>()).get<0>(); std::unique_ptr x1 = std::move(x).get<1>(); EXPECT_EQ(*x0, "str"); EXPECT_EQ(*x1, 5); } TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) { CompressedTuple> base( absl::make_unique("str")); EXPECT_EQ(*base.get<0>(), "str"); CompressedTuple> copy(std::move(base)); EXPECT_EQ(*copy.get<0>(), "str"); } TEST(CompressedTupleTest, AnyElements) { any a(std::string("str")); CompressedTuple x(any(5), a); EXPECT_EQ(absl::any_cast(x.get<0>()), 5); EXPECT_EQ(absl::any_cast(x.get<1>()), "str"); a = 0.5f; EXPECT_EQ(absl::any_cast(x.get<1>()), 0.5); } TEST(CompressedTupleTest, Constexpr) { struct NonTrivialStruct { constexpr NonTrivialStruct() = default; constexpr int value() const { return v; } int v = 5; }; struct TrivialStruct { TrivialStruct() = default; constexpr int value() const { return v; } int v; }; constexpr CompressedTuple, Empty<0>> x( 7, 1.25, CompressedTuple(5), {}); constexpr int x0 = x.get<0>(); constexpr double x1 = x.get<1>(); constexpr int x2 = x.get<2>().get<0>(); constexpr CallType x3 = x.get<3>().value(); EXPECT_EQ(x0, 7); EXPECT_EQ(x1, 1.25); EXPECT_EQ(x2, 5); EXPECT_EQ(x3, CallType::kConstRef); #if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4 constexpr CompressedTuple, TrivialStruct, int> trivial = {}; constexpr CallType trivial0 = trivial.get<0>().value(); constexpr int trivial1 = trivial.get<1>().value(); constexpr int trivial2 = trivial.get<2>(); EXPECT_EQ(trivial0, CallType::kConstRef); EXPECT_EQ(trivial1, 0); EXPECT_EQ(trivial2, 0); #endif constexpr CompressedTuple, NonTrivialStruct, absl::optional> non_trivial = {}; constexpr CallType non_trivial0 = non_trivial.get<0>().value(); constexpr int 
non_trivial1 = non_trivial.get<1>().value(); constexpr absl::optional non_trivial2 = non_trivial.get<2>(); EXPECT_EQ(non_trivial0, CallType::kConstRef); EXPECT_EQ(non_trivial1, 5); EXPECT_EQ(non_trivial2, absl::nullopt); static constexpr char data[] = "DEF"; constexpr CompressedTuple z(data); constexpr const char* z1 = z.get<0>(); EXPECT_EQ(std::string(z1), std::string(data)); #if defined(__clang__) // An apparent bug in earlier versions of gcc claims these are ambiguous. constexpr int x2m = absl::move(x.get<2>()).get<0>(); constexpr CallType x3m = absl::move(x).get<3>().value(); EXPECT_EQ(x2m, 5); EXPECT_EQ(x3m, CallType::kConstMove); #endif } #if defined(__clang__) || defined(__GNUC__) TEST(CompressedTupleTest, EmptyFinalClass) { struct S final { int f() const { return 5; } }; CompressedTuple x; EXPECT_EQ(x.get<0>().f(), 5); } #endif // TODO(b/214288561): enable this test. TEST(CompressedTupleTest, DISABLED_NestedEbo) { struct Empty1 {}; struct Empty2 {}; CompressedTuple, int> x; CompressedTuple y; // Currently fails with sizeof(x) == 8, sizeof(y) == 4. EXPECT_EQ(sizeof(x), sizeof(y)); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/container_memory.h000066400000000000000000000403621430371345100236220ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ #define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ #include #include #include #include #include #include #include #include "absl/base/config.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/utility/utility.h" #ifdef ABSL_HAVE_ADDRESS_SANITIZER #include #endif #ifdef ABSL_HAVE_MEMORY_SANITIZER #include #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct alignas(Alignment) AlignedType {}; // Allocates at least n bytes aligned to the specified alignment. // Alignment must be a power of 2. It must be positive. // // Note that many allocators don't honor alignment requirements above certain // threshold (usually either alignof(std::max_align_t) or alignof(void*)). // Allocate() doesn't apply alignment corrections. If the underlying allocator // returns insufficiently alignment pointer, that's what you are going to get. template void* Allocate(Alloc* alloc, size_t n) { static_assert(Alignment > 0, ""); assert(n && "n must be positive"); using M = AlignedType; using A = typename absl::allocator_traits::template rebind_alloc; using AT = typename absl::allocator_traits::template rebind_traits; // On macOS, "mem_alloc" is a #define with one argument defined in // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it // with the "foo(bar)" syntax. A my_mem_alloc(*alloc); void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M)); assert(reinterpret_cast(p) % Alignment == 0 && "allocator does not respect alignment"); return p; } // The pointer must have been previously obtained by calling // Allocate(alloc, n). 
template void Deallocate(Alloc* alloc, void* p, size_t n) { static_assert(Alignment > 0, ""); assert(n && "n must be positive"); using M = AlignedType; using A = typename absl::allocator_traits::template rebind_alloc; using AT = typename absl::allocator_traits::template rebind_traits; // On macOS, "mem_alloc" is a #define with one argument defined in // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it // with the "foo(bar)" syntax. A my_mem_alloc(*alloc); AT::deallocate(my_mem_alloc, static_cast(p), (n + sizeof(M) - 1) / sizeof(M)); } namespace memory_internal { // Constructs T into uninitialized storage pointed by `ptr` using the args // specified in the tuple. template void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t, absl::index_sequence) { absl::allocator_traits::construct( *alloc, ptr, std::get(std::forward(t))...); } template struct WithConstructedImplF { template decltype(std::declval()(std::declval())) operator()( Args&&... args) const { return std::forward(f)(T(std::forward(args)...)); } F&& f; }; template decltype(std::declval()(std::declval())) WithConstructedImpl( Tuple&& t, absl::index_sequence, F&& f) { return WithConstructedImplF{std::forward(f)}( std::get(std::forward(t))...); } template auto TupleRefImpl(T&& t, absl::index_sequence) -> decltype(std::forward_as_tuple(std::get(std::forward(t))...)) { return std::forward_as_tuple(std::get(std::forward(t))...); } // Returns a tuple of references to the elements of the input tuple. T must be a // tuple. 
template auto TupleRef(T&& t) -> decltype( TupleRefImpl(std::forward(t), absl::make_index_sequence< std::tuple_size::type>::value>())) { return TupleRefImpl( std::forward(t), absl::make_index_sequence< std::tuple_size::type>::value>()); } template decltype(std::declval()(std::declval(), std::piecewise_construct, std::declval>(), std::declval())) DecomposePairImpl(F&& f, std::pair, V> p) { const auto& key = std::get<0>(p.first); return std::forward(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second)); } } // namespace memory_internal // Constructs T into uninitialized storage pointed by `ptr` using the args // specified in the tuple. template void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) { memory_internal::ConstructFromTupleImpl( alloc, ptr, std::forward(t), absl::make_index_sequence< std::tuple_size::type>::value>()); } // Constructs T using the args specified in the tuple and calls F with the // constructed value. template decltype(std::declval()(std::declval())) WithConstructed( Tuple&& t, F&& f) { return memory_internal::WithConstructedImpl( std::forward(t), absl::make_index_sequence< std::tuple_size::type>::value>(), std::forward(f)); } // Given arguments of an std::pair's consructor, PairArgs() returns a pair of // tuples with references to the passed arguments. The tuples contain // constructor arguments for the first and the second elements of the pair. // // The following two snippets are equivalent. // // 1. std::pair p(args...); // // 2. 
auto a = PairArgs(args...); // std::pair p(std::piecewise_construct, // std::move(a.first), std::move(a.second)); inline std::pair, std::tuple<>> PairArgs() { return {}; } template std::pair, std::tuple> PairArgs(F&& f, S&& s) { return {std::piecewise_construct, std::forward_as_tuple(std::forward(f)), std::forward_as_tuple(std::forward(s))}; } template std::pair, std::tuple> PairArgs( const std::pair& p) { return PairArgs(p.first, p.second); } template std::pair, std::tuple> PairArgs(std::pair&& p) { return PairArgs(std::forward(p.first), std::forward(p.second)); } template auto PairArgs(std::piecewise_construct_t, F&& f, S&& s) -> decltype(std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s)))) { return std::make_pair(memory_internal::TupleRef(std::forward(f)), memory_internal::TupleRef(std::forward(s))); } // A helper function for implementing apply() in map policies. template auto DecomposePair(F&& f, Args&&... args) -> decltype(memory_internal::DecomposePairImpl( std::forward(f), PairArgs(std::forward(args)...))) { return memory_internal::DecomposePairImpl( std::forward(f), PairArgs(std::forward(args)...)); } // A helper function for implementing apply() in set policies. template decltype(std::declval()(std::declval(), std::declval())) DecomposeValue(F&& f, Arg&& arg) { const auto& key = arg; return std::forward(f)(key, std::forward(arg)); } // Helper functions for asan and msan. 
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) { #ifdef ABSL_HAVE_ADDRESS_SANITIZER ASAN_POISON_MEMORY_REGION(m, s); #endif #ifdef ABSL_HAVE_MEMORY_SANITIZER __msan_poison(m, s); #endif (void)m; (void)s; } inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) { #ifdef ABSL_HAVE_ADDRESS_SANITIZER ASAN_UNPOISON_MEMORY_REGION(m, s); #endif #ifdef ABSL_HAVE_MEMORY_SANITIZER __msan_unpoison(m, s); #endif (void)m; (void)s; } template inline void SanitizerPoisonObject(const T* object) { SanitizerPoisonMemoryRegion(object, sizeof(T)); } template inline void SanitizerUnpoisonObject(const T* object) { SanitizerUnpoisonMemoryRegion(object, sizeof(T)); } namespace memory_internal { // If Pair is a standard-layout type, OffsetOf::kFirst and // OffsetOf::kSecond are equivalent to offsetof(Pair, first) and // offsetof(Pair, second) respectively. Otherwise they are -1. // // The purpose of OffsetOf is to avoid calling offsetof() on non-standard-layout // type, which is non-portable. template struct OffsetOf { static constexpr size_t kFirst = static_cast(-1); static constexpr size_t kSecond = static_cast(-1); }; template struct OffsetOf::type> { static constexpr size_t kFirst = offsetof(Pair, first); static constexpr size_t kSecond = offsetof(Pair, second); }; template struct IsLayoutCompatible { private: struct Pair { K first; V second; }; // Is P layout-compatible with Pair? template static constexpr bool LayoutCompatible() { return std::is_standard_layout

() && sizeof(P) == sizeof(Pair) && alignof(P) == alignof(Pair) && memory_internal::OffsetOf

::kFirst == memory_internal::OffsetOf::kFirst && memory_internal::OffsetOf

::kSecond == memory_internal::OffsetOf::kSecond; } public: // Whether pair and pair are layout-compatible. If they are, // then it is safe to store them in a union and read from either. static constexpr bool value = std::is_standard_layout() && std::is_standard_layout() && memory_internal::OffsetOf::kFirst == 0 && LayoutCompatible>() && LayoutCompatible>(); }; } // namespace memory_internal // The internal storage type for key-value containers like flat_hash_map. // // It is convenient for the value_type of a flat_hash_map to be // pair; the "const K" prevents accidental modification of the key // when dealing with the reference returned from find() and similar methods. // However, this creates other problems; we want to be able to emplace(K, V) // efficiently with move operations, and similarly be able to move a // pair in insert(). // // The solution is this union, which aliases the const and non-const versions // of the pair. This also allows flat_hash_map to work, even though // that has the same efficiency issues with move in emplace() and insert() - // but people do it anyway. // // If kMutableKeys is false, only the value member can be accessed. // // If kMutableKeys is true, key can be accessed through all slots while value // and mutable_value must be accessed only via INITIALIZED slots. Slots are // created and destroyed via mutable_value so that the key can be moved later. // // Accessing one of the union fields while the other is active is safe as // long as they are layout-compatible, which is guaranteed by the definition of // kMutableKeys. 
For C++11, the relevant section of the standard is // https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19) template union map_slot_type { map_slot_type() {} ~map_slot_type() = delete; using value_type = std::pair; using mutable_value_type = std::pair, absl::remove_const_t>; value_type value; mutable_value_type mutable_value; absl::remove_const_t key; }; template struct map_slot_policy { using slot_type = map_slot_type; using value_type = std::pair; using mutable_value_type = std::pair; private: static void emplace(slot_type* slot) { // The construction of union doesn't do anything at runtime but it allows us // to access its members without violating aliasing rules. new (slot) slot_type; } // If pair and pair are layout-compatible, we can accept one // or the other via slot_type. We are also free to access the key via // slot_type::key in this case. using kMutableKeys = memory_internal::IsLayoutCompatible; public: static value_type& element(slot_type* slot) { return slot->value; } static const value_type& element(const slot_type* slot) { return slot->value; } // When C++17 is available, we can use std::launder to provide mutable // access to the key for use in node handle. #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 static K& mutable_key(slot_type* slot) { // Still check for kMutableKeys so that we can avoid calling std::launder // unless necessary because it can interfere with optimizations. return kMutableKeys::value ? slot->key : *std::launder(const_cast( std::addressof(slot->value.first))); } #else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606) static const K& mutable_key(slot_type* slot) { return key(slot); } #endif static const K& key(const slot_type* slot) { return kMutableKeys::value ? slot->key : slot->value.first; } template static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { emplace(slot); if (kMutableKeys::value) { absl::allocator_traits::construct(*alloc, &slot->mutable_value, std::forward(args)...); } else { absl::allocator_traits::construct(*alloc, &slot->value, std::forward(args)...); } } // Construct this slot by moving from another slot. template static void construct(Allocator* alloc, slot_type* slot, slot_type* other) { emplace(slot); if (kMutableKeys::value) { absl::allocator_traits::construct( *alloc, &slot->mutable_value, std::move(other->mutable_value)); } else { absl::allocator_traits::construct(*alloc, &slot->value, std::move(other->value)); } } // Construct this slot by copying from another slot. template static void construct(Allocator* alloc, slot_type* slot, const slot_type* other) { emplace(slot); absl::allocator_traits::construct(*alloc, &slot->value, other->value); } template static void destroy(Allocator* alloc, slot_type* slot) { if (kMutableKeys::value) { absl::allocator_traits::destroy(*alloc, &slot->mutable_value); } else { absl::allocator_traits::destroy(*alloc, &slot->value); } } template static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { emplace(new_slot); if (kMutableKeys::value) { absl::allocator_traits::construct( *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value)); } else { absl::allocator_traits::construct(*alloc, &new_slot->value, std::move(old_slot->value)); } destroy(alloc, old_slot); } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_ abseil-20220623.1/absl/container/internal/container_memory_test.cc000066400000000000000000000167111430371345100250200ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/container_memory.h" #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/internal/test_instance_tracker.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::test_internal::CopyableMovableInstance; using ::absl::test_internal::InstanceTracker; using ::testing::_; using ::testing::ElementsAre; using ::testing::Gt; using ::testing::Pair; TEST(Memory, AlignmentLargerThanBase) { std::allocator alloc; void* mem = Allocate<2>(&alloc, 3); EXPECT_EQ(0, reinterpret_cast(mem) % 2); memcpy(mem, "abc", 3); Deallocate<2>(&alloc, mem, 3); } TEST(Memory, AlignmentSmallerThanBase) { std::allocator alloc; void* mem = Allocate<2>(&alloc, 3); EXPECT_EQ(0, reinterpret_cast(mem) % 2); memcpy(mem, "abc", 3); Deallocate<2>(&alloc, mem, 3); } std::map& AllocationMap() { static auto* map = new std::map; return *map; } template struct TypeCountingAllocator { TypeCountingAllocator() = default; template TypeCountingAllocator(const TypeCountingAllocator&) {} // NOLINT using value_type = T; T* allocate(size_t n, const void* = nullptr) { AllocationMap()[typeid(T)] += n; return std::allocator().allocate(n); } void deallocate(T* p, std::size_t n) { AllocationMap()[typeid(T)] -= n; return std::allocator().deallocate(p, n); } }; TEST(Memory, AllocateDeallocateMatchType) { TypeCountingAllocator alloc; void* mem = Allocate<1>(&alloc, 1); // Verify that it was allocated EXPECT_THAT(AllocationMap(), 
ElementsAre(Pair(_, Gt(0)))); Deallocate<1>(&alloc, mem, 1); // Verify that the deallocation matched. EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0))); } class Fixture : public ::testing::Test { using Alloc = std::allocator; public: Fixture() { ptr_ = std::allocator_traits::allocate(*alloc(), 1); } ~Fixture() override { std::allocator_traits::destroy(*alloc(), ptr_); std::allocator_traits::deallocate(*alloc(), ptr_, 1); } std::string* ptr() { return ptr_; } Alloc* alloc() { return &alloc_; } private: Alloc alloc_; std::string* ptr_; }; TEST_F(Fixture, ConstructNoArgs) { ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple()); EXPECT_EQ(*ptr(), ""); } TEST_F(Fixture, ConstructOneArg) { ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde")); EXPECT_EQ(*ptr(), "abcde"); } TEST_F(Fixture, ConstructTwoArg) { ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a')); EXPECT_EQ(*ptr(), "aaaaa"); } TEST(PairArgs, NoArgs) { EXPECT_THAT(PairArgs(), Pair(std::forward_as_tuple(), std::forward_as_tuple())); } TEST(PairArgs, TwoArgs) { EXPECT_EQ( std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), PairArgs(1, 'A')); } TEST(PairArgs, Pair) { EXPECT_EQ( std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), PairArgs(std::make_pair(1, 'A'))); } TEST(PairArgs, Piecewise) { EXPECT_EQ( std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')), PairArgs(std::piecewise_construct, std::forward_as_tuple(1), std::forward_as_tuple('A'))); } TEST(WithConstructed, Simple) { EXPECT_EQ(1, WithConstructed( std::make_tuple(std::string("a")), [](absl::string_view str) { return str.size(); })); } template decltype(DecomposeValue(std::declval(), std::declval())) DecomposeValueImpl(int, F&& f, Arg&& arg) { return DecomposeValue(std::forward(f), std::forward(arg)); } template const char* DecomposeValueImpl(char, F&& f, Arg&& arg) { return "not decomposable"; } template decltype(DecomposeValueImpl(0, std::declval(), 
std::declval())) TryDecomposeValue(F&& f, Arg&& arg) { return DecomposeValueImpl(0, std::forward(f), std::forward(arg)); } TEST(DecomposeValue, Decomposable) { auto f = [](const int& x, int&& y) { // NOLINT EXPECT_EQ(&x, &y); EXPECT_EQ(42, x); return 'A'; }; EXPECT_EQ('A', TryDecomposeValue(f, 42)); } TEST(DecomposeValue, NotDecomposable) { auto f = [](void*) { ADD_FAILURE() << "Must not be called"; return 'A'; }; EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42)); } template decltype(DecomposePair(std::declval(), std::declval()...)) DecomposePairImpl(int, F&& f, Args&&... args) { return DecomposePair(std::forward(f), std::forward(args)...); } template const char* DecomposePairImpl(char, F&& f, Args&&... args) { return "not decomposable"; } template decltype(DecomposePairImpl(0, std::declval(), std::declval()...)) TryDecomposePair(F&& f, Args&&... args) { return DecomposePairImpl(0, std::forward(f), std::forward(args)...); } TEST(DecomposePair, Decomposable) { auto f = [](const int& x, // NOLINT std::piecewise_construct_t, std::tuple k, std::tuple&& v) { EXPECT_EQ(&x, &std::get<0>(k)); EXPECT_EQ(42, x); EXPECT_EQ(0.5, std::get<0>(v)); return 'A'; }; EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5)); EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5))); EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct, std::make_tuple(42), std::make_tuple(0.5))); } TEST(DecomposePair, NotDecomposable) { auto f = [](...) 
{ ADD_FAILURE() << "Must not be called"; return 'A'; }; EXPECT_STREQ("not decomposable", TryDecomposePair(f)); EXPECT_STREQ("not decomposable", TryDecomposePair(f, std::piecewise_construct, std::make_tuple(), std::make_tuple(0.5))); } TEST(MapSlotPolicy, ConstKeyAndValue) { using slot_policy = map_slot_policy; using slot_type = typename slot_policy::slot_type; union Slots { Slots() {} ~Slots() {} slot_type slots[100]; } slots; std::allocator< std::pair> alloc; InstanceTracker tracker; slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1), CopyableMovableInstance(1)); for (int i = 0; i < 99; ++i) { slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]); } slot_policy::destroy(&alloc, &slots.slots[99]); EXPECT_EQ(tracker.copies(), 0); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/counting_allocator.h000066400000000000000000000074551430371345100241440ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ #define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ #include #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // This is a stateful allocator, but the state lives outside of the // allocator (in whatever test is using the allocator). 
This is odd // but helps in tests where the allocator is propagated into nested // containers - that chain of allocators uses the same state and is // thus easier to query for aggregate allocation information. template class CountingAllocator { public: using Allocator = std::allocator; using AllocatorTraits = std::allocator_traits; using value_type = typename AllocatorTraits::value_type; using pointer = typename AllocatorTraits::pointer; using const_pointer = typename AllocatorTraits::const_pointer; using size_type = typename AllocatorTraits::size_type; using difference_type = typename AllocatorTraits::difference_type; CountingAllocator() = default; explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {} CountingAllocator(int64_t* bytes_used, int64_t* instance_count) : bytes_used_(bytes_used), instance_count_(instance_count) {} template CountingAllocator(const CountingAllocator& x) : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {} pointer allocate( size_type n, typename AllocatorTraits::const_void_pointer hint = nullptr) { Allocator allocator; pointer ptr = AllocatorTraits::allocate(allocator, n, hint); if (bytes_used_ != nullptr) { *bytes_used_ += n * sizeof(T); } return ptr; } void deallocate(pointer p, size_type n) { Allocator allocator; AllocatorTraits::deallocate(allocator, p, n); if (bytes_used_ != nullptr) { *bytes_used_ -= n * sizeof(T); } } template void construct(U* p, Args&&... args) { Allocator allocator; AllocatorTraits::construct(allocator, p, std::forward(args)...); if (instance_count_ != nullptr) { *instance_count_ += 1; } } template void destroy(U* p) { Allocator allocator; // Ignore GCC warning bug. 
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wuse-after-free" #endif AllocatorTraits::destroy(allocator, p); #if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0) #pragma GCC diagnostic pop #endif if (instance_count_ != nullptr) { *instance_count_ -= 1; } } template class rebind { public: using other = CountingAllocator; }; friend bool operator==(const CountingAllocator& a, const CountingAllocator& b) { return a.bytes_used_ == b.bytes_used_ && a.instance_count_ == b.instance_count_; } friend bool operator!=(const CountingAllocator& a, const CountingAllocator& b) { return !(a == b); } int64_t* bytes_used_ = nullptr; int64_t* instance_count_ = nullptr; }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_ abseil-20220623.1/absl/container/internal/hash_function_defaults.h000066400000000000000000000125661430371345100247740ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Define the default Hash and Eq functions for SwissTable containers. // // std::hash and std::equal_to are not appropriate hash and equal // functions for SwissTable containers. There are two reasons for this. // // SwissTable containers are power of 2 sized containers: // // This means they use the lower bits of the hash value to find the slot for // each entry. 
The typical hash function for integral types is the identity. // This is a very weak hash function for SwissTable and any power of 2 sized // hashtable implementation which will lead to excessive collisions. For // SwissTable we use murmur3 style mixing to reduce collisions to a minimum. // // SwissTable containers support heterogeneous lookup: // // In order to make heterogeneous lookup work, hash and equal functions must be // polymorphic. At the same time they have to satisfy the same requirements the // C++ standard imposes on hash functions and equality operators. That is: // // if hash_default_eq(a, b) returns true for any a and b of type T, then // hash_default_hash(a) must equal hash_default_hash(b) // // For SwissTable containers this requirement is relaxed to allow a and b of // any and possibly different types. Note that like the standard the hash and // equal functions are still bound to T. This is important because some type U // can be hashed by/tested for equality differently depending on T. A notable // example is `const char*`. `const char*` is treated as a c-style string when // the hash function is hash but as a pointer when the hash // function is hash. // #ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ #define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ #include #include #include #include #include #include "absl/base/config.h" #include "absl/hash/hash.h" #include "absl/strings/cord.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // The hash of an object of type T is computed by using absl::Hash. 
template struct HashEq { using Hash = absl::Hash; using Eq = std::equal_to; }; struct StringHash { using is_transparent = void; size_t operator()(absl::string_view v) const { return absl::Hash{}(v); } size_t operator()(const absl::Cord& v) const { return absl::Hash{}(v); } }; struct StringEq { using is_transparent = void; bool operator()(absl::string_view lhs, absl::string_view rhs) const { return lhs == rhs; } bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const { return lhs == rhs; } bool operator()(const absl::Cord& lhs, absl::string_view rhs) const { return lhs == rhs; } bool operator()(absl::string_view lhs, const absl::Cord& rhs) const { return lhs == rhs; } }; // Supports heterogeneous lookup for string-like elements. struct StringHashEq { using Hash = StringHash; using Eq = StringEq; }; template <> struct HashEq : StringHashEq {}; template <> struct HashEq : StringHashEq {}; template <> struct HashEq : StringHashEq {}; // Supports heterogeneous lookup for pointers and smart pointers. template struct HashEq { struct Hash { using is_transparent = void; template size_t operator()(const U& ptr) const { return absl::Hash{}(HashEq::ToPtr(ptr)); } }; struct Eq { using is_transparent = void; template bool operator()(const A& a, const B& b) const { return HashEq::ToPtr(a) == HashEq::ToPtr(b); } }; private: static const T* ToPtr(const T* ptr) { return ptr; } template static const T* ToPtr(const std::unique_ptr& ptr) { return ptr.get(); } template static const T* ToPtr(const std::shared_ptr& ptr) { return ptr.get(); } }; template struct HashEq> : HashEq {}; template struct HashEq> : HashEq {}; // This header's visibility is restricted. If you need to access the default // hasher please use the container's ::hasher alias instead. // // Example: typename Hash = typename absl::flat_hash_map::hasher template using hash_default_hash = typename container_internal::HashEq::Hash; // This header's visibility is restricted. 
If you need to access the default // key equal please use the container's ::key_equal alias instead. // // Example: typename Eq = typename absl::flat_hash_map::key_equal template using hash_default_eq = typename container_internal::HashEq::Eq; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_ abseil-20220623.1/absl/container/internal/hash_function_defaults_test.cc000066400000000000000000000251451430371345100261660ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/internal/hash_function_defaults.h" #include #include #include #include "gtest/gtest.h" #include "absl/random/random.h" #include "absl/strings/cord.h" #include "absl/strings/cord_test_helpers.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::testing::Types; TEST(Eq, Int32) { hash_default_eq eq; EXPECT_TRUE(eq(1, 1u)); EXPECT_TRUE(eq(1, char{1})); EXPECT_TRUE(eq(1, true)); EXPECT_TRUE(eq(1, double{1.1})); EXPECT_FALSE(eq(1, char{2})); EXPECT_FALSE(eq(1, 2u)); EXPECT_FALSE(eq(1, false)); EXPECT_FALSE(eq(1, 2.)); } TEST(Hash, Int32) { hash_default_hash hash; auto h = hash(1); EXPECT_EQ(h, hash(1u)); EXPECT_EQ(h, hash(char{1})); EXPECT_EQ(h, hash(true)); EXPECT_EQ(h, hash(double{1.1})); EXPECT_NE(h, hash(2u)); EXPECT_NE(h, hash(char{2})); EXPECT_NE(h, hash(false)); EXPECT_NE(h, hash(2.)); } enum class MyEnum { A, B, C, D }; TEST(Eq, Enum) { hash_default_eq eq; EXPECT_TRUE(eq(MyEnum::A, MyEnum::A)); EXPECT_FALSE(eq(MyEnum::A, MyEnum::B)); } TEST(Hash, Enum) { hash_default_hash hash; for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) { auto h = hash(e); EXPECT_EQ(h, hash_default_hash{}(static_cast(e))); EXPECT_NE(h, hash(MyEnum::D)); } } using StringTypes = ::testing::Types; template struct EqString : ::testing::Test { hash_default_eq key_eq; }; TYPED_TEST_SUITE(EqString, StringTypes); template struct HashString : ::testing::Test { hash_default_hash hasher; }; TYPED_TEST_SUITE(HashString, StringTypes); TYPED_TEST(EqString, Works) { auto eq = this->key_eq; EXPECT_TRUE(eq("a", "a")); EXPECT_TRUE(eq("a", absl::string_view("a"))); EXPECT_TRUE(eq("a", std::string("a"))); EXPECT_FALSE(eq("a", "b")); EXPECT_FALSE(eq("a", absl::string_view("b"))); EXPECT_FALSE(eq("a", std::string("b"))); } TYPED_TEST(HashString, Works) { auto hash = this->hasher; auto h = hash("a"); EXPECT_EQ(h, hash(absl::string_view("a"))); EXPECT_EQ(h, hash(std::string("a"))); EXPECT_NE(h, 
hash(absl::string_view("b"))); EXPECT_NE(h, hash(std::string("b"))); } struct NoDeleter { template void operator()(const T* ptr) const {} }; using PointerTypes = ::testing::Types, std::unique_ptr, std::unique_ptr, std::unique_ptr, std::shared_ptr, std::shared_ptr>; template struct EqPointer : ::testing::Test { hash_default_eq key_eq; }; TYPED_TEST_SUITE(EqPointer, PointerTypes); template struct HashPointer : ::testing::Test { hash_default_hash hasher; }; TYPED_TEST_SUITE(HashPointer, PointerTypes); TYPED_TEST(EqPointer, Works) { int dummy; auto eq = this->key_eq; auto sptr = std::make_shared(); std::shared_ptr csptr = sptr; int* ptr = sptr.get(); const int* cptr = ptr; std::unique_ptr uptr(ptr); std::unique_ptr cuptr(ptr); EXPECT_TRUE(eq(ptr, cptr)); EXPECT_TRUE(eq(ptr, sptr)); EXPECT_TRUE(eq(ptr, uptr)); EXPECT_TRUE(eq(ptr, csptr)); EXPECT_TRUE(eq(ptr, cuptr)); EXPECT_FALSE(eq(&dummy, cptr)); EXPECT_FALSE(eq(&dummy, sptr)); EXPECT_FALSE(eq(&dummy, uptr)); EXPECT_FALSE(eq(&dummy, csptr)); EXPECT_FALSE(eq(&dummy, cuptr)); } TEST(Hash, DerivedAndBase) { struct Base {}; struct Derived : Base {}; hash_default_hash hasher; Base base; Derived derived; EXPECT_NE(hasher(&base), hasher(&derived)); EXPECT_EQ(hasher(static_cast(&derived)), hasher(&derived)); auto dp = std::make_shared(); EXPECT_EQ(hasher(static_cast(dp.get())), hasher(dp)); } TEST(Hash, FunctionPointer) { using Func = int (*)(); hash_default_hash hasher; hash_default_eq eq; Func p1 = [] { return 1; }, p2 = [] { return 2; }; EXPECT_EQ(hasher(p1), hasher(p1)); EXPECT_TRUE(eq(p1, p1)); EXPECT_NE(hasher(p1), hasher(p2)); EXPECT_FALSE(eq(p1, p2)); } TYPED_TEST(HashPointer, Works) { int dummy; auto hash = this->hasher; auto sptr = std::make_shared(); std::shared_ptr csptr = sptr; int* ptr = sptr.get(); const int* cptr = ptr; std::unique_ptr uptr(ptr); std::unique_ptr cuptr(ptr); EXPECT_EQ(hash(ptr), hash(cptr)); EXPECT_EQ(hash(ptr), hash(sptr)); EXPECT_EQ(hash(ptr), hash(uptr)); EXPECT_EQ(hash(ptr), hash(csptr)); 
EXPECT_EQ(hash(ptr), hash(cuptr)); EXPECT_NE(hash(&dummy), hash(cptr)); EXPECT_NE(hash(&dummy), hash(sptr)); EXPECT_NE(hash(&dummy), hash(uptr)); EXPECT_NE(hash(&dummy), hash(csptr)); EXPECT_NE(hash(&dummy), hash(cuptr)); } TEST(EqCord, Works) { hash_default_eq eq; const absl::string_view a_string_view = "a"; const absl::Cord a_cord(a_string_view); const absl::string_view b_string_view = "b"; const absl::Cord b_cord(b_string_view); EXPECT_TRUE(eq(a_cord, a_cord)); EXPECT_TRUE(eq(a_cord, a_string_view)); EXPECT_TRUE(eq(a_string_view, a_cord)); EXPECT_FALSE(eq(a_cord, b_cord)); EXPECT_FALSE(eq(a_cord, b_string_view)); EXPECT_FALSE(eq(b_string_view, a_cord)); } TEST(HashCord, Works) { hash_default_hash hash; const absl::string_view a_string_view = "a"; const absl::Cord a_cord(a_string_view); const absl::string_view b_string_view = "b"; const absl::Cord b_cord(b_string_view); EXPECT_EQ(hash(a_cord), hash(a_cord)); EXPECT_EQ(hash(b_cord), hash(b_cord)); EXPECT_EQ(hash(a_string_view), hash(a_cord)); EXPECT_EQ(hash(b_string_view), hash(b_cord)); EXPECT_EQ(hash(absl::Cord("")), hash("")); EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view())); EXPECT_NE(hash(a_cord), hash(b_cord)); EXPECT_NE(hash(a_cord), hash(b_string_view)); EXPECT_NE(hash(a_string_view), hash(b_cord)); EXPECT_NE(hash(a_string_view), hash(b_string_view)); } void NoOpReleaser(absl::string_view data, void* arg) {} TEST(HashCord, FragmentedCordWorks) { hash_default_hash hash; absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"}); EXPECT_FALSE(c.TryFlat().has_value()); EXPECT_EQ(hash(c), hash("abc")); } TEST(HashCord, FragmentedLongCordWorks) { hash_default_hash hash; // Crete some large strings which do not fit on the stack. 
std::string a(65536, 'a'); std::string b(65536, 'b'); absl::Cord c = absl::MakeFragmentedCord({a, b}); EXPECT_FALSE(c.TryFlat().has_value()); EXPECT_EQ(hash(c), hash(a + b)); } TEST(HashCord, RandomCord) { hash_default_hash hash; auto bitgen = absl::BitGen(); for (int i = 0; i < 1000; ++i) { const int number_of_segments = absl::Uniform(bitgen, 0, 10); std::vector pieces; for (size_t s = 0; s < number_of_segments; ++s) { std::string str; str.resize(absl::Uniform(bitgen, 0, 4096)); // MSVC needed the explicit return type in the lambda. std::generate(str.begin(), str.end(), [&]() -> char { return static_cast(absl::Uniform(bitgen)); }); pieces.push_back(str); } absl::Cord c = absl::MakeFragmentedCord(pieces); EXPECT_EQ(hash(c), hash(std::string(c))); } } // Cartesian product of (std::string, absl::string_view) // with (std::string, absl::string_view, const char*, absl::Cord). using StringTypesCartesianProduct = Types< // clang-format off std::pair, std::pair, std::pair, std::pair, std::pair, std::pair, std::pair, std::pair, std::pair>; // clang-format on constexpr char kFirstString[] = "abc123"; constexpr char kSecondString[] = "ijk456"; template struct StringLikeTest : public ::testing::Test { typename T::first_type a1{kFirstString}; typename T::second_type b1{kFirstString}; typename T::first_type a2{kSecondString}; typename T::second_type b2{kSecondString}; hash_default_eq eq; hash_default_hash hash; }; TYPED_TEST_SUITE_P(StringLikeTest); TYPED_TEST_P(StringLikeTest, Eq) { EXPECT_TRUE(this->eq(this->a1, this->b1)); EXPECT_TRUE(this->eq(this->b1, this->a1)); } TYPED_TEST_P(StringLikeTest, NotEq) { EXPECT_FALSE(this->eq(this->a1, this->b2)); EXPECT_FALSE(this->eq(this->b2, this->a1)); } TYPED_TEST_P(StringLikeTest, HashEq) { EXPECT_EQ(this->hash(this->a1), this->hash(this->b1)); EXPECT_EQ(this->hash(this->a2), this->hash(this->b2)); // It would be a poor hash function which collides on these strings. 
EXPECT_NE(this->hash(this->a1), this->hash(this->b2)); } TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct); } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl enum Hash : size_t { kStd = 0x1, // std::hash #ifdef _MSC_VER kExtension = kStd, // In MSVC, std::hash == ::hash #else // _MSC_VER kExtension = 0x2, // ::hash (GCC extension) #endif // _MSC_VER }; // H is a bitmask of Hash enumerations. // Hashable is hashable via all means specified in H. template struct Hashable { static constexpr bool HashableBy(Hash h) { return h & H; } }; namespace std { template struct hash> { template , class = typename std::enable_if::type> size_t operator()(E) const { return kStd; } }; } // namespace std namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { template size_t Hash(const T& v) { return hash_default_hash()(v); } TEST(Delegate, HashDispatch) { EXPECT_EQ(Hash(kStd), Hash(Hashable())); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hash_generator_testing.cc000066400000000000000000000040221430371345100251250ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/internal/hash_generator_testing.h" #include namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_internal { namespace { class RandomDeviceSeedSeq { public: using result_type = typename std::random_device::result_type; template void generate(Iterator start, Iterator end) { while (start != end) { *start = gen_(); ++start; } } private: std::random_device gen_; }; } // namespace std::mt19937_64* GetSharedRng() { static auto* rng = [] { RandomDeviceSeedSeq seed_seq; return new std::mt19937_64(seed_seq); }(); return rng; } std::string Generator::operator()() const { // NOLINTNEXTLINE(runtime/int) std::uniform_int_distribution chars(0x20, 0x7E); std::string res; res.resize(32); std::generate(res.begin(), res.end(), [&]() { return chars(*GetSharedRng()); }); return res; } absl::string_view Generator::operator()() const { static auto* arena = new std::deque(); // NOLINTNEXTLINE(runtime/int) std::uniform_int_distribution chars(0x20, 0x7E); arena->emplace_back(); auto& res = arena->back(); res.resize(32); std::generate(res.begin(), res.end(), [&]() { return chars(*GetSharedRng()); }); return res; } } // namespace hash_internal } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hash_generator_testing.h000066400000000000000000000116561430371345100250020ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // // Generates random values for testing. Specialized only for the few types we // care about. #ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ #define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ #include #include #include #include #include #include #include #include #include #include "absl/container/internal/hash_policy_testing.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_internal { namespace generator_internal { template struct IsMap : std::false_type {}; template struct IsMap> : std::true_type {}; } // namespace generator_internal std::mt19937_64* GetSharedRng(); enum Enum { kEnumEmpty, kEnumDeleted, }; enum class EnumClass : uint64_t { kEmpty, kDeleted, }; inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) { return o << static_cast(ec); } template struct Generator; template struct Generator::value>::type> { T operator()() const { std::uniform_int_distribution dist; return dist(*GetSharedRng()); } }; template <> struct Generator { Enum operator()() const { std::uniform_int_distribution::type> dist; while (true) { auto variate = dist(*GetSharedRng()); if (variate != kEnumEmpty && variate != kEnumDeleted) return static_cast(variate); } } }; template <> struct Generator { EnumClass operator()() const { std::uniform_int_distribution< typename std::underlying_type::type> dist; while (true) { EnumClass variate = static_cast(dist(*GetSharedRng())); if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted) return static_cast(variate); } } }; template <> struct Generator { std::string operator()() const; }; template <> struct Generator { absl::string_view operator()() const; }; template <> struct Generator { NonStandardLayout operator()() const { return 
NonStandardLayout(Generator()()); } }; template struct Generator> { std::pair operator()() const { return std::pair(Generator::type>()(), Generator::type>()()); } }; template struct Generator> { std::tuple operator()() const { return std::tuple(Generator::type>()()...); } }; template struct Generator> { std::unique_ptr operator()() const { return absl::make_unique(Generator()()); } }; template struct Generator().key()), decltype(std::declval().value())>> : Generator().key())>::type, typename std::decay().value())>::type>> {}; template using GeneratedType = decltype( std::declval::value, typename Container::value_type, typename Container::key_type>::type>&>()()); // Naive wrapper that performs a linear search of previous values. // Beware this is O(SQR), which is reasonable for smaller kMaxValues. template struct UniqueGenerator { Generator gen; std::vector values; T operator()() { assert(values.size() < kMaxValues); for (;;) { T value = gen(); if (std::find(values.begin(), values.end(), value) == values.end()) { values.push_back(value); return value; } } } }; } // namespace hash_internal } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_ abseil-20220623.1/absl/container/internal/hash_policy_testing.h000066400000000000000000000124561430371345100243120ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // Utilities to help tests verify that hash tables properly handle stateful // allocators and hash functions. #ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ #define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ #include #include #include #include #include #include #include #include "absl/hash/hash.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hash_testing_internal { template struct WithId { WithId() : id_(next_id()) {} WithId(const WithId& that) : id_(that.id_) {} WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; } WithId& operator=(const WithId& that) { id_ = that.id_; return *this; } WithId& operator=(WithId&& that) { id_ = that.id_; that.id_ = 0; return *this; } size_t id() const { return id_; } friend bool operator==(const WithId& a, const WithId& b) { return a.id_ == b.id_; } friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); } protected: explicit WithId(size_t id) : id_(id) {} private: size_t id_; template static size_t next_id() { // 0 is reserved for moved from state. 
static size_t gId = 1; return gId++; } }; } // namespace hash_testing_internal struct NonStandardLayout { NonStandardLayout() {} explicit NonStandardLayout(std::string s) : value(std::move(s)) {} virtual ~NonStandardLayout() {} friend bool operator==(const NonStandardLayout& a, const NonStandardLayout& b) { return a.value == b.value; } friend bool operator!=(const NonStandardLayout& a, const NonStandardLayout& b) { return a.value != b.value; } template friend H AbslHashValue(H h, const NonStandardLayout& v) { return H::combine(std::move(h), v.value); } std::string value; }; struct StatefulTestingHash : absl::container_internal::hash_testing_internal::WithId< StatefulTestingHash> { template size_t operator()(const T& t) const { return absl::Hash{}(t); } }; struct StatefulTestingEqual : absl::container_internal::hash_testing_internal::WithId< StatefulTestingEqual> { template bool operator()(const T& t, const U& u) const { return t == u; } }; // It is expected that Alloc() == Alloc() for all allocators so we cannot use // WithId base. We need to explicitly assign ids. template struct Alloc : std::allocator { using propagate_on_container_swap = std::true_type; // Using old paradigm for this to ensure compatibility. 
explicit Alloc(size_t id = 0) : id_(id) {} Alloc(const Alloc&) = default; Alloc& operator=(const Alloc&) = default; template Alloc(const Alloc& that) : std::allocator(that), id_(that.id()) {} template struct rebind { using other = Alloc; }; size_t id() const { return id_; } friend bool operator==(const Alloc& a, const Alloc& b) { return a.id_ == b.id_; } friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); } private: size_t id_ = (std::numeric_limits::max)(); }; template auto items(const Map& m) -> std::vector< std::pair> { using std::get; std::vector> res; res.reserve(m.size()); for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v)); return res; } template auto keys(const Set& s) -> std::vector::type> { std::vector::type> res; res.reserve(s.size()); for (const auto& v : s) res.emplace_back(v); return res; } } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl // ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions // where the unordered containers are missing certain constructors that // take allocator arguments. This test is defined ad-hoc for the platforms // we care about (notably Crosstool 17) because libstdcxx's useless // versioning scheme precludes a more principled solution. // From GCC-4.9 Changelog: (src: https://gcc.gnu.org/gcc-4.9/changes.html) // "the unordered associative containers in and // meet the allocator-aware container requirements;" #if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425 ) || \ ( __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9 )) #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0 #else #define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1 #endif #endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_ abseil-20220623.1/absl/container/internal/hash_policy_testing_test.cc000066400000000000000000000024001430371345100254730ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/hash_policy_testing.h" #include "gtest/gtest.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { TEST(_, Hash) { StatefulTestingHash h1; EXPECT_EQ(1, h1.id()); StatefulTestingHash h2; EXPECT_EQ(2, h2.id()); StatefulTestingHash h1c(h1); EXPECT_EQ(1, h1c.id()); StatefulTestingHash h2m(std::move(h2)); EXPECT_EQ(2, h2m.id()); EXPECT_EQ(0, h2.id()); StatefulTestingHash h3; EXPECT_EQ(3, h3.id()); h3 = StatefulTestingHash(); EXPECT_EQ(4, h3.id()); h3 = std::move(h1); EXPECT_EQ(1, h3.id()); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hash_policy_traits.h000066400000000000000000000175551430371345100241500ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ #define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ #include #include #include #include #include #include "absl/meta/type_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // Defines how slots are initialized/destroyed/moved. template struct hash_policy_traits { // The type of the keys stored in the hashtable. using key_type = typename Policy::key_type; private: struct ReturnKey { // When C++17 is available, we can use std::launder to provide mutable // access to the key for use in node handle. #if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606 template ::value, int> = 0> static key_type& Impl(Key&& k, int) { return *std::launder( const_cast(std::addressof(std::forward(k)))); } #endif template static Key Impl(Key&& k, char) { return std::forward(k); } // When Key=T&, we forward the lvalue reference. // When Key=T, we return by value to avoid a dangling reference. // eg, for string_hash_map. template auto operator()(Key&& k, const Args&...) const -> decltype(Impl(std::forward(k), 0)) { return Impl(std::forward(k), 0); } }; template struct ConstantIteratorsImpl : std::false_type {}; template struct ConstantIteratorsImpl> : P::constant_iterators {}; public: // The actual object stored in the hash table. using slot_type = typename Policy::slot_type; // The argument type for insertions into the hashtable. This is different // from value_type for increased performance. See initializer_list constructor // and insert() member functions for more details. using init_type = typename Policy::init_type; using reference = decltype(Policy::element(std::declval())); using pointer = typename std::remove_reference::type*; using value_type = typename std::remove_reference::type; // Policies can set this variable to tell raw_hash_set that all iterators // should be constant, even `iterator`. This is useful for set-like // containers. // Defaults to false if not provided by the policy. 
using constant_iterators = ConstantIteratorsImpl<>; // PRECONDITION: `slot` is UNINITIALIZED // POSTCONDITION: `slot` is INITIALIZED template static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { Policy::construct(alloc, slot, std::forward(args)...); } // PRECONDITION: `slot` is INITIALIZED // POSTCONDITION: `slot` is UNINITIALIZED template static void destroy(Alloc* alloc, slot_type* slot) { Policy::destroy(alloc, slot); } // Transfers the `old_slot` to `new_slot`. Any memory allocated by the // allocator inside `old_slot` to `new_slot` can be transferred. // // OPTIONAL: defaults to: // // clone(new_slot, std::move(*old_slot)); // destroy(old_slot); // // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is // UNINITIALIZED template static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) { transfer_impl(alloc, new_slot, old_slot, 0); } // PRECONDITION: `slot` is INITIALIZED // POSTCONDITION: `slot` is INITIALIZED template static auto element(slot_type* slot) -> decltype(P::element(slot)) { return P::element(slot); } // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`. // // If `slot` is nullptr, returns the constant amount of memory owned by any // full slot or -1 if slots own variable amounts of memory. // // PRECONDITION: `slot` is INITIALIZED or nullptr template static size_t space_used(const slot_type* slot) { return P::space_used(slot); } // Provides generalized access to the key for elements, both for elements in // the table and for elements that have not yet been inserted (or even // constructed). We would like an API that allows us to say: `key(args...)` // but we cannot do that for all cases, so we use this more general API that // can be used for many things, including the following: // // - Given an element in a table, get its key. // - Given an element initializer, get its key. 
// - Given `emplace()` arguments, get the element key. // // Implementations of this must adhere to a very strict technical // specification around aliasing and consuming arguments: // // Let `value_type` be the result type of `element()` without ref- and // cv-qualifiers. The first argument is a functor, the rest are constructor // arguments for `value_type`. Returns `std::forward(f)(k, xs...)`, where // `k` is the element key, and `xs...` are the new constructor arguments for // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias // `ts...`. The key won't be touched once `xs...` are used to construct an // element; `ts...` won't be touched at all, which allows `apply()` to consume // any rvalues among them. // // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not // trigger a hard compile error unless it originates from `f`. In other words, // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK. // // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`, // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not. template static auto apply(F&& f, Ts&&... ts) -> decltype(P::apply(std::forward(f), std::forward(ts)...)) { return P::apply(std::forward(f), std::forward(ts)...); } // Returns the "key" portion of the slot. // Used for node handle manipulation. template static auto mutable_key(slot_type* slot) -> decltype(P::apply(ReturnKey(), element(slot))) { return P::apply(ReturnKey(), element(slot)); } // Returns the "value" (as opposed to the "key") portion of the element. Used // by maps to implement `operator[]`, `at()` and `insert_or_assign()`. template static auto value(T* elem) -> decltype(P::value(elem)) { return P::value(elem); } private: // Use auto -> decltype as an enabler. 
template static auto transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, int) -> decltype((void)P::transfer(alloc, new_slot, old_slot)) { P::transfer(alloc, new_slot, old_slot); } template static void transfer_impl(Alloc* alloc, slot_type* new_slot, slot_type* old_slot, char) { construct(alloc, new_slot, std::move(element(old_slot))); destroy(alloc, old_slot); } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_ abseil-20220623.1/absl/container/internal/hash_policy_traits_test.cc000066400000000000000000000102511430371345100253270ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/internal/hash_policy_traits.h" #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::testing::MockFunction; using ::testing::Return; using ::testing::ReturnRef; using Alloc = std::allocator; using Slot = int; struct PolicyWithoutOptionalOps { using slot_type = Slot; using key_type = Slot; using init_type = Slot; static std::function construct; static std::function destroy; static std::function element; static int apply(int v) { return apply_impl(v); } static std::function apply_impl; static std::function value; }; std::function PolicyWithoutOptionalOps::construct; std::function PolicyWithoutOptionalOps::destroy; std::function PolicyWithoutOptionalOps::element; std::function PolicyWithoutOptionalOps::apply_impl; std::function PolicyWithoutOptionalOps::value; struct PolicyWithOptionalOps : PolicyWithoutOptionalOps { static std::function transfer; }; std::function PolicyWithOptionalOps::transfer; struct Test : ::testing::Test { Test() { PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) { construct.Call(a1, a2, std::move(a3)); }; PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) { destroy.Call(a1, a2); }; PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& { return element.Call(a1); }; PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int { return apply.Call(a1); }; PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& { return value.Call(a1); }; PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) { return transfer.Call(a1, a2, a3); }; } std::allocator alloc; int a = 53; MockFunction construct; MockFunction destroy; MockFunction element; MockFunction apply; MockFunction value; MockFunction transfer; }; TEST_F(Test, construct) { EXPECT_CALL(construct, Call(&alloc, &a, 53)); hash_policy_traits::construct(&alloc, &a, 53); } TEST_F(Test, destroy) { EXPECT_CALL(destroy, 
Call(&alloc, &a)); hash_policy_traits::destroy(&alloc, &a); } TEST_F(Test, element) { int b = 0; EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b)); EXPECT_EQ(&b, &hash_policy_traits::element(&a)); } TEST_F(Test, apply) { EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337)); EXPECT_EQ(1337, (hash_policy_traits::apply(42))); } TEST_F(Test, value) { int b = 0; EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b)); EXPECT_EQ(&b, &hash_policy_traits::value(&a)); } TEST_F(Test, without_transfer) { int b = 42; EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b)); EXPECT_CALL(construct, Call(&alloc, &a, b)); EXPECT_CALL(destroy, Call(&alloc, &b)); hash_policy_traits::transfer(&alloc, &a, &b); } TEST_F(Test, with_transfer) { int b = 42; EXPECT_CALL(transfer, Call(&alloc, &a, &b)); hash_policy_traits::transfer(&alloc, &a, &b); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hashtable_debug.h000066400000000000000000000100421430371345100233410ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // This library provides APIs to debug the probing behavior of hash tables. // // In general, the probing behavior is a black box for users and only the // side effects can be measured in the form of performance differences. 
// These APIs give a glimpse on the actual behavior of the probing algorithms in // these hashtables given a specified hash function and a set of elements. // // The probe count distribution can be used to assess the quality of the hash // function for that particular hash table. Note that a hash function that // performs well in one hash table implementation does not necessarily performs // well in a different one. // // This library supports std::unordered_{set,map}, dense_hash_{set,map} and // absl::{flat,node,string}_hash_{set,map}. #ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ #define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ #include #include #include #include #include "absl/container/internal/hashtable_debug_hooks.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // Returns the number of probes required to lookup `key`. Returns 0 for a // search with no collisions. Higher values mean more hash collisions occurred; // however, the exact meaning of this number varies according to the container // type. template size_t GetHashtableDebugNumProbes( const C& c, const typename C::key_type& key) { return absl::container_internal::hashtable_debug_internal:: HashtableDebugAccess::GetNumProbes(c, key); } // Gets a histogram of the number of probes for each elements in the container. // The sum of all the values in the vector is equal to container.size(). template std::vector GetHashtableDebugNumProbesHistogram(const C& container) { std::vector v; for (auto it = container.begin(); it != container.end(); ++it) { size_t num_probes = GetHashtableDebugNumProbes( container, absl::container_internal::hashtable_debug_internal::GetKey(*it, 0)); v.resize((std::max)(v.size(), num_probes + 1)); v[num_probes]++; } return v; } struct HashtableDebugProbeSummary { size_t total_elements; size_t total_num_probes; double mean; }; // Gets a summary of the probe count distribution for the elements in the // container. 
template HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) { auto probes = GetHashtableDebugNumProbesHistogram(container); HashtableDebugProbeSummary summary = {}; for (size_t i = 0; i < probes.size(); ++i) { summary.total_elements += probes[i]; summary.total_num_probes += probes[i] * i; } summary.mean = 1.0 * summary.total_num_probes / summary.total_elements; return summary; } // Returns the number of bytes requested from the allocator by the container // and not freed. template size_t AllocatedByteSize(const C& c) { return absl::container_internal::hashtable_debug_internal:: HashtableDebugAccess::AllocatedByteSize(c); } // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C` // and `c.size()` is equal to `num_elements`. template size_t LowerBoundAllocatedByteSize(size_t num_elements) { return absl::container_internal::hashtable_debug_internal:: HashtableDebugAccess::LowerBoundAllocatedByteSize(num_elements); } } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_ abseil-20220623.1/absl/container/internal/hashtable_debug_hooks.h000066400000000000000000000056401430371345100245540ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Provides the internal API for hashtable_debug.h. 
#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ #define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ #include #include #include #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace hashtable_debug_internal { // If it is a map, call get<0>(). using std::get; template auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) { return get<0>(pair); } // If it is not a map, return the value directly. template const typename T::key_type& GetKey(const typename T::key_type& key, char) { return key; } // Containers should specialize this to provide debug information for that // container. template struct HashtableDebugAccess { // Returns the number of probes required to find `key` in `c`. The "number of // probes" is a concept that can vary by container. Implementations should // return 0 when `key` was found in the minimum number of operations and // should increment the result for each non-trivial operation required to find // `key`. // // The default implementation uses the bucket api from the standard and thus // works for `std::unordered_*` containers. static size_t GetNumProbes(const Container& c, const typename Container::key_type& key) { if (!c.bucket_count()) return {}; size_t num_probes = 0; size_t bucket = c.bucket(key); for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) { if (it == e) return num_probes; if (c.key_eq()(key, GetKey(*it, 0))) return num_probes; } } // Returns the number of bytes requested from the allocator by the container // and not freed. // // static size_t AllocatedByteSize(const Container& c); // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type // `Container` and `c.size()` is equal to `num_elements`. 
// // static size_t LowerBoundAllocatedByteSize(size_t num_elements); }; } // namespace hashtable_debug_internal } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_ abseil-20220623.1/absl/container/internal/hashtablez_sampler.cc000066400000000000000000000174661430371345100242670ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/hashtablez_sampler.h" #include #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/debugging/stacktrace.h" #include "absl/memory/memory.h" #include "absl/profiling/internal/exponential_biased.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr int HashtablezInfo::kMaxStackDepth; #endif namespace { ABSL_CONST_INIT std::atomic g_hashtablez_enabled{ false }; ABSL_CONST_INIT std::atomic g_hashtablez_sample_parameter{1 << 10}; std::atomic g_hashtablez_config_listener{nullptr}; #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased g_exponential_biased_generator; #endif void TriggerHashtablezConfigListener() { auto* listener = 
g_hashtablez_config_listener.load(std::memory_order_acquire); if (listener != nullptr) listener(); } } // namespace #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample = {0, 0}; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) HashtablezSampler& GlobalHashtablezSampler() { static auto* sampler = new HashtablezSampler(); return *sampler; } HashtablezInfo::HashtablezInfo() = default; HashtablezInfo::~HashtablezInfo() = default; void HashtablezInfo::PrepareForSampling(int64_t stride, size_t inline_element_size_value) { capacity.store(0, std::memory_order_relaxed); size.store(0, std::memory_order_relaxed); num_erases.store(0, std::memory_order_relaxed); num_rehashes.store(0, std::memory_order_relaxed); max_probe_length.store(0, std::memory_order_relaxed); total_probe_length.store(0, std::memory_order_relaxed); hashes_bitwise_or.store(0, std::memory_order_relaxed); hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed); hashes_bitwise_xor.store(0, std::memory_order_relaxed); max_reserve.store(0, std::memory_order_relaxed); create_time = absl::Now(); weight = stride; // The inliner makes hardcoded skip_count difficult (especially when combined // with LTO). We use the ability to exclude stacks by regex when encoding // instead. depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth, /* skip_count= */ 0); inline_element_size = inline_element_size_value; } static bool ShouldForceSampling() { enum ForceState { kDontForce, kForce, kUninitialized }; ABSL_CONST_INIT static std::atomic global_state{ kUninitialized}; ForceState state = global_state.load(std::memory_order_relaxed); if (ABSL_PREDICT_TRUE(state == kDontForce)) return false; if (state == kUninitialized) { state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)() ? 
kForce : kDontForce; global_state.store(state, std::memory_order_relaxed); } return state == kForce; } HashtablezInfo* SampleSlow(SamplingState& next_sample, size_t inline_element_size) { if (ABSL_PREDICT_FALSE(ShouldForceSampling())) { next_sample.next_sample = 1; const int64_t old_stride = exchange(next_sample.sample_stride, 1); HashtablezInfo* result = GlobalHashtablezSampler().Register(old_stride, inline_element_size); return result; } #if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) next_sample = { std::numeric_limits::max(), std::numeric_limits::max(), }; return nullptr; #else bool first = next_sample.next_sample < 0; const int64_t next_stride = g_exponential_biased_generator.GetStride( g_hashtablez_sample_parameter.load(std::memory_order_relaxed)); next_sample.next_sample = next_stride; const int64_t old_stride = exchange(next_sample.sample_stride, next_stride); // Small values of interval are equivalent to just sampling next time. ABSL_ASSERT(next_stride >= 1); // g_hashtablez_enabled can be dynamically flipped, we need to set a threshold // low enough that we will start sampling in a reasonable time, so we just use // the default sampling rate. if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr; // We will only be negative on our first count, so we should just retry in // that case. if (first) { if (ABSL_PREDICT_TRUE(--next_sample.next_sample > 0)) return nullptr; return SampleSlow(next_sample, inline_element_size); } return GlobalHashtablezSampler().Register(old_stride, inline_element_size); #endif } void UnsampleSlow(HashtablezInfo* info) { GlobalHashtablezSampler().Unregister(info); } void RecordInsertSlow(HashtablezInfo* info, size_t hash, size_t distance_from_desired) { // SwissTables probe in groups of 16, so scale this to count items probes and // not offset from desired. 
size_t probe_length = distance_from_desired; #ifdef ABSL_INTERNAL_HAVE_SSE2 probe_length /= 16; #else probe_length /= 8; #endif info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed); info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed); info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed); info->max_probe_length.store( std::max(info->max_probe_length.load(std::memory_order_relaxed), probe_length), std::memory_order_relaxed); info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed); info->size.fetch_add(1, std::memory_order_relaxed); } void SetHashtablezConfigListener(HashtablezConfigListener l) { g_hashtablez_config_listener.store(l, std::memory_order_release); } bool IsHashtablezEnabled() { return g_hashtablez_enabled.load(std::memory_order_acquire); } void SetHashtablezEnabled(bool enabled) { SetHashtablezEnabledInternal(enabled); TriggerHashtablezConfigListener(); } void SetHashtablezEnabledInternal(bool enabled) { g_hashtablez_enabled.store(enabled, std::memory_order_release); } int32_t GetHashtablezSampleParameter() { return g_hashtablez_sample_parameter.load(std::memory_order_acquire); } void SetHashtablezSampleParameter(int32_t rate) { SetHashtablezSampleParameterInternal(rate); TriggerHashtablezConfigListener(); } void SetHashtablezSampleParameterInternal(int32_t rate) { if (rate > 0) { g_hashtablez_sample_parameter.store(rate, std::memory_order_release); } else { ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld", static_cast(rate)); // NOLINT(runtime/int) } } int32_t GetHashtablezMaxSamples() { return GlobalHashtablezSampler().GetMaxSamples(); } void SetHashtablezMaxSamples(int32_t max) { SetHashtablezMaxSamplesInternal(max); TriggerHashtablezConfigListener(); } void SetHashtablezMaxSamplesInternal(int32_t max) { if (max > 0) { GlobalHashtablezSampler().SetMaxSamples(max); } else { ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld", static_cast(max)); // NOLINT(runtime/int) } } } 
// namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hashtablez_sampler.h000066400000000000000000000254471430371345100241270ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // ----------------------------------------------------------------------------- // File: hashtablez_sampler.h // ----------------------------------------------------------------------------- // // This header file defines the API for a low level library to sample hashtables // and collect runtime statistics about them. // // `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which // store information about a single sample. // // `Record*` methods store information into samples. // `Sample()` and `Unsample()` make use of a single global sampler with // properties controlled by the flags hashtablez_enabled, // hashtablez_sample_rate, and hashtablez_max_samples. // // WARNING // // Using this sampling API may cause sampled Swiss tables to use the global // allocator (operator `new`) in addition to any custom allocator. If you // are using a table in an unusual circumstance where allocation or calling a // linux syscall is unacceptable, this could interfere. // // This utility is internal-only. Use at your own risk. 
#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ #define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ #include #include #include #include #include "absl/base/config.h" #include "absl/base/internal/per_thread_tls.h" #include "absl/base/optimization.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/mutex.h" #include "absl/utility/utility.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // Stores information about a sampled hashtable. All mutations to this *must* // be made through `Record*` functions below. All reads from this *must* only // occur in the callback to `HashtablezSampler::Iterate`. struct HashtablezInfo : public profiling_internal::Sample { // Constructs the object but does not fill in any fields. HashtablezInfo(); ~HashtablezInfo(); HashtablezInfo(const HashtablezInfo&) = delete; HashtablezInfo& operator=(const HashtablezInfo&) = delete; // Puts the object into a clean state, fills in the logically `const` members, // blocking for any readers that are currently sampling the object. void PrepareForSampling(int64_t stride, size_t inline_element_size_value) ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu); // These fields are mutated by the various Record* APIs and need to be // thread-safe. std::atomic capacity; std::atomic size; std::atomic num_erases; std::atomic num_rehashes; std::atomic max_probe_length; std::atomic total_probe_length; std::atomic hashes_bitwise_or; std::atomic hashes_bitwise_and; std::atomic hashes_bitwise_xor; std::atomic max_reserve; // All of the fields below are set by `PrepareForSampling`, they must not be // mutated in `Record*` functions. They are logically `const` in that sense. // These are guarded by init_mu, but that is not externalized to clients, // which can read them only during `SampleRecorder::Iterate` which will hold // the lock. 
static constexpr int kMaxStackDepth = 64; absl::Time create_time; int32_t depth; void* stack[kMaxStackDepth]; size_t inline_element_size; // How big is the slot? }; inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) { #ifdef ABSL_INTERNAL_HAVE_SSE2 total_probe_length /= 16; #else total_probe_length /= 8; #endif info->total_probe_length.store(total_probe_length, std::memory_order_relaxed); info->num_erases.store(0, std::memory_order_relaxed); // There is only one concurrent writer, so `load` then `store` is sufficient // instead of using `fetch_add`. info->num_rehashes.store( 1 + info->num_rehashes.load(std::memory_order_relaxed), std::memory_order_relaxed); } inline void RecordReservationSlow(HashtablezInfo* info, size_t target_capacity) { info->max_reserve.store( (std::max)(info->max_reserve.load(std::memory_order_relaxed), target_capacity), std::memory_order_relaxed); } inline void RecordClearedReservationSlow(HashtablezInfo* info) { info->max_reserve.store(0, std::memory_order_relaxed); } inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size, size_t capacity) { info->size.store(size, std::memory_order_relaxed); info->capacity.store(capacity, std::memory_order_relaxed); if (size == 0) { // This is a clear, reset the total/num_erases too. info->total_probe_length.store(0, std::memory_order_relaxed); info->num_erases.store(0, std::memory_order_relaxed); } } void RecordInsertSlow(HashtablezInfo* info, size_t hash, size_t distance_from_desired); inline void RecordEraseSlow(HashtablezInfo* info) { info->size.fetch_sub(1, std::memory_order_relaxed); // There is only one concurrent writer, so `load` then `store` is sufficient // instead of using `fetch_add`. info->num_erases.store( 1 + info->num_erases.load(std::memory_order_relaxed), std::memory_order_relaxed); } struct SamplingState { int64_t next_sample; // When we make a sampling decision, we record that distance so we can weight // each sample. 
int64_t sample_stride; }; HashtablezInfo* SampleSlow(SamplingState& next_sample, size_t inline_element_size); void UnsampleSlow(HashtablezInfo* info); #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) class HashtablezInfoHandle { public: explicit HashtablezInfoHandle() : info_(nullptr) {} explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {} ~HashtablezInfoHandle() { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; UnsampleSlow(info_); } HashtablezInfoHandle(const HashtablezInfoHandle&) = delete; HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete; HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept : info_(absl::exchange(o.info_, nullptr)) {} HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept { if (ABSL_PREDICT_FALSE(info_ != nullptr)) { UnsampleSlow(info_); } info_ = absl::exchange(o.info_, nullptr); return *this; } inline void RecordStorageChanged(size_t size, size_t capacity) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordStorageChangedSlow(info_, size, capacity); } inline void RecordRehash(size_t total_probe_length) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordRehashSlow(info_, total_probe_length); } inline void RecordReservation(size_t target_capacity) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordReservationSlow(info_, target_capacity); } inline void RecordClearedReservation() { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordClearedReservationSlow(info_); } inline void RecordInsert(size_t hash, size_t distance_from_desired) { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordInsertSlow(info_, hash, distance_from_desired); } inline void RecordErase() { if (ABSL_PREDICT_TRUE(info_ == nullptr)) return; RecordEraseSlow(info_); } friend inline void swap(HashtablezInfoHandle& lhs, HashtablezInfoHandle& rhs) { 
std::swap(lhs.info_, rhs.info_); } private: friend class HashtablezInfoHandlePeer; HashtablezInfo* info_; }; #else // Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can // be removed by the linker, in order to reduce the binary size. class HashtablezInfoHandle { public: explicit HashtablezInfoHandle() = default; explicit HashtablezInfoHandle(std::nullptr_t) {} inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {} inline void RecordRehash(size_t /*total_probe_length*/) {} inline void RecordReservation(size_t /*target_capacity*/) {} inline void RecordClearedReservation() {} inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {} inline void RecordErase() {} friend inline void swap(HashtablezInfoHandle& /*lhs*/, HashtablezInfoHandle& /*rhs*/) {} }; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) extern ABSL_PER_THREAD_TLS_KEYWORD SamplingState global_next_sample; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) // Returns an RAII sampling handle that manages registration and unregistation // with the global sampler. inline HashtablezInfoHandle Sample( size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) if (ABSL_PREDICT_TRUE(--global_next_sample.next_sample > 0)) { return HashtablezInfoHandle(nullptr); } return HashtablezInfoHandle( SampleSlow(global_next_sample, inline_element_size)); #else return HashtablezInfoHandle(nullptr); #endif // !ABSL_PER_THREAD_TLS } using HashtablezSampler = ::absl::profiling_internal::SampleRecorder; // Returns a global Sampler. HashtablezSampler& GlobalHashtablezSampler(); using HashtablezConfigListener = void (*)(); void SetHashtablezConfigListener(HashtablezConfigListener l); // Enables or disables sampling for Swiss tables. 
bool IsHashtablezEnabled(); void SetHashtablezEnabled(bool enabled); void SetHashtablezEnabledInternal(bool enabled); // Sets the rate at which Swiss tables will be sampled. int32_t GetHashtablezSampleParameter(); void SetHashtablezSampleParameter(int32_t rate); void SetHashtablezSampleParameterInternal(int32_t rate); // Sets a soft max for the number of samples that will be kept. int32_t GetHashtablezMaxSamples(); void SetHashtablezMaxSamples(int32_t max); void SetHashtablezMaxSamplesInternal(int32_t max); // Configuration override. // This allows process-wide sampling without depending on order of // initialization of static storage duration objects. // The definition of this constant is weak, which allows us to inject a // different value for it at link time. extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)(); } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_ abseil-20220623.1/absl/container/internal/hashtablez_sampler_force_weak_definition.cc000066400000000000000000000017531430371345100306540ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/hashtablez_sampler.h" #include "absl/base/attributes.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // See hashtablez_sampler.h for details. 
extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL( AbslContainerInternalSampleEverything)() { return false; } } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/hashtablez_sampler_test.cc000066400000000000000000000337001430371345100253130ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/hashtablez_sampler.h" #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/profiling/internal/sample_recorder.h" #include "absl/synchronization/blocking_counter.h" #include "absl/synchronization/internal/thread_pool.h" #include "absl/synchronization/mutex.h" #include "absl/synchronization/notification.h" #include "absl/time/clock.h" #include "absl/time/time.h" #ifdef ABSL_INTERNAL_HAVE_SSE2 constexpr int kProbeLength = 16; #else constexpr int kProbeLength = 8; #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) class HashtablezInfoHandlePeer { public: static bool IsSampled(const HashtablezInfoHandle& h) { return h.info_ != nullptr; } static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; } }; #else class HashtablezInfoHandlePeer { public: static bool IsSampled(const HashtablezInfoHandle&) { return false; } static HashtablezInfo* 
GetInfo(HashtablezInfoHandle*) { return nullptr; } }; #endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) namespace { using ::absl::synchronization_internal::ThreadPool; using ::testing::IsEmpty; using ::testing::UnorderedElementsAre; std::vector GetSizes(HashtablezSampler* s) { std::vector res; s->Iterate([&](const HashtablezInfo& info) { res.push_back(info.size.load(std::memory_order_acquire)); }); return res; } HashtablezInfo* Register(HashtablezSampler* s, size_t size) { const int64_t test_stride = 123; const size_t test_element_size = 17; auto* info = s->Register(test_stride, test_element_size); assert(info != nullptr); info->size.store(size); return info; } TEST(HashtablezInfoTest, PrepareForSampling) { absl::Time test_start = absl::Now(); const int64_t test_stride = 123; const size_t test_element_size = 17; HashtablezInfo info; absl::MutexLock l(&info.init_mu); info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0); EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); EXPECT_EQ(info.max_reserve.load(), 0); EXPECT_GE(info.create_time, test_start); EXPECT_EQ(info.weight, test_stride); EXPECT_EQ(info.inline_element_size, test_element_size); info.capacity.store(1, std::memory_order_relaxed); info.size.store(1, std::memory_order_relaxed); info.num_erases.store(1, std::memory_order_relaxed); info.max_probe_length.store(1, std::memory_order_relaxed); info.total_probe_length.store(1, std::memory_order_relaxed); info.hashes_bitwise_or.store(1, std::memory_order_relaxed); info.hashes_bitwise_and.store(1, std::memory_order_relaxed); info.hashes_bitwise_xor.store(1, std::memory_order_relaxed); info.max_reserve.store(1, std::memory_order_relaxed); 
info.create_time = test_start - absl::Hours(20); info.PrepareForSampling(test_stride * 2, test_element_size); EXPECT_EQ(info.capacity.load(), 0); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 0); EXPECT_EQ(info.max_probe_length.load(), 0); EXPECT_EQ(info.total_probe_length.load(), 0); EXPECT_EQ(info.hashes_bitwise_or.load(), 0); EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{}); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0); EXPECT_EQ(info.max_reserve.load(), 0); EXPECT_EQ(info.weight, 2 * test_stride); EXPECT_EQ(info.inline_element_size, test_element_size); EXPECT_GE(info.create_time, test_start); } TEST(HashtablezInfoTest, RecordStorageChanged) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); const int64_t test_stride = 21; const size_t test_element_size = 19; info.PrepareForSampling(test_stride, test_element_size); RecordStorageChangedSlow(&info, 17, 47); EXPECT_EQ(info.size.load(), 17); EXPECT_EQ(info.capacity.load(), 47); RecordStorageChangedSlow(&info, 20, 20); EXPECT_EQ(info.size.load(), 20); EXPECT_EQ(info.capacity.load(), 20); } TEST(HashtablezInfoTest, RecordInsert) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); const int64_t test_stride = 25; const size_t test_element_size = 23; info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.max_probe_length.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00); RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength); EXPECT_EQ(info.max_probe_length.load(), 6); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00); RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength); 
EXPECT_EQ(info.max_probe_length.load(), 12); EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000); EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00); EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00); } TEST(HashtablezInfoTest, RecordErase) { const int64_t test_stride = 31; const size_t test_element_size = 29; HashtablezInfo info; absl::MutexLock l(&info.init_mu); info.PrepareForSampling(test_stride, test_element_size); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.size.load(), 0); RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength); EXPECT_EQ(info.size.load(), 1); RecordEraseSlow(&info); EXPECT_EQ(info.size.load(), 0); EXPECT_EQ(info.num_erases.load(), 1); EXPECT_EQ(info.inline_element_size, test_element_size); } TEST(HashtablezInfoTest, RecordRehash) { const int64_t test_stride = 33; const size_t test_element_size = 31; HashtablezInfo info; absl::MutexLock l(&info.init_mu); info.PrepareForSampling(test_stride, test_element_size); RecordInsertSlow(&info, 0x1, 0); RecordInsertSlow(&info, 0x2, kProbeLength); RecordInsertSlow(&info, 0x4, kProbeLength); RecordInsertSlow(&info, 0x8, 2 * kProbeLength); EXPECT_EQ(info.size.load(), 4); EXPECT_EQ(info.total_probe_length.load(), 4); RecordEraseSlow(&info); RecordEraseSlow(&info); EXPECT_EQ(info.size.load(), 2); EXPECT_EQ(info.total_probe_length.load(), 4); EXPECT_EQ(info.num_erases.load(), 2); RecordRehashSlow(&info, 3 * kProbeLength); EXPECT_EQ(info.size.load(), 2); EXPECT_EQ(info.total_probe_length.load(), 3); EXPECT_EQ(info.num_erases.load(), 0); EXPECT_EQ(info.num_rehashes.load(), 1); EXPECT_EQ(info.inline_element_size, test_element_size); } TEST(HashtablezInfoTest, RecordReservation) { HashtablezInfo info; absl::MutexLock l(&info.init_mu); const int64_t test_stride = 35; const size_t test_element_size = 33; info.PrepareForSampling(test_stride, test_element_size); RecordReservationSlow(&info, 3); EXPECT_EQ(info.max_reserve.load(), 3); RecordReservationSlow(&info, 2); // High watermark does not change 
EXPECT_EQ(info.max_reserve.load(), 3); RecordReservationSlow(&info, 10); // High watermark does change EXPECT_EQ(info.max_reserve.load(), 10); } #if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE) TEST(HashtablezSamplerTest, SmallSampleParameter) { const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); for (int i = 0; i < 1000; ++i) { SamplingState next_sample = {0, 0}; HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); EXPECT_GT(next_sample.next_sample, 0); EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, LargeSampleParameter) { const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(std::numeric_limits::max()); for (int i = 0; i < 1000; ++i) { SamplingState next_sample = {0, 0}; HashtablezInfo* sample = SampleSlow(next_sample, test_element_size); EXPECT_GT(next_sample.next_sample, 0); EXPECT_EQ(next_sample.next_sample, next_sample.sample_stride); EXPECT_NE(sample, nullptr); UnsampleSlow(sample); } } TEST(HashtablezSamplerTest, Sample) { const size_t test_element_size = 31; SetHashtablezEnabled(true); SetHashtablezSampleParameter(100); int64_t num_sampled = 0; int64_t total = 0; double sample_rate = 0.0; for (int i = 0; i < 1000000; ++i) { HashtablezInfoHandle h = Sample(test_element_size); ++total; if (HashtablezInfoHandlePeer::IsSampled(h)) { ++num_sampled; } sample_rate = static_cast(num_sampled) / total; if (0.005 < sample_rate && sample_rate < 0.015) break; } EXPECT_NEAR(sample_rate, 0.01, 0.005); } TEST(HashtablezSamplerTest, Handle) { auto& sampler = GlobalHashtablezSampler(); const int64_t test_stride = 41; const size_t test_element_size = 39; HashtablezInfoHandle h(sampler.Register(test_stride, test_element_size)); auto* info = HashtablezInfoHandlePeer::GetInfo(&h); info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed); bool found = false; 
sampler.Iterate([&](const HashtablezInfo& h) { if (&h == info) { EXPECT_EQ(h.weight, test_stride); EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678); found = true; } }); EXPECT_TRUE(found); h = HashtablezInfoHandle(); found = false; sampler.Iterate([&](const HashtablezInfo& h) { if (&h == info) { // this will only happen if some other thread has resurrected the info // the old handle was using. if (h.hashes_bitwise_and.load() == 0x12345678) { found = true; } } }); EXPECT_FALSE(found); } #endif TEST(HashtablezSamplerTest, Registration) { HashtablezSampler sampler; auto* info1 = Register(&sampler, 1); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1)); auto* info2 = Register(&sampler, 2); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2)); info1->size.store(3); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2)); sampler.Unregister(info1); sampler.Unregister(info2); } TEST(HashtablezSamplerTest, Unregistration) { HashtablezSampler sampler; std::vector infos; for (size_t i = 0; i < 3; ++i) { infos.push_back(Register(&sampler, i)); } EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2)); sampler.Unregister(infos[1]); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2)); infos.push_back(Register(&sampler, 3)); infos.push_back(Register(&sampler, 4)); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4)); sampler.Unregister(infos[3]); EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4)); sampler.Unregister(infos[0]); sampler.Unregister(infos[2]); sampler.Unregister(infos[4]); EXPECT_THAT(GetSizes(&sampler), IsEmpty()); } TEST(HashtablezSamplerTest, MultiThreaded) { HashtablezSampler sampler; Notification stop; ThreadPool pool(10); for (int i = 0; i < 10; ++i) { const int64_t sampling_stride = 11 + i % 3; const size_t elt_size = 10 + i % 2; pool.Schedule([&sampler, &stop, sampling_stride, elt_size]() { std::random_device rd; std::mt19937 gen(rd()); std::vector infoz; while (!stop.HasBeenNotified()) { if 
(infoz.empty()) { infoz.push_back(sampler.Register(sampling_stride, elt_size)); } switch (std::uniform_int_distribution<>(0, 2)(gen)) { case 0: { infoz.push_back(sampler.Register(sampling_stride, elt_size)); break; } case 1: { size_t p = std::uniform_int_distribution<>(0, infoz.size() - 1)(gen); HashtablezInfo* info = infoz[p]; infoz[p] = infoz.back(); infoz.pop_back(); EXPECT_EQ(info->weight, sampling_stride); sampler.Unregister(info); break; } case 2: { absl::Duration oldest = absl::ZeroDuration(); sampler.Iterate([&](const HashtablezInfo& info) { oldest = std::max(oldest, absl::Now() - info.create_time); }); ASSERT_GE(oldest, absl::ZeroDuration()); break; } } } }); } // The threads will hammer away. Give it a little bit of time for tsan to // spot errors. absl::SleepFor(absl::Seconds(3)); stop.Notify(); } TEST(HashtablezSamplerTest, Callback) { HashtablezSampler sampler; auto* info1 = Register(&sampler, 1); auto* info2 = Register(&sampler, 2); static const HashtablezInfo* expected; auto callback = [](const HashtablezInfo& info) { // We can't use `info` outside of this callback because the object will be // disposed as soon as we return from here. EXPECT_EQ(&info, expected); }; // Set the callback. EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr); expected = info1; sampler.Unregister(info1); // Unset the callback. EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr)); expected = nullptr; // no more calls. sampler.Unregister(info2); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/inlined_vector.h000066400000000000000000000774571430371345100232730ustar00rootroot00000000000000// Copyright 2019 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ #define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ #include #include #include #include #include #include #include #include #include #include "absl/base/attributes.h" #include "absl/base/macros.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/types/span.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace inlined_vector_internal { // GCC does not deal very well with the below code #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Warray-bounds" #endif template using AllocatorTraits = std::allocator_traits; template using ValueType = typename AllocatorTraits::value_type; template using SizeType = typename AllocatorTraits::size_type; template using Pointer = typename AllocatorTraits::pointer; template using ConstPointer = typename AllocatorTraits::const_pointer; template using SizeType = typename AllocatorTraits::size_type; template using DifferenceType = typename AllocatorTraits::difference_type; template using Reference = ValueType&; template using ConstReference = const ValueType&; template using Iterator = Pointer; template using ConstIterator = ConstPointer; template using ReverseIterator = typename std::reverse_iterator>; template using ConstReverseIterator = typename std::reverse_iterator>; template using MoveIterator = typename std::move_iterator>; template using IsAtLeastForwardIterator = std::is_convertible< typename 
std::iterator_traits::iterator_category, std::forward_iterator_tag>; template using IsMemcpyOk = absl::conjunction>>, absl::is_trivially_copy_constructible>, absl::is_trivially_copy_assignable>, absl::is_trivially_destructible>>; template struct TypeIdentity { using type = T; }; // Used for function arguments in template functions to prevent ADL by forcing // callers to explicitly specify the template parameter. template using NoTypeDeduction = typename TypeIdentity::type; template >::value> struct DestroyAdapter; template struct DestroyAdapter { static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) { for (SizeType i = destroy_size; i != 0;) { --i; AllocatorTraits::destroy(allocator, destroy_first + i); } } }; template struct DestroyAdapter { static void DestroyElements(A& allocator, Pointer destroy_first, SizeType destroy_size) { static_cast(allocator); static_cast(destroy_first); static_cast(destroy_size); } }; template struct Allocation { Pointer data; SizeType capacity; }; template ) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)> struct MallocAdapter { static Allocation Allocate(A& allocator, SizeType requested_capacity) { return {AllocatorTraits::allocate(allocator, requested_capacity), requested_capacity}; } static void Deallocate(A& allocator, Pointer pointer, SizeType capacity) { AllocatorTraits::deallocate(allocator, pointer, capacity); } }; template void ConstructElements(NoTypeDeduction& allocator, Pointer construct_first, ValueAdapter& values, SizeType construct_size) { for (SizeType i = 0; i < construct_size; ++i) { ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); } ABSL_INTERNAL_CATCH_ANY { DestroyAdapter::DestroyElements(allocator, construct_first, i); ABSL_INTERNAL_RETHROW; } } } template void AssignElements(Pointer assign_first, ValueAdapter& values, SizeType assign_size) { for (SizeType i = 0; i < assign_size; ++i) { values.AssignNext(assign_first + i); } } template struct StorageView { Pointer 
data; SizeType size; SizeType capacity; }; template class IteratorValueAdapter { public: explicit IteratorValueAdapter(const Iterator& it) : it_(it) {} void ConstructNext(A& allocator, Pointer construct_at) { AllocatorTraits::construct(allocator, construct_at, *it_); ++it_; } void AssignNext(Pointer assign_at) { *assign_at = *it_; ++it_; } private: Iterator it_; }; template class CopyValueAdapter { public: explicit CopyValueAdapter(ConstPointer p) : ptr_(p) {} void ConstructNext(A& allocator, Pointer construct_at) { AllocatorTraits::construct(allocator, construct_at, *ptr_); } void AssignNext(Pointer assign_at) { *assign_at = *ptr_; } private: ConstPointer ptr_; }; template class DefaultValueAdapter { public: explicit DefaultValueAdapter() {} void ConstructNext(A& allocator, Pointer construct_at) { AllocatorTraits::construct(allocator, construct_at); } void AssignNext(Pointer assign_at) { *assign_at = ValueType(); } }; template class AllocationTransaction { public: explicit AllocationTransaction(A& allocator) : allocator_data_(allocator, nullptr), capacity_(0) {} ~AllocationTransaction() { if (DidAllocate()) { MallocAdapter::Deallocate(GetAllocator(), GetData(), GetCapacity()); } } AllocationTransaction(const AllocationTransaction&) = delete; void operator=(const AllocationTransaction&) = delete; A& GetAllocator() { return allocator_data_.template get<0>(); } Pointer& GetData() { return allocator_data_.template get<1>(); } SizeType& GetCapacity() { return capacity_; } bool DidAllocate() { return GetData() != nullptr; } Pointer Allocate(SizeType requested_capacity) { Allocation result = MallocAdapter::Allocate(GetAllocator(), requested_capacity); GetData() = result.data; GetCapacity() = result.capacity; return result.data; } ABSL_MUST_USE_RESULT Allocation Release() && { Allocation result = {GetData(), GetCapacity()}; Reset(); return result; } private: void Reset() { GetData() = nullptr; GetCapacity() = 0; } container_internal::CompressedTuple> allocator_data_; 
SizeType capacity_; }; template class ConstructionTransaction { public: explicit ConstructionTransaction(A& allocator) : allocator_data_(allocator, nullptr), size_(0) {} ~ConstructionTransaction() { if (DidConstruct()) { DestroyAdapter::DestroyElements(GetAllocator(), GetData(), GetSize()); } } ConstructionTransaction(const ConstructionTransaction&) = delete; void operator=(const ConstructionTransaction&) = delete; A& GetAllocator() { return allocator_data_.template get<0>(); } Pointer& GetData() { return allocator_data_.template get<1>(); } SizeType& GetSize() { return size_; } bool DidConstruct() { return GetData() != nullptr; } template void Construct(Pointer data, ValueAdapter& values, SizeType size) { ConstructElements(GetAllocator(), data, values, size); GetData() = data; GetSize() = size; } void Commit() && { GetData() = nullptr; GetSize() = 0; } private: container_internal::CompressedTuple> allocator_data_; SizeType size_; }; template class Storage { public: static SizeType NextCapacity(SizeType current_capacity) { return current_capacity * 2; } static SizeType ComputeCapacity(SizeType current_capacity, SizeType requested_capacity) { return (std::max)(NextCapacity(current_capacity), requested_capacity); } // --------------------------------------------------------------------------- // Storage Constructors and Destructor // --------------------------------------------------------------------------- Storage() : metadata_(A(), /* size and is_allocated */ 0u) {} explicit Storage(const A& allocator) : metadata_(allocator, /* size and is_allocated */ 0u) {} ~Storage() { if (GetSizeAndIsAllocated() == 0) { // Empty and not allocated; nothing to do. } else if (IsMemcpyOk::value) { // No destructors need to be run; just deallocate if necessary. 
DeallocateIfAllocated(); } else { DestroyContents(); } } // --------------------------------------------------------------------------- // Storage Member Accessors // --------------------------------------------------------------------------- SizeType& GetSizeAndIsAllocated() { return metadata_.template get<1>(); } const SizeType& GetSizeAndIsAllocated() const { return metadata_.template get<1>(); } SizeType GetSize() const { return GetSizeAndIsAllocated() >> 1; } bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; } Pointer GetAllocatedData() { return data_.allocated.allocated_data; } ConstPointer GetAllocatedData() const { return data_.allocated.allocated_data; } Pointer GetInlinedData() { return reinterpret_cast>( std::addressof(data_.inlined.inlined_data[0])); } ConstPointer GetInlinedData() const { return reinterpret_cast>( std::addressof(data_.inlined.inlined_data[0])); } SizeType GetAllocatedCapacity() const { return data_.allocated.allocated_capacity; } SizeType GetInlinedCapacity() const { return static_cast>(N); } StorageView MakeStorageView() { return GetIsAllocated() ? StorageView{GetAllocatedData(), GetSize(), GetAllocatedCapacity()} : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()}; } A& GetAllocator() { return metadata_.template get<0>(); } const A& GetAllocator() const { return metadata_.template get<0>(); } // --------------------------------------------------------------------------- // Storage Member Mutators // --------------------------------------------------------------------------- ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other); template void Initialize(ValueAdapter values, SizeType new_size); template void Assign(ValueAdapter values, SizeType new_size); template void Resize(ValueAdapter values, SizeType new_size); template Iterator Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count); template Reference EmplaceBack(Args&&... 
args); Iterator Erase(ConstIterator from, ConstIterator to); void Reserve(SizeType requested_capacity); void ShrinkToFit(); void Swap(Storage* other_storage_ptr); void SetIsAllocated() { GetSizeAndIsAllocated() |= static_cast>(1); } void UnsetIsAllocated() { GetSizeAndIsAllocated() &= ((std::numeric_limits>::max)() - 1); } void SetSize(SizeType size) { GetSizeAndIsAllocated() = (size << 1) | static_cast>(GetIsAllocated()); } void SetAllocatedSize(SizeType size) { GetSizeAndIsAllocated() = (size << 1) | static_cast>(1); } void SetInlinedSize(SizeType size) { GetSizeAndIsAllocated() = size << static_cast>(1); } void AddSize(SizeType count) { GetSizeAndIsAllocated() += count << static_cast>(1); } void SubtractSize(SizeType count) { ABSL_HARDENING_ASSERT(count <= GetSize()); GetSizeAndIsAllocated() -= count << static_cast>(1); } void SetAllocation(Allocation allocation) { data_.allocated.allocated_data = allocation.data; data_.allocated.allocated_capacity = allocation.capacity; } void MemcpyFrom(const Storage& other_storage) { ABSL_HARDENING_ASSERT(IsMemcpyOk::value || other_storage.GetIsAllocated()); GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated(); data_ = other_storage.data_; } void DeallocateIfAllocated() { if (GetIsAllocated()) { MallocAdapter::Deallocate(GetAllocator(), GetAllocatedData(), GetAllocatedCapacity()); } } private: ABSL_ATTRIBUTE_NOINLINE void DestroyContents(); using Metadata = container_internal::CompressedTuple>; struct Allocated { Pointer allocated_data; SizeType allocated_capacity; }; struct Inlined { alignas(ValueType) char inlined_data[sizeof(ValueType[N])]; }; union Data { Allocated allocated; Inlined inlined; }; template ABSL_ATTRIBUTE_NOINLINE Reference EmplaceBackSlow(Args&&... args); Metadata metadata_; Data data_; }; template void Storage::DestroyContents() { Pointer data = GetIsAllocated() ? 
GetAllocatedData() : GetInlinedData(); DestroyAdapter::DestroyElements(GetAllocator(), data, GetSize()); DeallocateIfAllocated(); } template void Storage::InitFrom(const Storage& other) { const SizeType n = other.GetSize(); ABSL_HARDENING_ASSERT(n > 0); // Empty sources handled handled in caller. ConstPointer src; Pointer dst; if (!other.GetIsAllocated()) { dst = GetInlinedData(); src = other.GetInlinedData(); } else { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), n); Allocation allocation = MallocAdapter::Allocate(GetAllocator(), requested_capacity); SetAllocation(allocation); dst = allocation.data; src = other.GetAllocatedData(); } if (IsMemcpyOk::value) { std::memcpy(reinterpret_cast(dst), reinterpret_cast(src), n * sizeof(ValueType)); } else { auto values = IteratorValueAdapter>(src); ConstructElements(GetAllocator(), dst, values, n); } GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated(); } template template auto Storage::Initialize(ValueAdapter values, SizeType new_size) -> void { // Only callable from constructors! ABSL_HARDENING_ASSERT(!GetIsAllocated()); ABSL_HARDENING_ASSERT(GetSize() == 0); Pointer construct_data; if (new_size > GetInlinedCapacity()) { // Because this is only called from the `InlinedVector` constructors, it's // safe to take on the allocation with size `0`. If `ConstructElements(...)` // throws, deallocation will be automatically handled by `~Storage()`. 
SizeType requested_capacity = ComputeCapacity(GetInlinedCapacity(), new_size); Allocation allocation = MallocAdapter::Allocate(GetAllocator(), requested_capacity); construct_data = allocation.data; SetAllocation(allocation); SetIsAllocated(); } else { construct_data = GetInlinedData(); } ConstructElements(GetAllocator(), construct_data, values, new_size); // Since the initial size was guaranteed to be `0` and the allocated bit is // already correct for either case, *adding* `new_size` gives us the correct // result faster than setting it directly. AddSize(new_size); } template template auto Storage::Assign(ValueAdapter values, SizeType new_size) -> void { StorageView storage_view = MakeStorageView(); AllocationTransaction allocation_tx(GetAllocator()); absl::Span> assign_loop; absl::Span> construct_loop; absl::Span> destroy_loop; if (new_size > storage_view.capacity) { SizeType requested_capacity = ComputeCapacity(storage_view.capacity, new_size); construct_loop = {allocation_tx.Allocate(requested_capacity), new_size}; destroy_loop = {storage_view.data, storage_view.size}; } else if (new_size > storage_view.size) { assign_loop = {storage_view.data, storage_view.size}; construct_loop = {storage_view.data + storage_view.size, new_size - storage_view.size}; } else { assign_loop = {storage_view.data, new_size}; destroy_loop = {storage_view.data + new_size, storage_view.size - new_size}; } AssignElements(assign_loop.data(), values, assign_loop.size()); ConstructElements(GetAllocator(), construct_loop.data(), values, construct_loop.size()); DestroyAdapter::DestroyElements(GetAllocator(), destroy_loop.data(), destroy_loop.size()); if (allocation_tx.DidAllocate()) { DeallocateIfAllocated(); SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } SetSize(new_size); } template template auto Storage::Resize(ValueAdapter values, SizeType new_size) -> void { StorageView storage_view = MakeStorageView(); Pointer const base = storage_view.data; const SizeType size 
= storage_view.size; A& alloc = GetAllocator(); if (new_size <= size) { // Destroy extra old elements. DestroyAdapter::DestroyElements(alloc, base + new_size, size - new_size); } else if (new_size <= storage_view.capacity) { // Construct new elements in place. ConstructElements(alloc, base + size, values, new_size - size); } else { // Steps: // a. Allocate new backing store. // b. Construct new elements in new backing store. // c. Move existing elements from old backing store to new backing store. // d. Destroy all elements in old backing store. // Use transactional wrappers for the first two steps so we can roll // back if necessary due to exceptions. AllocationTransaction allocation_tx(alloc); SizeType requested_capacity = ComputeCapacity(storage_view.capacity, new_size); Pointer new_data = allocation_tx.Allocate(requested_capacity); ConstructionTransaction construction_tx(alloc); construction_tx.Construct(new_data + size, values, new_size - size); IteratorValueAdapter> move_values( (MoveIterator(base))); ConstructElements(alloc, new_data, move_values, size); DestroyAdapter::DestroyElements(alloc, base, size); std::move(construction_tx).Commit(); DeallocateIfAllocated(); SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } SetSize(new_size); } template template auto Storage::Insert(ConstIterator pos, ValueAdapter values, SizeType insert_count) -> Iterator { StorageView storage_view = MakeStorageView(); SizeType insert_index = std::distance(ConstIterator(storage_view.data), pos); SizeType insert_end_index = insert_index + insert_count; SizeType new_size = storage_view.size + insert_count; if (new_size > storage_view.capacity) { AllocationTransaction allocation_tx(GetAllocator()); ConstructionTransaction construction_tx(GetAllocator()); ConstructionTransaction move_construction_tx(GetAllocator()); IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); SizeType requested_capacity = ComputeCapacity(storage_view.capacity, new_size); 
Pointer new_data = allocation_tx.Allocate(requested_capacity); construction_tx.Construct(new_data + insert_index, values, insert_count); move_construction_tx.Construct(new_data, move_values, insert_index); ConstructElements(GetAllocator(), new_data + insert_end_index, move_values, storage_view.size - insert_index); DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); std::move(construction_tx).Commit(); std::move(move_construction_tx).Commit(); DeallocateIfAllocated(); SetAllocation(std::move(allocation_tx).Release()); SetAllocatedSize(new_size); return Iterator(new_data + insert_index); } else { SizeType move_construction_destination_index = (std::max)(insert_end_index, storage_view.size); ConstructionTransaction move_construction_tx(GetAllocator()); IteratorValueAdapter> move_construction_values( MoveIterator(storage_view.data + (move_construction_destination_index - insert_count))); absl::Span> move_construction = { storage_view.data + move_construction_destination_index, new_size - move_construction_destination_index}; Pointer move_assignment_values = storage_view.data + insert_index; absl::Span> move_assignment = { storage_view.data + insert_end_index, move_construction_destination_index - insert_end_index}; absl::Span> insert_assignment = {move_assignment_values, move_construction.size()}; absl::Span> insert_construction = { insert_assignment.data() + insert_assignment.size(), insert_count - insert_assignment.size()}; move_construction_tx.Construct(move_construction.data(), move_construction_values, move_construction.size()); for (Pointer destination = move_assignment.data() + move_assignment.size(), last_destination = move_assignment.data(), source = move_assignment_values + move_assignment.size(); ;) { --destination; --source; if (destination < last_destination) break; *destination = std::move(*source); } AssignElements(insert_assignment.data(), values, insert_assignment.size()); ConstructElements(GetAllocator(), 
insert_construction.data(), values, insert_construction.size()); std::move(move_construction_tx).Commit(); AddSize(insert_count); return Iterator(storage_view.data + insert_index); } } template template auto Storage::EmplaceBack(Args&&... args) -> Reference { StorageView storage_view = MakeStorageView(); const SizeType n = storage_view.size; if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) { // Fast path; new element fits. Pointer last_ptr = storage_view.data + n; AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); AddSize(1); return *last_ptr; } // TODO(b/173712035): Annotate with musttail attribute to prevent regression. return EmplaceBackSlow(std::forward(args)...); } template template auto Storage::EmplaceBackSlow(Args&&... args) -> Reference { StorageView storage_view = MakeStorageView(); AllocationTransaction allocation_tx(GetAllocator()); IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); SizeType requested_capacity = NextCapacity(storage_view.capacity); Pointer construct_data = allocation_tx.Allocate(requested_capacity); Pointer last_ptr = construct_data + storage_view.size; // Construct new element. AllocatorTraits::construct(GetAllocator(), last_ptr, std::forward(args)...); // Move elements from old backing store to new backing store. ABSL_INTERNAL_TRY { ConstructElements(GetAllocator(), allocation_tx.GetData(), move_values, storage_view.size); } ABSL_INTERNAL_CATCH_ANY { AllocatorTraits::destroy(GetAllocator(), last_ptr); ABSL_INTERNAL_RETHROW; } // Destroy elements in old backing store. 
DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); DeallocateIfAllocated(); SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); AddSize(1); return *last_ptr; } template auto Storage::Erase(ConstIterator from, ConstIterator to) -> Iterator { StorageView storage_view = MakeStorageView(); SizeType erase_size = std::distance(from, to); SizeType erase_index = std::distance(ConstIterator(storage_view.data), from); SizeType erase_end_index = erase_index + erase_size; IteratorValueAdapter> move_values( MoveIterator(storage_view.data + erase_end_index)); AssignElements(storage_view.data + erase_index, move_values, storage_view.size - erase_end_index); DestroyAdapter::DestroyElements( GetAllocator(), storage_view.data + (storage_view.size - erase_size), erase_size); SubtractSize(erase_size); return Iterator(storage_view.data + erase_index); } template auto Storage::Reserve(SizeType requested_capacity) -> void { StorageView storage_view = MakeStorageView(); if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return; AllocationTransaction allocation_tx(GetAllocator()); IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); SizeType new_requested_capacity = ComputeCapacity(storage_view.capacity, requested_capacity); Pointer new_data = allocation_tx.Allocate(new_requested_capacity); ConstructElements(GetAllocator(), new_data, move_values, storage_view.size); DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); DeallocateIfAllocated(); SetAllocation(std::move(allocation_tx).Release()); SetIsAllocated(); } template auto Storage::ShrinkToFit() -> void { // May only be called on allocated instances! 
ABSL_HARDENING_ASSERT(GetIsAllocated()); StorageView storage_view{GetAllocatedData(), GetSize(), GetAllocatedCapacity()}; if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return; AllocationTransaction allocation_tx(GetAllocator()); IteratorValueAdapter> move_values( MoveIterator(storage_view.data)); Pointer construct_data; if (storage_view.size > GetInlinedCapacity()) { SizeType requested_capacity = storage_view.size; construct_data = allocation_tx.Allocate(requested_capacity); if (allocation_tx.GetCapacity() >= storage_view.capacity) { // Already using the smallest available heap allocation. return; } } else { construct_data = GetInlinedData(); } ABSL_INTERNAL_TRY { ConstructElements(GetAllocator(), construct_data, move_values, storage_view.size); } ABSL_INTERNAL_CATCH_ANY { SetAllocation({storage_view.data, storage_view.capacity}); ABSL_INTERNAL_RETHROW; } DestroyAdapter::DestroyElements(GetAllocator(), storage_view.data, storage_view.size); MallocAdapter::Deallocate(GetAllocator(), storage_view.data, storage_view.capacity); if (allocation_tx.DidAllocate()) { SetAllocation(std::move(allocation_tx).Release()); } else { UnsetIsAllocated(); } } template auto Storage::Swap(Storage* other_storage_ptr) -> void { using std::swap; ABSL_HARDENING_ASSERT(this != other_storage_ptr); if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) { swap(data_.allocated, other_storage_ptr->data_.allocated); } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) { Storage* small_ptr = this; Storage* large_ptr = other_storage_ptr; if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr); for (SizeType i = 0; i < small_ptr->GetSize(); ++i) { swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]); } IteratorValueAdapter> move_values( MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize())); ConstructElements(large_ptr->GetAllocator(), small_ptr->GetInlinedData() + small_ptr->GetSize(), move_values, 
large_ptr->GetSize() - small_ptr->GetSize()); DestroyAdapter::DestroyElements( large_ptr->GetAllocator(), large_ptr->GetInlinedData() + small_ptr->GetSize(), large_ptr->GetSize() - small_ptr->GetSize()); } else { Storage* allocated_ptr = this; Storage* inlined_ptr = other_storage_ptr; if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr); StorageView allocated_storage_view{ allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(), allocated_ptr->GetAllocatedCapacity()}; IteratorValueAdapter> move_values( MoveIterator(inlined_ptr->GetInlinedData())); ABSL_INTERNAL_TRY { ConstructElements(inlined_ptr->GetAllocator(), allocated_ptr->GetInlinedData(), move_values, inlined_ptr->GetSize()); } ABSL_INTERNAL_CATCH_ANY { allocated_ptr->SetAllocation(Allocation{ allocated_storage_view.data, allocated_storage_view.capacity}); ABSL_INTERNAL_RETHROW; } DestroyAdapter::DestroyElements(inlined_ptr->GetAllocator(), inlined_ptr->GetInlinedData(), inlined_ptr->GetSize()); inlined_ptr->SetAllocation(Allocation{allocated_storage_view.data, allocated_storage_view.capacity}); } swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated()); swap(GetAllocator(), other_storage_ptr->GetAllocator()); } // End ignore "array-bounds" #if !defined(__clang__) && defined(__GNUC__) #pragma GCC diagnostic pop #endif } // namespace inlined_vector_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_ abseil-20220623.1/absl/container/internal/layout.h000066400000000000000000000651001430371345100215620ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // MOTIVATION AND TUTORIAL // // If you want to put in a single heap allocation N doubles followed by M ints, // it's easy if N and M are known at compile time. // // struct S { // double a[N]; // int b[M]; // }; // // S* p = new S; // // But what if N and M are known only in run time? Class template Layout to the // rescue! It's a portable generalization of the technique known as struct hack. // // // This object will tell us everything we need to know about the memory // // layout of double[N] followed by int[M]. It's structurally identical to // // size_t[2] that stores N and M. It's very cheap to create. // const Layout layout(N, M); // // // Allocate enough memory for both arrays. `AllocSize()` tells us how much // // memory is needed. We are free to use any allocation function we want as // // long as it returns aligned memory. // std::unique_ptr p(new unsigned char[layout.AllocSize()]); // // // Obtain the pointer to the array of doubles. // // Equivalent to `reinterpret_cast(p.get())`. // // // // We could have written layout.Pointer<0>(p) instead. If all the types are // // unique you can use either form, but if some types are repeated you must // // use the index form. // double* a = layout.Pointer(p.get()); // // // Obtain the pointer to the array of ints. // // Equivalent to `reinterpret_cast(p.get() + N * 8)`. // int* b = layout.Pointer(p); // // If we are unable to specify sizes of all fields, we can pass as many sizes as // we can to `Partial()`. 
In return, it'll allow us to access the fields whose // locations and sizes can be computed from the provided information. // `Partial()` comes in handy when the array sizes are embedded into the // allocation. // // // size_t[1] containing N, size_t[1] containing M, double[N], int[M]. // using L = Layout; // // unsigned char* Allocate(size_t n, size_t m) { // const L layout(1, 1, n, m); // unsigned char* p = new unsigned char[layout.AllocSize()]; // *layout.Pointer<0>(p) = n; // *layout.Pointer<1>(p) = m; // return p; // } // // void Use(unsigned char* p) { // // First, extract N and M. // // Specify that the first array has only one element. Using `prefix` we // // can access the first two arrays but not more. // constexpr auto prefix = L::Partial(1); // size_t n = *prefix.Pointer<0>(p); // size_t m = *prefix.Pointer<1>(p); // // // Now we can get pointers to the payload. // const L layout(1, 1, n, m); // double* a = layout.Pointer(p); // int* b = layout.Pointer(p); // } // // The layout we used above combines fixed-size with dynamically-sized fields. // This is quite common. Layout is optimized for this use case and generates // optimal code. All computations that can be performed at compile time are // indeed performed at compile time. // // Efficiency tip: The order of fields matters. In `Layout` try to // ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no // padding in between arrays. // // You can manually override the alignment of an array by wrapping the type in // `Aligned`. `Layout<..., Aligned, ...>` has exactly the same API // and behavior as `Layout<..., T, ...>` except that the first element of the // array of `T` is aligned to `N` (the rest of the elements follow without // padding). `N` cannot be less than `alignof(T)`. // // `AllocSize()` and `Pointer()` are the most basic methods for dealing with // memory layouts. Check out the reference or code below to discover more. 
// // EXAMPLE // // // Immutable move-only string with sizeof equal to sizeof(void*). The // // string size and the characters are kept in the same heap allocation. // class CompactString { // public: // CompactString(const char* s = "") { // const size_t size = strlen(s); // // size_t[1] followed by char[size + 1]. // const L layout(1, size + 1); // p_.reset(new unsigned char[layout.AllocSize()]); // // If running under ASAN, mark the padding bytes, if any, to catch // // memory errors. // layout.PoisonPadding(p_.get()); // // Store the size in the allocation. // *layout.Pointer(p_.get()) = size; // // Store the characters in the allocation. // memcpy(layout.Pointer(p_.get()), s, size + 1); // } // // size_t size() const { // // Equivalent to reinterpret_cast(*p). // return *L::Partial().Pointer(p_.get()); // } // // const char* c_str() const { // // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). // // The argument in Partial(1) specifies that we have size_t[1] in front // // of the characters. // return L::Partial(1).Pointer(p_.get()); // } // // private: // // Our heap allocation contains a size_t followed by an array of chars. // using L = Layout; // std::unique_ptr p_; // }; // // int main() { // CompactString s = "hello"; // assert(s.size() == 5); // assert(strcmp(s.c_str(), "hello") == 0); // } // // DOCUMENTATION // // The interface exported by this file consists of: // - class `Layout<>` and its public members. // - The public members of class `internal_layout::LayoutImpl<>`. That class // isn't intended to be used directly, and its name and template parameter // list are internal implementation details, but the class itself provides // most of the functionality in this file. See comments on its members for // detailed documentation. // // `Layout::Partial(count1,..., countm)` (where `m` <= `n`) returns a // `LayoutImpl<>` object. 
`Layout layout(count1,..., countn)` // creates a `Layout` object, which exposes the same functionality by inheriting // from `LayoutImpl<>`. #ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_ #define ABSL_CONTAINER_INTERNAL_LAYOUT_H_ #include #include #include #include #include #include #include #include #include #include "absl/base/config.h" #include "absl/meta/type_traits.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "absl/utility/utility.h" #ifdef ABSL_HAVE_ADDRESS_SANITIZER #include #endif #if defined(__GXX_RTTI) #define ABSL_INTERNAL_HAS_CXA_DEMANGLE #endif #ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE #include #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // A type wrapper that instructs `Layout` to use the specific alignment for the // array. `Layout<..., Aligned, ...>` has exactly the same API // and behavior as `Layout<..., T, ...>` except that the first element of the // array of `T` is aligned to `N` (the rest of the elements follow without // padding). // // Requires: `N >= alignof(T)` and `N` is a power of 2. template struct Aligned; namespace internal_layout { template struct NotAligned {}; template struct NotAligned> { static_assert(sizeof(T) == 0, "Aligned cannot be const-qualified"); }; template using IntToSize = size_t; template using TypeToSize = size_t; template struct Type : NotAligned { using type = T; }; template struct Type> { using type = T; }; template struct SizeOf : NotAligned, std::integral_constant {}; template struct SizeOf> : std::integral_constant {}; // Note: workaround for https://gcc.gnu.org/PR88115 template struct AlignOf : NotAligned { static constexpr size_t value = alignof(T); }; template struct AlignOf> { static_assert(N % alignof(T) == 0, "Custom alignment can't be lower than the type's alignment"); static constexpr size_t value = N; }; // Does `Ts...` contain `T`? 
template using Contains = absl::disjunction...>; template using CopyConst = typename std::conditional::value, const To, To>::type; // Note: We're not qualifying this with absl:: because it doesn't compile under // MSVC. template using SliceType = Span; // This namespace contains no types. It prevents functions defined in it from // being found by ADL. namespace adl_barrier { template constexpr size_t Find(Needle, Needle, Ts...) { static_assert(!Contains(), "Duplicate element type"); return 0; } template constexpr size_t Find(Needle, T, Ts...) { return adl_barrier::Find(Needle(), Ts()...) + 1; } constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); } // Returns `q * m` for the smallest `q` such that `q * m >= n`. // Requires: `m` is a power of two. It's enforced by IsLegalElementType below. constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); } constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; } constexpr size_t Max(size_t a) { return a; } template constexpr size_t Max(size_t a, size_t b, Ts... rest) { return adl_barrier::Max(b < a ? a : b, rest...); } template std::string TypeName() { std::string out; int status = 0; char* demangled = nullptr; #ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status); #endif if (status == 0 && demangled != nullptr) { // Demangling succeeded. absl::StrAppend(&out, "<", demangled, ">"); free(demangled); } else { #if defined(__GXX_RTTI) || defined(_CPPRTTI) absl::StrAppend(&out, "<", typeid(T).name(), ">"); #endif } return out; } } // namespace adl_barrier template using EnableIf = typename std::enable_if::type; // Can `T` be a template argument of `Layout`? 
template using IsLegalElementType = std::integral_constant< bool, !std::is_reference::value && !std::is_volatile::value && !std::is_reference::type>::value && !std::is_volatile::type>::value && adl_barrier::IsPow2(AlignOf::value)>; template class LayoutImpl; // Public base class of `Layout` and the result type of `Layout::Partial()`. // // `Elements...` contains all template arguments of `Layout` that created this // instance. // // `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments // passed to `Layout::Partial()` or `Layout::Layout()`. // // `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is // `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we // can compute offsets). template class LayoutImpl, absl::index_sequence, absl::index_sequence> { private: static_assert(sizeof...(Elements) > 0, "At least one field is required"); static_assert(absl::conjunction...>::value, "Invalid element type (see IsLegalElementType)"); enum { NumTypes = sizeof...(Elements), NumSizes = sizeof...(SizeSeq), NumOffsets = sizeof...(OffsetSeq), }; // These are guaranteed by `Layout`. static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1), "Internal error"); static_assert(NumTypes > 0, "Internal error"); // Returns the index of `T` in `Elements...`. Results in a compilation error // if `Elements...` doesn't contain exactly one instance of `T`. template static constexpr size_t ElementIndex() { static_assert(Contains, Type::type>...>(), "Type not found"); return adl_barrier::Find(Type(), Type::type>()...); } template using ElementAlignment = AlignOf>::type>; public: // Element types of all arrays packed in a tuple. using ElementTypes = std::tuple::type...>; // Element type of the Nth array. template using ElementType = typename std::tuple_element::type; constexpr explicit LayoutImpl(IntToSize... sizes) : size_{sizes...} {} // Alignment of the layout, equal to the strictest alignment of all elements. 
// All pointers passed to the methods of layout must be aligned to this value. static constexpr size_t Alignment() { return adl_barrier::Max(AlignOf::value...); } // Offset in bytes of the Nth array. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // assert(x.Offset<0>() == 0); // The ints starts from 0. // assert(x.Offset<1>() == 16); // The doubles starts from 16. // // Requires: `N <= NumSizes && N < sizeof...(Ts)`. template = 0> constexpr size_t Offset() const { return 0; } template = 0> constexpr size_t Offset() const { static_assert(N < NumOffsets, "Index out of bounds"); return adl_barrier::Align( Offset() + SizeOf>::value * size_[N - 1], ElementAlignment::value); } // Offset in bytes of the array with the specified element type. There must // be exactly one such array and its zero-based index must be at most // `NumSizes`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // assert(x.Offset() == 0); // The ints starts from 0. // assert(x.Offset() == 16); // The doubles starts from 16. template constexpr size_t Offset() const { return Offset()>(); } // Offsets in bytes of all arrays for which the offsets are known. constexpr std::array Offsets() const { return {{Offset()...}}; } // The number of elements in the Nth array. This is the Nth argument of // `Layout::Partial()` or `Layout::Layout()` (zero-based). // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // assert(x.Size<0>() == 3); // assert(x.Size<1>() == 4); // // Requires: `N < NumSizes`. template constexpr size_t Size() const { static_assert(N < NumSizes, "Index out of bounds"); return size_[N]; } // The number of elements in the array with the specified element type. // There must be exactly one such array and its zero-based index must be // at most `NumSizes`. // // // int[3], 4 bytes of padding, double[4]. 
// Layout x(3, 4); // assert(x.Size() == 3); // assert(x.Size() == 4); template constexpr size_t Size() const { return Size()>(); } // The number of elements of all arrays for which they are known. constexpr std::array Sizes() const { return {{Size()...}}; } // Pointer to the beginning of the Nth array. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // int* ints = x.Pointer<0>(p); // double* doubles = x.Pointer<1>(p); // // Requires: `N <= NumSizes && N < sizeof...(Ts)`. // Requires: `p` is aligned to `Alignment()`. template CopyConst>* Pointer(Char* p) const { using C = typename std::remove_const::type; static_assert( std::is_same() || std::is_same() || std::is_same(), "The argument must be a pointer to [const] [signed|unsigned] char"); constexpr size_t alignment = Alignment(); (void)alignment; assert(reinterpret_cast(p) % alignment == 0); return reinterpret_cast>*>(p + Offset()); } // Pointer to the beginning of the array with the specified element type. // There must be exactly one such array and its zero-based index must be at // most `NumSizes`. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // int* ints = x.Pointer(p); // double* doubles = x.Pointer(p); // // Requires: `p` is aligned to `Alignment()`. template CopyConst* Pointer(Char* p) const { return Pointer()>(p); } // Pointers to all arrays for which pointers are known. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // // int* ints; // double* doubles; // std::tie(ints, doubles) = x.Pointers(p); // // Requires: `p` is aligned to `Alignment()`. 
// // Note: We're not using ElementType alias here because it does not compile // under MSVC. template std::tuple::type>*...> Pointers(Char* p) const { return std::tuple>*...>( Pointer(p)...); } // The Nth array. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // Span ints = x.Slice<0>(p); // Span doubles = x.Slice<1>(p); // // Requires: `N < NumSizes`. // Requires: `p` is aligned to `Alignment()`. template SliceType>> Slice(Char* p) const { return SliceType>>(Pointer(p), Size()); } // The array with the specified element type. There must be exactly one // such array and its zero-based index must be less than `NumSizes`. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // Span ints = x.Slice(p); // Span doubles = x.Slice(p); // // Requires: `p` is aligned to `Alignment()`. template SliceType> Slice(Char* p) const { return Slice()>(p); } // All arrays with known sizes. // // `Char` must be `[const] [signed|unsigned] char`. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // // Span ints; // Span doubles; // std::tie(ints, doubles) = x.Slices(p); // // Requires: `p` is aligned to `Alignment()`. // // Note: We're not using ElementType alias here because it does not compile // under MSVC. template std::tuple::type>>...> Slices(Char* p) const { // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed // in 6.1). (void)p; return std::tuple>>...>( Slice(p)...); } // The size of the allocation that fits all arrays. // // // int[3], 4 bytes of padding, double[4]. // Layout x(3, 4); // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes // // Requires: `NumSizes == sizeof...(Ts)`. 
constexpr size_t AllocSize() const { static_assert(NumTypes == NumSizes, "You must specify sizes of all fields"); return Offset() + SizeOf>::value * size_[NumTypes - 1]; } // If built with --config=asan, poisons padding bytes (if any) in the // allocation. The pointer must point to a memory block at least // `AllocSize()` bytes in length. // // `Char` must be `[const] [signed|unsigned] char`. // // Requires: `p` is aligned to `Alignment()`. template = 0> void PoisonPadding(const Char* p) const { Pointer<0>(p); // verify the requirements on `Char` and `p` } template = 0> void PoisonPadding(const Char* p) const { static_assert(N < NumOffsets, "Index out of bounds"); (void)p; #ifdef ABSL_HAVE_ADDRESS_SANITIZER PoisonPadding(p); // The `if` is an optimization. It doesn't affect the observable behaviour. if (ElementAlignment::value % ElementAlignment::value) { size_t start = Offset() + SizeOf>::value * size_[N - 1]; ASAN_POISON_MEMORY_REGION(p + start, Offset() - start); } #endif } // Human-readable description of the memory layout. Useful for debugging. // Slow. // // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed // // by an unknown number of doubles. // auto x = Layout::Partial(5, 3); // assert(x.DebugString() == // "@0(1)[5]; @8(4)[3]; @24(8)"); // // Each field is in the following format: @offset(sizeof)[size] ( // may be missing depending on the target platform). For example, // @8(4)[3] means that at offset 8 we have an array of ints, where each // int is 4 bytes, and we have 3 of those ints. The size of the last field may // be missing (as in the example above). Only fields with known offsets are // described. Type names may differ across platforms: one compiler might // produce "unsigned*" where another produces "unsigned int *". 
std::string DebugString() const { const auto offsets = Offsets(); const size_t sizes[] = {SizeOf>::value...}; const std::string types[] = { adl_barrier::TypeName>()...}; std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")"); for (size_t i = 0; i != NumOffsets - 1; ++i) { absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1], "(", sizes[i + 1], ")"); } // NumSizes is a constant that may be zero. Some compilers cannot see that // inside the if statement "size_[NumSizes - 1]" must be valid. int last = static_cast(NumSizes) - 1; if (NumTypes == NumSizes && last >= 0) { absl::StrAppend(&res, "[", size_[last], "]"); } return res; } private: // Arguments of `Layout::Partial()` or `Layout::Layout()`. size_t size_[NumSizes > 0 ? NumSizes : 1]; }; template using LayoutType = LayoutImpl< std::tuple, absl::make_index_sequence, absl::make_index_sequence>; } // namespace internal_layout // Descriptor of arrays of various types and sizes laid out in memory one after // another. See the top of the file for documentation. // // Check out the public API of internal_layout::LayoutImpl above. The type is // internal to the library but its methods are public, and they are inherited // by `Layout`. template class Layout : public internal_layout::LayoutType { public: static_assert(sizeof...(Ts) > 0, "At least one field is required"); static_assert( absl::conjunction...>::value, "Invalid element type (see IsLegalElementType)"); // The result type of `Partial()` with `NumSizes` arguments. template using PartialType = internal_layout::LayoutType; // `Layout` knows the element types of the arrays we want to lay out in // memory but not the number of elements in each array. // `Partial(size1, ..., sizeN)` allows us to specify the latter. The // resulting immutable object can be used to obtain pointers to the // individual arrays. // // It's allowed to pass fewer array sizes than the number of arrays. 
E.g., // if all you need is to the offset of the second array, you only need to // pass one argument -- the number of elements in the first array. // // // int[3] followed by 4 bytes of padding and an unknown number of // // doubles. // auto x = Layout::Partial(3); // // doubles start at byte 16. // assert(x.Offset<1>() == 16); // // If you know the number of elements in all arrays, you can still call // `Partial()` but it's more convenient to use the constructor of `Layout`. // // Layout x(3, 5); // // Note: The sizes of the arrays must be specified in number of elements, // not in bytes. // // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`. // Requires: all arguments are convertible to `size_t`. template static constexpr PartialType Partial(Sizes&&... sizes) { static_assert(sizeof...(Sizes) <= sizeof...(Ts), ""); return PartialType(absl::forward(sizes)...); } // Creates a layout with the sizes of all arrays specified. If you know // only the sizes of the first N arrays (where N can be zero), you can use // `Partial()` defined above. The constructor is essentially equivalent to // calling `Partial()` and passing in all array sizes; the constructor is // provided as a convenient abbreviation. // // Note: The sizes of the arrays must be specified in number of elements, // not in bytes. constexpr explicit Layout(internal_layout::TypeToSize... sizes) : internal_layout::LayoutType(sizes...) {} }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_ abseil-20220623.1/absl/container/internal/layout_benchmark.cc000066400000000000000000000101441430371345100237300ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Every benchmark should have the same performance as the corresponding // headroom benchmark. #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/layout.h" #include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::benchmark::DoNotOptimize; using Int128 = int64_t[2]; // This benchmark provides the upper bound on performance for BM_OffsetConstant. template void BM_OffsetConstantHeadroom(benchmark::State& state) { for (auto _ : state) { DoNotOptimize(Offset); } } template void BM_OffsetConstant(benchmark::State& state) { using L = Layout; ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset, "Invalid offset"); for (auto _ : state) { DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>()); } } template size_t VariableOffset(size_t n, size_t m, size_t k); template <> size_t VariableOffset(size_t n, size_t m, size_t k) { auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }; return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8); } template <> size_t VariableOffset(size_t n, size_t m, size_t k) { // No alignment is necessary. return n * 16 + m * 4 + k * 2; } // This benchmark provides the upper bound on performance for BM_OffsetVariable. 
template void BM_OffsetVariableHeadroom(benchmark::State& state) { size_t n = 3; size_t m = 5; size_t k = 7; ABSL_RAW_CHECK(VariableOffset(n, m, k) == Offset, "Invalid offset"); for (auto _ : state) { DoNotOptimize(n); DoNotOptimize(m); DoNotOptimize(k); DoNotOptimize(VariableOffset(n, m, k)); } } template void BM_OffsetVariable(benchmark::State& state) { using L = Layout; size_t n = 3; size_t m = 5; size_t k = 7; ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset, "Inavlid offset"); for (auto _ : state) { DoNotOptimize(n); DoNotOptimize(m); DoNotOptimize(k); DoNotOptimize(L::Partial(n, m, k).template Offset<3>()); } } // Run all benchmarks in two modes: // // Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?]. // Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?]. #define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \ auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 = \ NAME; \ BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4) OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t, int8_t); OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t); OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128); OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t, int8_t); OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t); } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/layout_test.cc000066400000000000000000001670021430371345100227630ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/layout.h" // We need ::max_align_t because some libstdc++ versions don't provide // std::max_align_t #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/config.h" #include "absl/base/internal/raw_logging.h" #include "absl/types/span.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::absl::Span; using ::testing::ElementsAre; size_t Distance(const void* from, const void* to) { ABSL_RAW_CHECK(from <= to, "Distance must be non-negative"); return static_cast(to) - static_cast(from); } template Expected Type(Actual val) { static_assert(std::is_same(), ""); return val; } // Helper classes to test different size and alignments. struct alignas(8) Int128 { uint64_t a, b; friend bool operator==(Int128 lhs, Int128 rhs) { return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b); } static std::string Name() { return internal_layout::adl_barrier::TypeName(); } }; // int64_t is *not* 8-byte aligned on all platforms! struct alignas(8) Int64 { int64_t a; friend bool operator==(Int64 lhs, Int64 rhs) { return lhs.a == rhs.a; } }; // Properties of types that this test relies on. 
static_assert(sizeof(int8_t) == 1, ""); static_assert(alignof(int8_t) == 1, ""); static_assert(sizeof(int16_t) == 2, ""); static_assert(alignof(int16_t) == 2, ""); static_assert(sizeof(int32_t) == 4, ""); static_assert(alignof(int32_t) == 4, ""); static_assert(sizeof(Int64) == 8, ""); static_assert(alignof(Int64) == 8, ""); static_assert(sizeof(Int128) == 16, ""); static_assert(alignof(Int128) == 8, ""); template void SameType() { static_assert(std::is_same(), ""); } TEST(Layout, ElementType) { { using L = Layout; SameType>(); SameType>(); SameType>(); } { using L = Layout; SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); } { using L = Layout; SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); SameType>(); } } TEST(Layout, ElementTypes) { { using L = Layout; SameType, L::ElementTypes>(); SameType, decltype(L::Partial())::ElementTypes>(); SameType, decltype(L::Partial(0))::ElementTypes>(); } { using L = Layout; SameType, L::ElementTypes>(); SameType, decltype(L::Partial())::ElementTypes>(); SameType, decltype(L::Partial(0))::ElementTypes>(); } { using L = Layout; SameType, L::ElementTypes>(); SameType, decltype(L::Partial())::ElementTypes>(); SameType, decltype(L::Partial(0))::ElementTypes>(); SameType, decltype(L::Partial(0, 0))::ElementTypes>(); SameType, decltype(L::Partial(0, 0, 0))::ElementTypes>(); } } TEST(Layout, OffsetByIndex) { { using L = Layout; EXPECT_EQ(0, L::Partial().Offset<0>()); EXPECT_EQ(0, L::Partial(3).Offset<0>()); EXPECT_EQ(0, L(3).Offset<0>()); } { using L = Layout; EXPECT_EQ(0, L::Partial().Offset<0>()); EXPECT_EQ(0, L::Partial(3).Offset<0>()); EXPECT_EQ(12, L::Partial(3).Offset<1>()); EXPECT_EQ(0, L::Partial(3, 5).Offset<0>()); EXPECT_EQ(12, L::Partial(3, 5).Offset<1>()); EXPECT_EQ(0, L(3, 5).Offset<0>()); EXPECT_EQ(12, L(3, 5).Offset<1>()); } { using L = Layout; EXPECT_EQ(0, L::Partial().Offset<0>()); EXPECT_EQ(0, 
L::Partial(0).Offset<0>()); EXPECT_EQ(0, L::Partial(0).Offset<1>()); EXPECT_EQ(0, L::Partial(1).Offset<0>()); EXPECT_EQ(4, L::Partial(1).Offset<1>()); EXPECT_EQ(0, L::Partial(5).Offset<0>()); EXPECT_EQ(8, L::Partial(5).Offset<1>()); EXPECT_EQ(0, L::Partial(0, 0).Offset<0>()); EXPECT_EQ(0, L::Partial(0, 0).Offset<1>()); EXPECT_EQ(0, L::Partial(0, 0).Offset<2>()); EXPECT_EQ(0, L::Partial(1, 0).Offset<0>()); EXPECT_EQ(4, L::Partial(1, 0).Offset<1>()); EXPECT_EQ(8, L::Partial(1, 0).Offset<2>()); EXPECT_EQ(0, L::Partial(5, 3).Offset<0>()); EXPECT_EQ(8, L::Partial(5, 3).Offset<1>()); EXPECT_EQ(24, L::Partial(5, 3).Offset<2>()); EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>()); EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>()); EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>()); EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>()); EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>()); EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>()); EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>()); EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>()); EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>()); EXPECT_EQ(0, L(5, 3, 1).Offset<0>()); EXPECT_EQ(24, L(5, 3, 1).Offset<2>()); EXPECT_EQ(8, L(5, 3, 1).Offset<1>()); } } TEST(Layout, OffsetByType) { { using L = Layout; EXPECT_EQ(0, L::Partial().Offset()); EXPECT_EQ(0, L::Partial(3).Offset()); EXPECT_EQ(0, L(3).Offset()); } { using L = Layout; EXPECT_EQ(0, L::Partial().Offset()); EXPECT_EQ(0, L::Partial(0).Offset()); EXPECT_EQ(0, L::Partial(0).Offset()); EXPECT_EQ(0, L::Partial(1).Offset()); EXPECT_EQ(4, L::Partial(1).Offset()); EXPECT_EQ(0, L::Partial(5).Offset()); EXPECT_EQ(8, L::Partial(5).Offset()); EXPECT_EQ(0, L::Partial(0, 0).Offset()); EXPECT_EQ(0, L::Partial(0, 0).Offset()); EXPECT_EQ(0, L::Partial(0, 0).Offset()); EXPECT_EQ(0, L::Partial(1, 0).Offset()); EXPECT_EQ(4, L::Partial(1, 0).Offset()); EXPECT_EQ(8, L::Partial(1, 0).Offset()); EXPECT_EQ(0, L::Partial(5, 3).Offset()); EXPECT_EQ(8, L::Partial(5, 3).Offset()); EXPECT_EQ(24, L::Partial(5, 3).Offset()); EXPECT_EQ(0, 
L::Partial(0, 0, 0).Offset()); EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); EXPECT_EQ(0, L::Partial(0, 0, 0).Offset()); EXPECT_EQ(0, L::Partial(1, 0, 0).Offset()); EXPECT_EQ(4, L::Partial(1, 0, 0).Offset()); EXPECT_EQ(8, L::Partial(1, 0, 0).Offset()); EXPECT_EQ(0, L::Partial(5, 3, 1).Offset()); EXPECT_EQ(24, L::Partial(5, 3, 1).Offset()); EXPECT_EQ(8, L::Partial(5, 3, 1).Offset()); EXPECT_EQ(0, L(5, 3, 1).Offset()); EXPECT_EQ(24, L(5, 3, 1).Offset()); EXPECT_EQ(8, L(5, 3, 1).Offset()); } } TEST(Layout, Offsets) { { using L = Layout; EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0)); EXPECT_THAT(L(3).Offsets(), ElementsAre(0)); } { using L = Layout; EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12)); EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12)); EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12)); } { using L = Layout; EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0)); EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4)); EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8)); EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0)); EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8)); EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24)); EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0)); EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8)); EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24)); } } TEST(Layout, AllocSize) { { using L = Layout; EXPECT_EQ(0, L::Partial(0).AllocSize()); EXPECT_EQ(12, L::Partial(3).AllocSize()); EXPECT_EQ(12, L(3).AllocSize()); } { using L = Layout; EXPECT_EQ(32, L::Partial(3, 5).AllocSize()); EXPECT_EQ(32, L(3, 5).AllocSize()); } { using L = Layout; EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize()); EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize()); EXPECT_EQ(8, L::Partial(0, 1, 
0).AllocSize()); EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize()); EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize()); EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize()); EXPECT_EQ(136, L(3, 5, 7).AllocSize()); } } TEST(Layout, SizeByIndex) { { using L = Layout; EXPECT_EQ(0, L::Partial(0).Size<0>()); EXPECT_EQ(3, L::Partial(3).Size<0>()); EXPECT_EQ(3, L(3).Size<0>()); } { using L = Layout; EXPECT_EQ(0, L::Partial(0).Size<0>()); EXPECT_EQ(3, L::Partial(3).Size<0>()); EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); EXPECT_EQ(3, L(3, 5).Size<0>()); EXPECT_EQ(5, L(3, 5).Size<1>()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Size<0>()); EXPECT_EQ(3, L::Partial(3, 5).Size<0>()); EXPECT_EQ(5, L::Partial(3, 5).Size<1>()); EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>()); EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>()); EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>()); EXPECT_EQ(3, L(3, 5, 7).Size<0>()); EXPECT_EQ(5, L(3, 5, 7).Size<1>()); EXPECT_EQ(7, L(3, 5, 7).Size<2>()); } } TEST(Layout, SizeByType) { { using L = Layout; EXPECT_EQ(0, L::Partial(0).Size()); EXPECT_EQ(3, L::Partial(3).Size()); EXPECT_EQ(3, L(3).Size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Size()); EXPECT_EQ(3, L::Partial(3, 5).Size()); EXPECT_EQ(5, L::Partial(3, 5).Size()); EXPECT_EQ(3, L::Partial(3, 5, 7).Size()); EXPECT_EQ(5, L::Partial(3, 5, 7).Size()); EXPECT_EQ(7, L::Partial(3, 5, 7).Size()); EXPECT_EQ(3, L(3, 5, 7).Size()); EXPECT_EQ(5, L(3, 5, 7).Size()); EXPECT_EQ(7, L(3, 5, 7).Size()); } } TEST(Layout, Sizes) { { using L = Layout; EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); EXPECT_THAT(L(3).Sizes(), ElementsAre(3)); } { using L = Layout; EXPECT_THAT(L::Partial().Sizes(), ElementsAre()); EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5)); } { using L = Layout; EXPECT_THAT(L::Partial().Sizes(), 
ElementsAre()); EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3)); EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5)); EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7)); } } TEST(Layout, PointerByIndex) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); EXPECT_EQ( 12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); EXPECT_EQ( 4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); EXPECT_EQ(24, Distance(p, 
Type(L::Partial(5, 3).Pointer<2>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); EXPECT_EQ( 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ( 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); } } TEST(Layout, PointerByType) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ( 0, Distance(p, Type(L::Partial().Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(3).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); } { using L = Layout; EXPECT_EQ( 0, Distance(p, Type(L::Partial().Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(1).Pointer(p)))); EXPECT_EQ( 4, Distance(p, Type(L::Partial(1).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(5).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(5).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ(4, Distance(p, Type( L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); 
EXPECT_EQ(8, Distance(p, Type( L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( 24, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ(4, Distance(p, Type( L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type( L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type( L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type( L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type( L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); } } TEST(Layout, MutablePointerByIndex) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer<0>(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L::Partial(3).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3, 5).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L::Partial(3, 5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(3, 5).Pointer<0>(p)))); EXPECT_EQ(12, Distance(p, Type(L(3, 5).Pointer<1>(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer<0>(p)))); EXPECT_EQ(4, Distance(p, Type(L::Partial(1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer<0>(p)))); EXPECT_EQ(8, Distance(p, 
Type(L::Partial(5).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer<0>(p)))); EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer<0>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3).Pointer<1>(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<0>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0, 0).Pointer<0>(p)))); EXPECT_EQ(4, Distance(p, Type(L::Partial(1, 0, 0).Pointer<1>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0, 0).Pointer<2>(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(5, 3, 1).Pointer<1>(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer<0>(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer<2>(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer<1>(p)))); } } TEST(Layout, MutablePointerByType) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(3).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(3).Pointer(p)))); } { using L = Layout; EXPECT_EQ(0, Distance(p, Type(L::Partial().Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1).Pointer(p)))); EXPECT_EQ(4, Distance(p, 
Type(L::Partial(1).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(5).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(0, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ( 4, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type(L::Partial(1, 0).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L::Partial(5, 3).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(0, 0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ( 4, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(1, 0, 0).Pointer(p)))); EXPECT_EQ( 0, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ( 24, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ( 8, Distance(p, Type(L::Partial(5, 3, 1).Pointer(p)))); EXPECT_EQ(0, Distance(p, Type(L(5, 3, 1).Pointer(p)))); EXPECT_EQ(24, Distance(p, Type(L(5, 3, 1).Pointer(p)))); EXPECT_EQ(8, Distance(p, Type(L(5, 3, 1).Pointer(p)))); } } TEST(Layout, Pointers) { alignas(max_align_t) const unsigned char p[100] = {}; using L = Layout; { const auto x = L::Partial(); EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), Type>(x.Pointers(p))); } { const auto x = L::Partial(1); EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), (Type>(x.Pointers(p)))); } { const auto x = L::Partial(1, 2); EXPECT_EQ( std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>( x.Pointers(p)))); } { const auto x = L::Partial(1, 2, 3); EXPECT_EQ( 
std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>( x.Pointers(p)))); } { const L x(1, 2, 3); EXPECT_EQ( std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>( x.Pointers(p)))); } } TEST(Layout, MutablePointers) { alignas(max_align_t) unsigned char p[100]; using L = Layout; { const auto x = L::Partial(); EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)), Type>(x.Pointers(p))); } { const auto x = L::Partial(1); EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)), (Type>(x.Pointers(p)))); } { const auto x = L::Partial(1, 2); EXPECT_EQ( std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>(x.Pointers(p)))); } { const auto x = L::Partial(1, 2, 3); EXPECT_EQ( std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>(x.Pointers(p)))); } { const L x(1, 2, 3); EXPECT_EQ( std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)), (Type>(x.Pointers(p)))); } } TEST(Layout, SliceByIndexSize) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(3, L(3).Slice<0>(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); } } TEST(Layout, SliceByTypeSize) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ(0, L::Partial(0).Slice(p).size()); EXPECT_EQ(3, 
L::Partial(3).Slice(p).size()); EXPECT_EQ(3, L(3).Slice(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice(p).size()); EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); EXPECT_EQ(5, L(3, 5, 7).Slice(p).size()); EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); } } TEST(Layout, MutableSliceByIndexSize) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size()); EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(3, L(3).Slice<0>(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); EXPECT_EQ(5, L(3, 5).Slice<1>(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size()); EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size()); EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size()); EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size()); EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size()); EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size()); EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size()); EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size()); } } TEST(Layout, MutableSliceByTypeSize) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ(0, L::Partial(0).Slice(p).size()); EXPECT_EQ(3, L::Partial(3).Slice(p).size()); EXPECT_EQ(3, L(3).Slice(p).size()); } { using L = Layout; EXPECT_EQ(3, L::Partial(3).Slice(p).size()); EXPECT_EQ(3, L::Partial(3, 5).Slice(p).size()); EXPECT_EQ(5, L::Partial(3, 5).Slice(p).size()); EXPECT_EQ(3, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(5, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(7, L::Partial(3, 5, 7).Slice(p).size()); EXPECT_EQ(3, L(3, 5, 7).Slice(p).size()); EXPECT_EQ(5, L(3, 5, 
7).Slice(p).size()); EXPECT_EQ(7, L(3, 5, 7).Slice(p).size()); } } TEST(Layout, SliceByIndexData) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); EXPECT_EQ( 12, Distance( p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); EXPECT_EQ( 12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, Distance( p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, Distance( p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance( p, Type>(L::Partial(5, 3, 
1).Slice<2>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); EXPECT_EQ( 8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); } } TEST(Layout, SliceByTypeData) { alignas(max_align_t) const unsigned char p[100] = {}; { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(3).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L(3).Slice(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0, 0).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(0, 0).Slice(p)) .data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(1, 0).Slice(p)) .data())); EXPECT_EQ(4, Distance(p, Type>( L::Partial(1, 0).Slice(p)) .data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(5, 3).Slice(p)) .data())); EXPECT_EQ(8, Distance(p, Type>( L::Partial(5, 3).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(0, 0, 0).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(0, 0, 0).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(0, 0, 0).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(1, 0, 0).Slice(p)) .data())); EXPECT_EQ(4, Distance(p, Type>( L::Partial(1, 0, 0).Slice(p)) .data())); EXPECT_EQ(8, Distance(p, Type>( L::Partial(1, 0, 0).Slice(p)) .data())); EXPECT_EQ(0, Distance(p, Type>( L::Partial(5, 3, 1).Slice(p)) .data())); EXPECT_EQ(24, Distance(p, Type>( L::Partial(5, 3, 1).Slice(p)) .data())); EXPECT_EQ(8, Distance(p, Type>( L::Partial(5, 3, 1).Slice(p)) .data())); EXPECT_EQ( 0, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, Distance(p, 
Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L(5, 3, 1).Slice(p)).data())); } } TEST(Layout, MutableSliceByIndexData) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(3).Slice<0>(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance(p, Type>(L::Partial(3).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(3, 5).Slice<0>(p)).data())); EXPECT_EQ( 12, Distance(p, Type>(L::Partial(3, 5).Slice<1>(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(3, 5).Slice<0>(p)).data())); EXPECT_EQ(12, Distance(p, Type>(L(3, 5).Slice<1>(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(1).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(5).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(1, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, Distance(p, Type>(L::Partial(1, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(5, 3).Slice<0>(p)).data())); EXPECT_EQ( 8, Distance(p, Type>(L::Partial(5, 3).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1, 0, 0).Slice<0>(p)).data())); EXPECT_EQ( 4, Distance( p, Type>(L::Partial(1, 0, 0).Slice<1>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(1, 0, 0).Slice<2>(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ( 24, Distance( p, 
Type>(L::Partial(5, 3, 1).Slice<2>(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(5, 3, 1).Slice<1>(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(5, 3, 1).Slice<0>(p)).data())); EXPECT_EQ(24, Distance(p, Type>(L(5, 3, 1).Slice<2>(p)).data())); EXPECT_EQ(8, Distance(p, Type>(L(5, 3, 1).Slice<1>(p)).data())); } } TEST(Layout, MutableSliceByTypeData) { alignas(max_align_t) unsigned char p[100]; { using L = Layout; EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(3).Slice(p)).data())); EXPECT_EQ(0, Distance(p, Type>(L(3).Slice(p)).data())); } { using L = Layout; EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(1).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(5).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(1, 0).Slice(p)).data())); EXPECT_EQ( 4, Distance( p, Type>(L::Partial(1, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L::Partial(5, 3).Slice(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(5, 3).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(0, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( 4, Distance( p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(1, 0, 0).Slice(p)).data())); EXPECT_EQ( 0, Distance( p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, Distance( p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 8, Distance( p, Type>(L::Partial(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 0, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 24, 
Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); EXPECT_EQ( 8, Distance(p, Type>(L(5, 3, 1).Slice(p)).data())); } } MATCHER_P(IsSameSlice, slice, "") { return arg.size() == slice.size() && arg.data() == slice.data(); } template class TupleMatcher { public: explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {} template bool MatchAndExplain(const Tuple& p, testing::MatchResultListener* /* listener */) const { static_assert(std::tuple_size::value == sizeof...(M), ""); return MatchAndExplainImpl( p, absl::make_index_sequence::value>{}); } // For the matcher concept. Left empty as we don't really need the diagnostics // right now. void DescribeTo(::std::ostream* os) const {} void DescribeNegationTo(::std::ostream* os) const {} private: template bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence) const { // Using std::min as a simple variadic "and". return std::min( {true, testing::SafeMatcherCast< const typename std::tuple_element::type&>( std::get(matchers_)) .Matches(std::get(p))...}); } std::tuple matchers_; }; template testing::PolymorphicMatcher> Tuple(M... 
matchers) { return testing::MakePolymorphicMatcher( TupleMatcher(std::move(matchers)...)); } TEST(Layout, Slices) { alignas(max_align_t) const unsigned char p[100] = {}; using L = Layout; { const auto x = L::Partial(); EXPECT_THAT(Type>(x.Slices(p)), Tuple()); } { const auto x = L::Partial(1); EXPECT_THAT(Type>>(x.Slices(p)), Tuple(IsSameSlice(x.Slice<0>(p)))); } { const auto x = L::Partial(1, 2); EXPECT_THAT( (Type, Span>>(x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); } { const auto x = L::Partial(1, 2, 3); EXPECT_THAT((Type, Span, Span>>(x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), IsSameSlice(x.Slice<2>(p)))); } { const L x(1, 2, 3); EXPECT_THAT((Type, Span, Span>>(x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), IsSameSlice(x.Slice<2>(p)))); } } TEST(Layout, MutableSlices) { alignas(max_align_t) unsigned char p[100] = {}; using L = Layout; { const auto x = L::Partial(); EXPECT_THAT(Type>(x.Slices(p)), Tuple()); } { const auto x = L::Partial(1); EXPECT_THAT(Type>>(x.Slices(p)), Tuple(IsSameSlice(x.Slice<0>(p)))); } { const auto x = L::Partial(1, 2); EXPECT_THAT((Type, Span>>(x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)))); } { const auto x = L::Partial(1, 2, 3); EXPECT_THAT((Type, Span, Span>>( x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), IsSameSlice(x.Slice<2>(p)))); } { const L x(1, 2, 3); EXPECT_THAT((Type, Span, Span>>( x.Slices(p))), Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)), IsSameSlice(x.Slice<2>(p)))); } } TEST(Layout, UnalignedTypes) { constexpr Layout x(1, 2, 3); alignas(max_align_t) unsigned char p[x.AllocSize() + 1]; EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4)); } TEST(Layout, CustomAlignment) { constexpr Layout> x(1, 2); alignas(max_align_t) unsigned char p[x.AllocSize()]; EXPECT_EQ(10, x.AllocSize()); EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8)); } TEST(Layout, 
OverAligned) { constexpr size_t M = alignof(max_align_t); constexpr Layout> x(1, 3); #ifdef __GNUC__ // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug: // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357 __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()]; #else alignas(2 * M) unsigned char p[x.AllocSize()]; #endif EXPECT_EQ(2 * M + 3, x.AllocSize()); EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M)); } TEST(Layout, Alignment) { static_assert(Layout::Alignment() == 1, ""); static_assert(Layout::Alignment() == 4, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout>::Alignment() == 64, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout::Alignment() == 8, ""); static_assert(Layout::Alignment() == 8, ""); } TEST(Layout, ConstexprPartial) { constexpr size_t M = alignof(max_align_t); constexpr Layout> x(1, 3); static_assert(x.Partial(1).template Offset<1>() == 2 * M, ""); } // [from, to) struct Region { size_t from; size_t to; }; void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) { #ifdef ABSL_HAVE_ADDRESS_SANITIZER for (size_t i = 0; i != n; ++i) { EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i)); } #endif } template void ExpectPoisoned(const unsigned char (&buf)[N], std::initializer_list reg) { size_t prev = 0; for (const Region& r : reg) { ExpectRegionPoisoned(buf + prev, r.from - prev, false); ExpectRegionPoisoned(buf + r.from, r.to - r.from, true); prev = r.to; } ExpectRegionPoisoned(buf + prev, N - prev, false); } TEST(Layout, PoisonPadding) { using L = Layout; constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize(); { constexpr auto x = L::Partial(); alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {}); } { constexpr auto x = L::Partial(1); 
alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {{1, 8}}); } { constexpr auto x = L::Partial(1, 2); alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {{1, 8}}); } { constexpr auto x = L::Partial(1, 2, 3); alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {{1, 8}, {36, 40}}); } { constexpr auto x = L::Partial(1, 2, 3, 4); alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {{1, 8}, {36, 40}}); } { constexpr L x(1, 2, 3, 4); alignas(max_align_t) const unsigned char c[n] = {}; x.PoisonPadding(c); EXPECT_EQ(x.Slices(c), x.Slices(c)); ExpectPoisoned(c, {{1, 8}, {36, 40}}); } } TEST(Layout, DebugString) { { constexpr auto x = Layout::Partial(); EXPECT_EQ("@0(1)", x.DebugString()); } { constexpr auto x = Layout::Partial(1); EXPECT_EQ("@0(1)[1]; @4(4)", x.DebugString()); } { constexpr auto x = Layout::Partial(1, 2); EXPECT_EQ("@0(1)[1]; @4(4)[2]; @12(1)", x.DebugString()); } { constexpr auto x = Layout::Partial(1, 2, 3); EXPECT_EQ( "@0(1)[1]; @4(4)[2]; @12(1)[3]; " "@16" + Int128::Name() + "(16)", x.DebugString()); } { constexpr auto x = Layout::Partial(1, 2, 3, 4); EXPECT_EQ( "@0(1)[1]; @4(4)[2]; @12(1)[3]; " "@16" + Int128::Name() + "(16)[4]", x.DebugString()); } { constexpr Layout x(1, 2, 3, 4); EXPECT_EQ( "@0(1)[1]; @4(4)[2]; @12(1)[3]; " "@16" + Int128::Name() + "(16)[4]", x.DebugString()); } } TEST(Layout, CharTypes) { constexpr Layout x(1); alignas(max_align_t) char c[x.AllocSize()] = {}; alignas(max_align_t) unsigned char uc[x.AllocSize()] = {}; alignas(max_align_t) signed char sc[x.AllocSize()] = {}; alignas(max_align_t) const char cc[x.AllocSize()] = {}; alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {}; alignas(max_align_t) const signed char 
csc[x.AllocSize()] = {}; Type(x.Pointer<0>(c)); Type(x.Pointer<0>(uc)); Type(x.Pointer<0>(sc)); Type(x.Pointer<0>(cc)); Type(x.Pointer<0>(cuc)); Type(x.Pointer<0>(csc)); Type(x.Pointer(c)); Type(x.Pointer(uc)); Type(x.Pointer(sc)); Type(x.Pointer(cc)); Type(x.Pointer(cuc)); Type(x.Pointer(csc)); Type>(x.Pointers(c)); Type>(x.Pointers(uc)); Type>(x.Pointers(sc)); Type>(x.Pointers(cc)); Type>(x.Pointers(cuc)); Type>(x.Pointers(csc)); Type>(x.Slice<0>(c)); Type>(x.Slice<0>(uc)); Type>(x.Slice<0>(sc)); Type>(x.Slice<0>(cc)); Type>(x.Slice<0>(cuc)); Type>(x.Slice<0>(csc)); Type>>(x.Slices(c)); Type>>(x.Slices(uc)); Type>>(x.Slices(sc)); Type>>(x.Slices(cc)); Type>>(x.Slices(cuc)); Type>>(x.Slices(csc)); } TEST(Layout, ConstElementType) { constexpr Layout x(1); alignas(int32_t) char c[x.AllocSize()] = {}; const char* cc = c; const int32_t* p = reinterpret_cast(cc); EXPECT_EQ(alignof(int32_t), x.Alignment()); EXPECT_EQ(0, x.Offset<0>()); EXPECT_EQ(0, x.Offset()); EXPECT_THAT(x.Offsets(), ElementsAre(0)); EXPECT_EQ(1, x.Size<0>()); EXPECT_EQ(1, x.Size()); EXPECT_THAT(x.Sizes(), ElementsAre(1)); EXPECT_EQ(sizeof(int32_t), x.AllocSize()); EXPECT_EQ(p, Type(x.Pointer<0>(c))); EXPECT_EQ(p, Type(x.Pointer<0>(cc))); EXPECT_EQ(p, Type(x.Pointer(c))); EXPECT_EQ(p, Type(x.Pointer(cc))); EXPECT_THAT(Type>(x.Pointers(c)), Tuple(p)); EXPECT_THAT(Type>(x.Pointers(cc)), Tuple(p)); EXPECT_THAT(Type>(x.Slice<0>(c)), IsSameSlice(Span(p, 1))); EXPECT_THAT(Type>(x.Slice<0>(cc)), IsSameSlice(Span(p, 1))); EXPECT_THAT(Type>(x.Slice(c)), IsSameSlice(Span(p, 1))); EXPECT_THAT(Type>(x.Slice(cc)), IsSameSlice(Span(p, 1))); EXPECT_THAT(Type>>(x.Slices(c)), Tuple(IsSameSlice(Span(p, 1)))); EXPECT_THAT(Type>>(x.Slices(cc)), Tuple(IsSameSlice(Span(p, 1)))); } namespace example { // Immutable move-only string with sizeof equal to sizeof(void*). The string // size and the characters are kept in the same heap allocation. 
class CompactString { public: CompactString(const char* s = "") { // NOLINT const size_t size = strlen(s); // size_t[1], followed by char[size + 1]. // This statement doesn't allocate memory. const L layout(1, size + 1); // AllocSize() tells us how much memory we need to allocate for all our // data. p_.reset(new unsigned char[layout.AllocSize()]); // If running under ASAN, mark the padding bytes, if any, to catch memory // errors. layout.PoisonPadding(p_.get()); // Store the size in the allocation. // Pointer() is a synonym for Pointer<0>(). *layout.Pointer(p_.get()) = size; // Store the characters in the allocation. memcpy(layout.Pointer(p_.get()), s, size + 1); } size_t size() const { // Equivalent to reinterpret_cast(*p). return *L::Partial().Pointer(p_.get()); } const char* c_str() const { // Equivalent to reinterpret_cast(p.get() + sizeof(size_t)). // The argument in Partial(1) specifies that we have size_t[1] in front of // the characters. return L::Partial(1).Pointer(p_.get()); } private: // Our heap allocation contains a size_t followed by an array of chars. using L = Layout; std::unique_ptr p_; }; TEST(CompactString, Works) { CompactString s = "hello"; EXPECT_EQ(5, s.size()); EXPECT_STREQ("hello", s.c_str()); } } // namespace example } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/node_slot_policy.h000066400000000000000000000054521430371345100236160ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // // Adapts a policy for nodes. // // The node policy should model: // // struct Policy { // // Returns a new node allocated and constructed using the allocator, using // // the specified arguments. // template // value_type* new_element(Alloc* alloc, Args&&... args) const; // // // Destroys and deallocates node using the allocator. // template // void delete_element(Alloc* alloc, value_type* node) const; // }; // // It may also optionally define `value()` and `apply()`. For documentation on // these, see hash_policy_traits.h. #ifndef ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ #define ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ #include #include #include #include #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template struct node_slot_policy { static_assert(std::is_lvalue_reference::value, ""); using slot_type = typename std::remove_cv< typename std::remove_reference::type>::type*; template static void construct(Alloc* alloc, slot_type* slot, Args&&... args) { *slot = Policy::new_element(alloc, std::forward(args)...); } template static void destroy(Alloc* alloc, slot_type* slot) { Policy::delete_element(alloc, *slot); } template static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) { *new_slot = *old_slot; } static size_t space_used(const slot_type* slot) { if (slot == nullptr) return Policy::element_space_used(nullptr); return Policy::element_space_used(*slot); } static Reference element(slot_type* slot) { return **slot; } template static auto value(T* elem) -> decltype(P::value(elem)) { return P::value(elem); } template static auto apply(Ts&&... 
ts) -> decltype(P::apply(std::forward(ts)...)) { return P::apply(std::forward(ts)...); } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_NODE_SLOT_POLICY_H_ abseil-20220623.1/absl/container/internal/node_slot_policy_test.cc000066400000000000000000000032551430371345100250120ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/node_slot_policy.h" #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/container/internal/hash_policy_traits.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { using ::testing::Pointee; struct Policy : node_slot_policy { using key_type = int; using init_type = int; template static int* new_element(Alloc* alloc, int value) { return new int(value); } template static void delete_element(Alloc* alloc, int* elem) { delete elem; } }; using NodePolicy = hash_policy_traits; struct NodeTest : ::testing::Test { std::allocator alloc; int n = 53; int* a = &n; }; TEST_F(NodeTest, ConstructDestroy) { NodePolicy::construct(&alloc, &a, 42); EXPECT_THAT(a, Pointee(42)); NodePolicy::destroy(&alloc, &a); } TEST_F(NodeTest, transfer) { int s = 42; int* b = &s; NodePolicy::transfer(&alloc, &a, &b); EXPECT_EQ(&s, a); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl 
abseil-20220623.1/absl/container/internal/raw_hash_map.h000066400000000000000000000167301430371345100227030ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ #include #include #include #include "absl/base/internal/throw_delegate.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template class raw_hash_map : public raw_hash_set { // P is Policy. It's passed as a template argument to support maps that have // incomplete types as values, as in unordered_map. // MappedReference<> may be a non-reference type. template using MappedReference = decltype(P::value( std::addressof(std::declval()))); // MappedConstReference<> may be a non-reference type. template using MappedConstReference = decltype(P::value( std::addressof(std::declval()))); using KeyArgImpl = KeyArg::value && IsTransparent::value>; public: using key_type = typename Policy::key_type; using mapped_type = typename Policy::mapped_type; template using key_arg = typename KeyArgImpl::template type; static_assert(!std::is_reference::value, ""); // TODO(b/187807849): Evaluate whether to support reference mapped_type and // remove this assertion if/when it is supported. 
static_assert(!std::is_reference::value, ""); using iterator = typename raw_hash_map::raw_hash_set::iterator; using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator; raw_hash_map() {} using raw_hash_map::raw_hash_set::raw_hash_set; // The last two template parameters ensure that both arguments are rvalues // (lvalue arguments are handled by the overloads below). This is necessary // for supporting bitfield arguments. // // union { int n : 1; }; // flat_hash_map m; // m.insert_or_assign(n, n); template std::pair insert_or_assign(key_arg&& k, V&& v) { return insert_or_assign_impl(std::forward(k), std::forward(v)); } template std::pair insert_or_assign(key_arg&& k, const V& v) { return insert_or_assign_impl(std::forward(k), v); } template std::pair insert_or_assign(const key_arg& k, V&& v) { return insert_or_assign_impl(k, std::forward(v)); } template std::pair insert_or_assign(const key_arg& k, const V& v) { return insert_or_assign_impl(k, v); } template iterator insert_or_assign(const_iterator, key_arg&& k, V&& v) { return insert_or_assign(std::forward(k), std::forward(v)).first; } template iterator insert_or_assign(const_iterator, key_arg&& k, const V& v) { return insert_or_assign(std::forward(k), v).first; } template iterator insert_or_assign(const_iterator, const key_arg& k, V&& v) { return insert_or_assign(k, std::forward(v)).first; } template iterator insert_or_assign(const_iterator, const key_arg& k, const V& v) { return insert_or_assign(k, v).first; } // All `try_emplace()` overloads make the same guarantees regarding rvalue // arguments as `std::unordered_map::try_emplace()`, namely that these // functions will not move from rvalue arguments if insertions do not happen. template ::value, int>::type = 0, K* = nullptr> std::pair try_emplace(key_arg&& k, Args&&... args) { return try_emplace_impl(std::forward(k), std::forward(args)...); } template ::value, int>::type = 0> std::pair try_emplace(const key_arg& k, Args&&... 
args) { return try_emplace_impl(k, std::forward(args)...); } template iterator try_emplace(const_iterator, key_arg&& k, Args&&... args) { return try_emplace(std::forward(k), std::forward(args)...).first; } template iterator try_emplace(const_iterator, const key_arg& k, Args&&... args) { return try_emplace(k, std::forward(args)...).first; } template MappedReference

at(const key_arg& key) { auto it = this->find(key); if (it == this->end()) { base_internal::ThrowStdOutOfRange( "absl::container_internal::raw_hash_map<>::at"); } return Policy::value(&*it); } template MappedConstReference

at(const key_arg& key) const { auto it = this->find(key); if (it == this->end()) { base_internal::ThrowStdOutOfRange( "absl::container_internal::raw_hash_map<>::at"); } return Policy::value(&*it); } template MappedReference

operator[](key_arg&& key) { return Policy::value(&*try_emplace(std::forward(key)).first); } template MappedReference

operator[](const key_arg& key) { return Policy::value(&*try_emplace(key).first); } private: template std::pair insert_or_assign_impl(K&& k, V&& v) { auto res = this->find_or_prepare_insert(k); if (res.second) this->emplace_at(res.first, std::forward(k), std::forward(v)); else Policy::value(&*this->iterator_at(res.first)) = std::forward(v); return {this->iterator_at(res.first), res.second}; } template std::pair try_emplace_impl(K&& k, Args&&... args) { auto res = this->find_or_prepare_insert(k); if (res.second) this->emplace_at(res.first, std::piecewise_construct, std::forward_as_tuple(std::forward(k)), std::forward_as_tuple(std::forward(args)...)); return {this->iterator_at(res.first), res.second}; } }; } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_ abseil-20220623.1/absl/container/internal/raw_hash_set.cc000066400000000000000000000051611430371345100230530ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/raw_hash_set.h" #include #include #include "absl/base/config.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { // A single block of empty control bytes for tables without any slots allocated. // This enables removing a branch in the hot path of find(). 
alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = { ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty}; #ifdef ABSL_INTERNAL_NEED_REDUNDANT_CONSTEXPR_DECL constexpr size_t Group::kWidth; #endif // Returns "random" seed. inline size_t RandomSeed() { #ifdef ABSL_HAVE_THREAD_LOCAL static thread_local size_t counter = 0; size_t value = ++counter; #else // ABSL_HAVE_THREAD_LOCAL static std::atomic counter(0); size_t value = counter.fetch_add(1, std::memory_order_relaxed); #endif // ABSL_HAVE_THREAD_LOCAL return value ^ static_cast(reinterpret_cast(&counter)); } bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) { // To avoid problems with weak hashes and single bit tests, we use % 13. // TODO(kfm,sbenza): revisit after we do unconditional mixing return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6; } void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) { assert(ctrl[capacity] == ctrl_t::kSentinel); assert(IsValidCapacity(capacity)); for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) { Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos); } // Copy the cloned ctrl bytes. std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes()); ctrl[capacity] = ctrl_t::kSentinel; } // Extern template instantiotion for inline function. template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/raw_hash_set.h000066400000000000000000002634251430371345100227260ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // An open-addressing // hashtable with quadratic probing. // // This is a low level hashtable on top of which different interfaces can be // implemented, like flat_hash_set, node_hash_set, string_hash_set, etc. // // The table interface is similar to that of std::unordered_set. Notable // differences are that most member functions support heterogeneous keys when // BOTH the hash and eq functions are marked as transparent. They do so by // providing a typedef called `is_transparent`. // // When heterogeneous lookup is enabled, functions that take key_type act as if // they have an overload set like: // // iterator find(const key_type& key); // template // iterator find(const K& key); // // size_type erase(const key_type& key); // template // size_type erase(const K& key); // // std::pair equal_range(const key_type& key); // template // std::pair equal_range(const K& key); // // When heterogeneous lookup is disabled, only the explicit `key_type` overloads // exist. // // find() also supports passing the hash explicitly: // // iterator find(const key_type& key, size_t hash); // template // iterator find(const U& key, size_t hash); // // In addition the pointer to element and iterator stability guarantees are // weaker: all iterators and pointers are invalidated after a new element is // inserted. // // IMPLEMENTATION DETAILS // // # Table Layout // // A raw_hash_set's backing array consists of control bytes followed by slots // that may or may not contain objects. 
// // The layout of the backing array, for `capacity` slots, is thus, as a // pseudo-struct: // // struct BackingArray { // // Control bytes for the "real" slots. // ctrl_t ctrl[capacity]; // // Always `ctrl_t::kSentinel`. This is used by iterators to find when to // // stop and serves no other purpose. // ctrl_t sentinel; // // A copy of the first `kWidth - 1` elements of `ctrl`. This is used so // // that if a probe sequence picks a value near the end of `ctrl`, // // `Group` will have valid control bytes to look at. // ctrl_t clones[kWidth - 1]; // // The actual slot data. // slot_type slots[capacity]; // }; // // The length of this array is computed by `AllocSize()` below. // // Control bytes (`ctrl_t`) are bytes (collected into groups of a // platform-specific size) that define the state of the corresponding slot in // the slot array. Group manipulation is tightly optimized to be as efficient // as possible: SSE and friends on x86, clever bit operations on other arches. // // Group 1 Group 2 Group 3 // +---------------+---------------+---------------+ // | | | | | | | | | | | | | | | | | | | | | | | | | // +---------------+---------------+---------------+ // // Each control byte is either a special value for empty slots, deleted slots // (sometimes called *tombstones*), and a special end-of-table marker used by // iterators, or, if occupied, seven bits (H2) from the hash of the value in the // corresponding slot. // // Storing control bytes in a separate array also has beneficial cache effects, // since more logical slots will fit into a cache line. // // # Hashing // // We compute two separate hashes, `H1` and `H2`, from the hash of an object. // `H1(hash(x))` is an index into `slots`, and essentially the starting point // for the probe sequence. `H2(hash(x))` is a 7-bit value used to filter out // objects that cannot possibly be the one we are looking for. // // # Table operations. // // The key operations are `insert`, `find`, and `erase`. 
// // Since `insert` and `erase` are implemented in terms of `find`, we describe // `find` first. To `find` a value `x`, we compute `hash(x)`. From // `H1(hash(x))` and the capacity, we construct a `probe_seq` that visits every // group of slots in some interesting order. // // We now walk through these indices. At each index, we select the entire group // starting with that index and extract potential candidates: occupied slots // with a control byte equal to `H2(hash(x))`. If we find an empty slot in the // group, we stop and return an error. Each candidate slot `y` is compared with // `x`; if `x == y`, we are done and return `&y`; otherwise we contine to the // next probe index. Tombstones effectively behave like full slots that never // match the value we're looking for. // // The `H2` bits ensure when we compare a slot to an object with `==`, we are // likely to have actually found the object. That is, the chance is low that // `==` is called and returns `false`. Thus, when we search for an object, we // are unlikely to call `==` many times. This likelyhood can be analyzed as // follows (assuming that H2 is a random enough hash function). // // Let's assume that there are `k` "wrong" objects that must be examined in a // probe sequence. For example, when doing a `find` on an object that is in the // table, `k` is the number of objects between the start of the probe sequence // and the final found object (not including the final found object). The // expected number of objects with an H2 match is then `k/128`. Measurements // and analysis indicate that even at high load factors, `k` is less than 32, // meaning that the number of "false positive" comparisons we must perform is // less than 1/8 per `find`. // `insert` is implemented in terms of `unchecked_insert`, which inserts a // value presumed to not be in the table (violating this requirement will cause // the table to behave erratically). 
Given `x` and its hash `hash(x)`, to insert // it, we construct a `probe_seq` once again, and use it to find the first // group with an unoccupied (empty *or* deleted) slot. We place `x` into the // first such slot in the group and mark it as full with `x`'s H2. // // To `insert`, we compose `unchecked_insert` with `find`. We compute `h(x)` and // perform a `find` to see if it's already present; if it is, we're done. If // it's not, we may decide the table is getting overcrowded (i.e. the load // factor is greater than 7/8 for big tables; `is_small()` tables use a max load // factor of 1); in this case, we allocate a bigger array, `unchecked_insert` // each element of the table into the new array (we know that no insertion here // will insert an already-present value), and discard the old backing array. At // this point, we may `unchecked_insert` the value `x`. // // Below, `unchecked_insert` is partly implemented by `prepare_insert`, which // presents a viable, initialized slot pointee to the caller. // // `erase` is implemented in terms of `erase_at`, which takes an index to a // slot. Given an offset, we simply create a tombstone and destroy its contents. // If we can prove that the slot would not appear in a probe sequence, we can // make the slot as empty, instead. We can prove this by observing that if a // group has any empty slots, it has never been full (assuming we never create // an empty slot in a group with no empties, which this heuristic guarantees we // never do) and find would stop at this group anyways (since it does not probe // beyond groups with empties). // // `erase` is `erase_at` composed with `find`: if we // have a value `x`, we can perform a `find`, and then `erase_at` the resulting // slot. // // To iterate, we simply traverse the array, skipping empty and deleted slots // and stopping when we hit a `kSentinel`. 
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ #include #include #include #include #include #include #include #include #include #include #include "absl/base/config.h" #include "absl/base/internal/endian.h" #include "absl/base/internal/prefetch.h" #include "absl/base/optimization.h" #include "absl/base/port.h" #include "absl/container/internal/common.h" #include "absl/container/internal/compressed_tuple.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_policy_traits.h" #include "absl/container/internal/hashtable_debug_hooks.h" #include "absl/container/internal/hashtablez_sampler.h" #include "absl/memory/memory.h" #include "absl/meta/type_traits.h" #include "absl/numeric/bits.h" #include "absl/utility/utility.h" #ifdef ABSL_INTERNAL_HAVE_SSE2 #include #endif #ifdef ABSL_INTERNAL_HAVE_SSSE3 #include #endif #ifdef _MSC_VER #include #endif #ifdef ABSL_INTERNAL_HAVE_ARM_NEON #include #endif namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { template void SwapAlloc(AllocType& lhs, AllocType& rhs, std::true_type /* propagate_on_container_swap */) { using std::swap; swap(lhs, rhs); } template void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/, std::false_type /* propagate_on_container_swap */) {} // The state for a probe sequence. // // Currently, the sequence is a triangular progression of the form // // p(i) := Width * (i^2 + i)/2 + hash (mod mask + 1) // // The use of `Width` ensures that each probe step does not overlap groups; // the sequence effectively outputs the addresses of *groups* (although not // necessarily aligned to any boundary). The `Group` machinery allows us // to check an entire group with minimal branching. // // Wrapping around at `mask + 1` is important, but not for the obvious reason. 
// As described above, the first few entries of the control byte array // are mirrored at the end of the array, which `Group` will find and use // for selecting candidates. However, when those candidates' slots are // actually inspected, there are no corresponding slots for the cloned bytes, // so we need to make sure we've treated those offsets as "wrapping around". // // It turns out that this probe sequence visits every group exactly once if the // number of groups is a power of two, since (i^2+i)/2 is a bijection in // Z/(2^m). See https://en.wikipedia.org/wiki/Quadratic_probing template class probe_seq { public: // Creates a new probe sequence using `hash` as the initial value of the // sequence and `mask` (usually the capacity of the table) as the mask to // apply to each value in the progression. probe_seq(size_t hash, size_t mask) { assert(((mask + 1) & mask) == 0 && "not a mask"); mask_ = mask; offset_ = hash & mask_; } // The offset within the table, i.e., the value `p(i)` above. size_t offset() const { return offset_; } size_t offset(size_t i) const { return (offset_ + i) & mask_; } void next() { index_ += Width; offset_ += index_; offset_ &= mask_; } // 0-based probe index, a multiple of `Width`. size_t index() const { return index_; } private: size_t mask_; size_t offset_; size_t index_ = 0; }; template struct RequireUsableKey { template std::pair< decltype(std::declval()(std::declval())), decltype(std::declval()(std::declval(), std::declval()))>* operator()(const PassedKey&, const Args&...) const; }; template struct IsDecomposable : std::false_type {}; template struct IsDecomposable< absl::void_t(), std::declval()...))>, Policy, Hash, Eq, Ts...> : std::true_type {}; // TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it. 
template constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) { using std::swap; return noexcept(swap(std::declval(), std::declval())); } template constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) { return false; } template uint32_t TrailingZeros(T x) { ABSL_ASSUME(x != 0); return static_cast(countr_zero(x)); } // An abstract bitmask, such as that emitted by a SIMD instruction. // // Specifically, this type implements a simple bitset whose representation is // controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number // of abstract bits in the bitset, while `Shift` is the log-base-two of the // width of an abstract bit in the representation. // This mask provides operations for any number of real bits set in an abstract // bit. To add iteration on top of that, implementation must guarantee no more // than one real bit is set in an abstract bit. template class NonIterableBitMask { public: explicit NonIterableBitMask(T mask) : mask_(mask) {} explicit operator bool() const { return this->mask_ != 0; } // Returns the index of the lowest *abstract* bit set in `self`. uint32_t LowestBitSet() const { return container_internal::TrailingZeros(mask_) >> Shift; } // Returns the index of the highest *abstract* bit set in `self`. uint32_t HighestBitSet() const { return static_cast((bit_width(mask_) - 1) >> Shift); } // Return the number of trailing zero *abstract* bits. uint32_t TrailingZeros() const { return container_internal::TrailingZeros(mask_) >> Shift; } // Return the number of leading zero *abstract* bits. 
uint32_t LeadingZeros() const { constexpr int total_significant_bits = SignificantBits << Shift; constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits; return static_cast(countl_zero(mask_ << extra_bits)) >> Shift; } T mask_; }; // Mask that can be iterable // // For example, when `SignificantBits` is 16 and `Shift` is zero, this is just // an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When // `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as // the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask. // // For example: // for (int i : BitMask(0b101)) -> yields 0, 2 // for (int i : BitMask(0x0000000080800000)) -> yields 2, 3 template class BitMask : public NonIterableBitMask { using Base = NonIterableBitMask; static_assert(std::is_unsigned::value, ""); static_assert(Shift == 0 || Shift == 3, ""); public: explicit BitMask(T mask) : Base(mask) {} // BitMask is an iterator over the indices of its abstract bits. using value_type = int; using iterator = BitMask; using const_iterator = BitMask; BitMask& operator++() { this->mask_ &= (this->mask_ - 1); return *this; } uint32_t operator*() const { return Base::LowestBitSet(); } BitMask begin() const { return *this; } BitMask end() const { return BitMask(0); } private: friend bool operator==(const BitMask& a, const BitMask& b) { return a.mask_ == b.mask_; } friend bool operator!=(const BitMask& a, const BitMask& b) { return a.mask_ != b.mask_; } }; using h2_t = uint8_t; // The values here are selected for maximum performance. See the static asserts // below for details. // A `ctrl_t` is a single control byte, which can have one of four // states: empty, deleted, full (which has an associated seven-bit h2_t value) // and the sentinel. They have the following bit patterns: // // empty: 1 0 0 0 0 0 0 0 // deleted: 1 1 1 1 1 1 1 0 // full: 0 h h h h h h h // h represents the hash bits. 
// sentinel: 1 1 1 1 1 1 1 1 // // These values are specifically tuned for SSE-flavored SIMD. // The static_asserts below detail the source of these choices. // // We use an enum class so that when strict aliasing is enabled, the compiler // knows ctrl_t doesn't alias other types. enum class ctrl_t : int8_t { kEmpty = -128, // 0b10000000 kDeleted = -2, // 0b11111110 kSentinel = -1, // 0b11111111 }; static_assert( (static_cast(ctrl_t::kEmpty) & static_cast(ctrl_t::kDeleted) & static_cast(ctrl_t::kSentinel) & 0x80) != 0, "Special markers need to have the MSB to make checking for them efficient"); static_assert( ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel, "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than " "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient"); static_assert( ctrl_t::kSentinel == static_cast(-1), "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD " "registers (pcmpeqd xmm, xmm)"); static_assert(ctrl_t::kEmpty == static_cast(-128), "ctrl_t::kEmpty must be -128 to make the SIMD check for its " "existence efficient (psignb xmm, xmm)"); static_assert( (~static_cast(ctrl_t::kEmpty) & ~static_cast(ctrl_t::kDeleted) & static_cast(ctrl_t::kSentinel) & 0x7F) != 0, "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not " "shared by ctrl_t::kSentinel to make the scalar test for " "MaskEmptyOrDeleted() efficient"); static_assert(ctrl_t::kDeleted == static_cast(-2), "ctrl_t::kDeleted must be -2 to make the implementation of " "ConvertSpecialToEmptyAndFullToDeleted efficient"); ABSL_DLL extern const ctrl_t kEmptyGroup[16]; // Returns a pointer to a control byte group that can be used by empty tables. inline ctrl_t* EmptyGroup() { // Const must be cast away here; no uses of this function will actually write // to it, because it is only used for empty tables. 
return const_cast(kEmptyGroup); } // Mixes a randomly generated per-process seed with `hash` and `ctrl` to // randomize insertion order within groups. bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl); // Returns a per-table, hash salt, which changes on resize. This gets mixed into // H1 to randomize iteration order per-table. // // The seed consists of the ctrl_ pointer, which adds enough entropy to ensure // non-determinism of iteration order in most cases. inline size_t PerTableSalt(const ctrl_t* ctrl) { // The low bits of the pointer have little or no entropy because of // alignment. We shift the pointer to try to use higher entropy bits. A // good number seems to be 12 bits, because that aligns with page size. return reinterpret_cast(ctrl) >> 12; } // Extracts the H1 portion of a hash: 57 bits mixed with a per-table salt. inline size_t H1(size_t hash, const ctrl_t* ctrl) { return (hash >> 7) ^ PerTableSalt(ctrl); } // Extracts the H2 portion of a hash: the 7 bits not used for H1. // // These are used as an occupied control byte. inline h2_t H2(size_t hash) { return hash & 0x7F; } // Helpers for checking the state of a control byte. inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; } inline bool IsFull(ctrl_t c) { return c >= static_cast(0); } inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; } inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; } #ifdef ABSL_INTERNAL_HAVE_SSE2 // Quick reference guide for intrinsics used below: // // * __m128i: An XMM (128-bit) word. // // * _mm_setzero_si128: Returns a zero vector. // * _mm_set1_epi8: Returns a vector with the same i8 in each lane. // // * _mm_subs_epi8: Saturating-subtracts two i8 vectors. // * _mm_and_si128: Ands two i128s together. // * _mm_or_si128: Ors two i128s together. // * _mm_andnot_si128: And-nots two i128s together. // // * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality, // filling each lane with 0x00 or 0xff. 
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==. // // * _mm_loadu_si128: Performs an unaligned load of an i128. // * _mm_storeu_si128: Performs an unaligned store of an i128. // // * _mm_sign_epi8: Retains, negates, or zeroes each i8 lane of the first // argument if the corresponding lane of the second // argument is positive, negative, or zero, respectively. // * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a // bitmask consisting of those bits. // * _mm_shuffle_epi8: Selects i8s from the first argument, using the low // four bits of each i8 lane in the second argument as // indices. // https://github.com/abseil/abseil-cpp/issues/209 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853 // _mm_cmpgt_epi8 is broken under GCC with -funsigned-char // Work around this by using the portable implementation of Group // when using -funsigned-char under GCC. inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) { #if defined(__GNUC__) && !defined(__clang__) if (std::is_unsigned::value) { const __m128i mask = _mm_set1_epi8(0x80); const __m128i diff = _mm_subs_epi8(b, a); return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask); } #endif return _mm_cmpgt_epi8(a, b); } struct GroupSse2Impl { static constexpr size_t kWidth = 16; // the number of slots per group explicit GroupSse2Impl(const ctrl_t* pos) { ctrl = _mm_loadu_si128(reinterpret_cast(pos)); } // Returns a bitmask representing the positions of slots that match hash. BitMask Match(h2_t hash) const { auto match = _mm_set1_epi8(hash); return BitMask( static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); } // Returns a bitmask representing the positions of empty slots. NonIterableBitMask MaskEmpty() const { #ifdef ABSL_INTERNAL_HAVE_SSSE3 // This only works because ctrl_t::kEmpty is -128. 
return NonIterableBitMask( static_cast(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)))); #else auto match = _mm_set1_epi8(static_cast(ctrl_t::kEmpty)); return NonIterableBitMask( static_cast(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)))); #endif } // Returns a bitmask representing the positions of empty or deleted slots. NonIterableBitMask MaskEmptyOrDeleted() const { auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return NonIterableBitMask(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)))); } // Returns the number of trailing empty or deleted elements in the group. uint32_t CountLeadingEmptyOrDeleted() const { auto special = _mm_set1_epi8(static_cast(ctrl_t::kSentinel)); return TrailingZeros(static_cast( _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1)); } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { auto msbs = _mm_set1_epi8(static_cast(-128)); auto x126 = _mm_set1_epi8(126); #ifdef ABSL_INTERNAL_HAVE_SSSE3 auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs); #else auto zero = _mm_setzero_si128(); auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl); auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126)); #endif _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res); } __m128i ctrl; }; #endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 #if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) struct GroupAArch64Impl { static constexpr size_t kWidth = 8; explicit GroupAArch64Impl(const ctrl_t* pos) { ctrl = vld1_u8(reinterpret_cast(pos)); } BitMask Match(h2_t hash) const { uint8x8_t dup = vdup_n_u8(hash); auto mask = vceq_u8(ctrl, dup); constexpr uint64_t msbs = 0x8080808080808080ULL; return BitMask( vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs); } NonIterableBitMask MaskEmpty() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8( vceq_s8(vdup_n_s8(static_cast(ctrl_t::kEmpty)), vreinterpret_s8_u8(ctrl))), 0); return NonIterableBitMask(mask); } 
NonIterableBitMask MaskEmptyOrDeleted() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(vcgt_s8( vdup_n_s8(static_cast(ctrl_t::kSentinel)), vreinterpret_s8_u8(ctrl))), 0); return NonIterableBitMask(mask); } uint32_t CountLeadingEmptyOrDeleted() const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and // kDeleted. We lower all other bits and count number of trailing zeros. // Clang and GCC optimize countr_zero to rbit+clz without any check for 0, // so we should be fine. constexpr uint64_t bits = 0x0101010101010101ULL; return countr_zero((mask | ~(mask >> 7)) & bits) >> 3; } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0); constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = mask & msbs; auto res = (~x + (x >> 7)) & ~lsbs; little_endian::Store64(dst, res); } uint8x8_t ctrl; }; #endif // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN struct GroupPortableImpl { static constexpr size_t kWidth = 8; explicit GroupPortableImpl(const ctrl_t* pos) : ctrl(little_endian::Load64(pos)) {} BitMask Match(h2_t hash) const { // For the technique, see: // http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord // (Determine if a word has a byte equal to n). 
// // Caveat: there are false positives but: // - they only occur if there is a real match // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel // - they will be handled gracefully by subsequent checks in code // // Example: // v = 0x1716151413121110 // hash = 0x12 // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000 constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = ctrl ^ (lsbs * hash); return BitMask((x - lsbs) & ~x & msbs); } NonIterableBitMask MaskEmpty() const { constexpr uint64_t msbs = 0x8080808080808080ULL; return NonIterableBitMask((ctrl & (~ctrl << 6)) & msbs); } NonIterableBitMask MaskEmptyOrDeleted() const { constexpr uint64_t msbs = 0x8080808080808080ULL; return NonIterableBitMask((ctrl & (~ctrl << 7)) & msbs); } uint32_t CountLeadingEmptyOrDeleted() const { // ctrl | ~(ctrl >> 7) will have the lowest bit set to zero for kEmpty and // kDeleted. We lower all other bits and count number of trailing zeros. constexpr uint64_t bits = 0x0101010101010101ULL; return countr_zero((ctrl | ~(ctrl >> 7)) & bits) >> 3; } void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const { constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = ctrl & msbs; auto res = (~x + (x >> 7)) & ~lsbs; little_endian::Store64(dst, res); } uint64_t ctrl; }; #ifdef ABSL_INTERNAL_HAVE_SSE2 using Group = GroupSse2Impl; #elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN) using Group = GroupAArch64Impl; #else using Group = GroupPortableImpl; #endif // Returns he number of "cloned control bytes". // // This is the number of control bytes that are present both at the beginning // of the control byte array and at the end, such that we can create a // `Group::kWidth`-width probe window starting from any control byte. 
constexpr size_t NumClonedBytes() { return Group::kWidth - 1; } template class raw_hash_set; // Returns whether `n` is a valid capacity (i.e., number of slots). // // A valid capacity is a non-zero integer `2^m - 1`. inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; } // Applies the following mapping to every byte in the control array: // * kDeleted -> kEmpty // * kEmpty -> kEmpty // * _ -> kDeleted // PRECONDITION: // IsValidCapacity(capacity) // ctrl[capacity] == ctrl_t::kSentinel // ctrl[i] != ctrl_t::kSentinel for all i < capacity void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity); // Converts `n` into the next valid capacity, per `IsValidCapacity`. inline size_t NormalizeCapacity(size_t n) { return n ? ~size_t{} >> countl_zero(n) : 1; } // General notes on capacity/growth methods below: // - We use 7/8th as maximum load factor. For 16-wide groups, that gives an // average of two empty slots per group. // - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity. // - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we // never need to probe (the whole table fits in one group) so we don't need a // load factor less than 1. // Given `capacity`, applies the load factor; i.e., it returns the maximum // number of values we should put into the table before a resizing rehash. inline size_t CapacityToGrowth(size_t capacity) { assert(IsValidCapacity(capacity)); // `capacity*7/8` if (Group::kWidth == 8 && capacity == 7) { // x-x/8 does not work when x==7. return 6; } return capacity - capacity / 8; } // Given `growth`, "unapplies" the load factor to find how large the capacity // should be to stay within the load factor. // // This might not be a valid capacity and `NormalizeCapacity()` should be // called on this. inline size_t GrowthToLowerboundCapacity(size_t growth) { // `growth*8/7` if (Group::kWidth == 8 && growth == 7) { // x+(x-1)/7 does not work when x==7. 
return 8; } return growth + static_cast((static_cast(growth) - 1) / 7); } template size_t SelectBucketCountForIterRange(InputIter first, InputIter last, size_t bucket_count) { if (bucket_count != 0) { return bucket_count; } using InputIterCategory = typename std::iterator_traits::iterator_category; if (std::is_base_of::value) { return GrowthToLowerboundCapacity( static_cast(std::distance(first, last))); } return 0; } #define ABSL_INTERNAL_ASSERT_IS_FULL(ctrl, msg) \ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) && msg) inline void AssertIsValid(ctrl_t* ctrl) { ABSL_HARDENING_ASSERT( (ctrl == nullptr || IsFull(*ctrl)) && "Invalid operation on iterator. The element might have " "been erased, the table might have rehashed, or this may " "be an end() iterator."); } struct FindInfo { size_t offset; size_t probe_length; }; // Whether a table is "small". A small table fits entirely into a probing // group, i.e., has a capacity < `Group::kWidth`. // // In small mode we are able to use the whole capacity. The extra control // bytes give us at least one "empty" control byte to stop the iteration. // This is important to make 1 a valid capacity. // // In small mode only the first `capacity` control bytes after the sentinel // are valid. The rest contain dummy ctrl_t::kEmpty values that do not // represent a real slot. This is important to take into account on // `find_first_non_full()`, where we never try // `ShouldInsertBackwards()` for small tables. inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; } // Begins a probing operation on `ctrl`, using `hash`. inline probe_seq probe(const ctrl_t* ctrl, size_t hash, size_t capacity) { return probe_seq(H1(hash, ctrl), capacity); } // Probes an array of control bits using a probe sequence derived from `hash`, // and returns the offset corresponding to the first deleted or empty slot. // // Behavior when the entire table is full is undefined. 
// // NOTE: this function must work with tables having both empty and deleted // slots in the same group. Such tables appear during `erase()`. template inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash, size_t capacity) { auto seq = probe(ctrl, hash, capacity); while (true) { Group g{ctrl + seq.offset()}; auto mask = g.MaskEmptyOrDeleted(); if (mask) { #if !defined(NDEBUG) // We want to add entropy even when ASLR is not enabled. // In debug build we will randomly insert in either the front or back of // the group. // TODO(kfm,sbenza): revisit after we do unconditional mixing if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) { return {seq.offset(mask.HighestBitSet()), seq.index()}; } #endif return {seq.offset(mask.LowestBitSet()), seq.index()}; } seq.next(); assert(seq.index() <= capacity && "full table!"); } } // Extern template for inline function keep possibility of inlining. // When compiler decided to not inline, no symbols will be added to the // corresponding translation unit. extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t); // Sets `ctrl` to `{kEmpty, kSentinel, ..., kEmpty}`, marking the entire // array as marked as empty. inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { std::memset(ctrl, static_cast(ctrl_t::kEmpty), capacity + 1 + NumClonedBytes()); ctrl[capacity] = ctrl_t::kSentinel; SanitizerPoisonMemoryRegion(slot, slot_size * capacity); } // Sets `ctrl[i]` to `h`. // // Unlike setting it directly, this function will perform bounds checks and // mirror the value to the cloned tail if necessary. 
inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { assert(i < capacity); auto* slot_i = static_cast(slot) + i * slot_size; if (IsFull(h)) { SanitizerUnpoisonMemoryRegion(slot_i, slot_size); } else { SanitizerPoisonMemoryRegion(slot_i, slot_size); } ctrl[i] = h; ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h; } // Overload for setting to an occupied `h2_t` rather than a special `ctrl_t`. inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl, const void* slot, size_t slot_size) { SetCtrl(i, static_cast(h), capacity, ctrl, slot, slot_size); } // Given the capacity of a table, computes the offset (from the start of the // backing allocation) at which the slots begin. inline size_t SlotOffset(size_t capacity, size_t slot_align) { assert(IsValidCapacity(capacity)); const size_t num_control_bytes = capacity + 1 + NumClonedBytes(); return (num_control_bytes + slot_align - 1) & (~slot_align + 1); } // Given the capacity of a table, computes the total size of the backing // array. inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) { return SlotOffset(capacity, slot_align) + capacity * slot_size; } // A SwissTable. // // Policy: a policy defines how to perform different operations on // the slots of the hashtable (see hash_policy_traits.h for the full interface // of policy). // // Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The // functor should accept a key and return size_t as hash. For best performance // it is important that the hash function provides high entropy across all bits // of the hash. // // Eq: a (possibly polymorphic) functor that compares two keys for equality. It // should accept two (of possibly different type) keys and return a bool: true // if they are equal, false if they are not. If two keys compare equal, then // their hash values as defined by Hash MUST be equal. 
// // Allocator: an Allocator // [https://en.cppreference.com/w/cpp/named_req/Allocator] with which // the storage of the hashtable will be allocated and the elements will be // constructed and destroyed. template class raw_hash_set { using PolicyTraits = hash_policy_traits; using KeyArgImpl = KeyArg::value && IsTransparent::value>; public: using init_type = typename PolicyTraits::init_type; using key_type = typename PolicyTraits::key_type; // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user // code fixes! using slot_type = typename PolicyTraits::slot_type; using allocator_type = Alloc; using size_type = size_t; using difference_type = ptrdiff_t; using hasher = Hash; using key_equal = Eq; using policy_type = Policy; using value_type = typename PolicyTraits::value_type; using reference = value_type&; using const_reference = const value_type&; using pointer = typename absl::allocator_traits< allocator_type>::template rebind_traits::pointer; using const_pointer = typename absl::allocator_traits< allocator_type>::template rebind_traits::const_pointer; // Alias used for heterogeneous lookup functions. // `key_arg` evaluates to `K` when the functors are transparent and to // `key_type` otherwise. It permits template argument deduction on `K` for the // transparent case. template using key_arg = typename KeyArgImpl::template type; private: // Give an early error when key_type is not hashable/eq. 
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k)); auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k)); using AllocTraits = absl::allocator_traits; using SlotAlloc = typename absl::allocator_traits< allocator_type>::template rebind_alloc; using SlotAllocTraits = typename absl::allocator_traits< allocator_type>::template rebind_traits; static_assert(std::is_lvalue_reference::value, "Policy::element() must return a reference"); template struct SameAsElementReference : std::is_same::type>::type, typename std::remove_cv< typename std::remove_reference::type>::type> {}; // An enabler for insert(T&&): T must be convertible to init_type or be the // same as [cv] value_type [ref]. // Note: we separate SameAsElementReference into its own type to avoid using // reference unless we need to. MSVC doesn't seem to like it in some // cases. template using RequiresInsertable = typename std::enable_if< absl::disjunction, SameAsElementReference>::value, int>::type; // RequiresNotInit is a workaround for gcc prior to 7.1. // See https://godbolt.org/g/Y4xsUh. template using RequiresNotInit = typename std::enable_if::value, int>::type; template using IsDecomposable = IsDecomposable; public: static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); static_assert(std::is_same::value, "Allocators with custom pointer types are not supported"); class iterator { friend class raw_hash_set; public: using iterator_category = std::forward_iterator_tag; using value_type = typename raw_hash_set::value_type; using reference = absl::conditional_t; using pointer = absl::remove_reference_t*; using difference_type = typename raw_hash_set::difference_type; iterator() {} // PRECONDITION: not an end() iterator. reference operator*() const { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator*() called on invalid iterator."); return PolicyTraits::element(slot_); } // PRECONDITION: not an end() iterator. 
pointer operator->() const { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator-> called on invalid iterator."); return &operator*(); } // PRECONDITION: not an end() iterator. iterator& operator++() { ABSL_INTERNAL_ASSERT_IS_FULL(ctrl_, "operator++ called on invalid iterator."); ++ctrl_; ++slot_; skip_empty_or_deleted(); return *this; } // PRECONDITION: not an end() iterator. iterator operator++(int) { auto tmp = *this; ++*this; return tmp; } friend bool operator==(const iterator& a, const iterator& b) { AssertIsValid(a.ctrl_); AssertIsValid(b.ctrl_); return a.ctrl_ == b.ctrl_; } friend bool operator!=(const iterator& a, const iterator& b) { return !(a == b); } private: iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) { // This assumption helps the compiler know that any non-end iterator is // not equal to any end iterator. ABSL_ASSUME(ctrl != nullptr); } // Fixes up `ctrl_` to point to a full by advancing it and `slot_` until // they reach one. // // If a sentinel is reached, we null both of them out instead. void skip_empty_or_deleted() { while (IsEmptyOrDeleted(*ctrl_)) { uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted(); ctrl_ += shift; slot_ += shift; } if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr; } ctrl_t* ctrl_ = nullptr; // To avoid uninitialized member warnings, put slot_ in an anonymous union. // The member is not initialized on singleton and end iterators. union { slot_type* slot_; }; }; class const_iterator { friend class raw_hash_set; public: using iterator_category = typename iterator::iterator_category; using value_type = typename raw_hash_set::value_type; using reference = typename raw_hash_set::const_reference; using pointer = typename raw_hash_set::const_pointer; using difference_type = typename raw_hash_set::difference_type; const_iterator() {} // Implicit construction from iterator. 
const_iterator(iterator i) : inner_(std::move(i)) {} reference operator*() const { return *inner_; } pointer operator->() const { return inner_.operator->(); } const_iterator& operator++() { ++inner_; return *this; } const_iterator operator++(int) { return inner_++; } friend bool operator==(const const_iterator& a, const const_iterator& b) { return a.inner_ == b.inner_; } friend bool operator!=(const const_iterator& a, const const_iterator& b) { return !(a == b); } private: const_iterator(const ctrl_t* ctrl, const slot_type* slot) : inner_(const_cast(ctrl), const_cast(slot)) {} iterator inner_; }; using node_type = node_handle, Alloc>; using insert_return_type = InsertReturnType; raw_hash_set() noexcept( std::is_nothrow_default_constructible::value&& std::is_nothrow_default_constructible::value&& std::is_nothrow_default_constructible::value) {} explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : ctrl_(EmptyGroup()), settings_(0, HashtablezInfoHandle(), hash, eq, alloc) { if (bucket_count) { capacity_ = NormalizeCapacity(bucket_count); initialize_slots(); } } raw_hash_set(size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(bucket_count, hash, key_equal(), alloc) {} raw_hash_set(size_t bucket_count, const allocator_type& alloc) : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {} explicit raw_hash_set(const allocator_type& alloc) : raw_hash_set(0, hasher(), key_equal(), alloc) {} template raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count), hash, eq, alloc) { insert(first, last); } template raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : 
raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {} template raw_hash_set(InputIter first, InputIter last, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {} template raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc) : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {} // Instead of accepting std::initializer_list as the first // argument like std::unordered_set does, we have two overloads // that accept std::initializer_list and std::initializer_list. // This is advantageous for performance. // // // Turns {"abc", "def"} into std::initializer_list, then // // copies the strings into the set. // std::unordered_set s = {"abc", "def"}; // // // Turns {"abc", "def"} into std::initializer_list, then // // copies the strings into the set. // absl::flat_hash_set s = {"abc", "def"}; // // The same trick is used in insert(). // // The enabler is necessary to prevent this constructor from triggering where // the copy constructor is meant to be called. // // absl::flat_hash_set a, b{a}; // // RequiresNotInit is a workaround for gcc prior to 7.1. 
template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count = 0, const hasher& hash = hasher(), const key_equal& eq = key_equal(), const allocator_type& alloc = allocator_type()) : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count, const hasher& hash, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} raw_hash_set(std::initializer_list init, size_t bucket_count, const allocator_type& alloc) : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {} template = 0, RequiresInsertable = 0> raw_hash_set(std::initializer_list init, const allocator_type& alloc) : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} raw_hash_set(std::initializer_list init, const allocator_type& alloc) : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {} raw_hash_set(const raw_hash_set& that) : raw_hash_set(that, AllocTraits::select_on_container_copy_construction( that.alloc_ref())) {} raw_hash_set(const raw_hash_set& that, const allocator_type& a) : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) { reserve(that.size()); // Because the table is guaranteed to be empty, we can do something faster // than a full `insert`. 
for (const auto& v : that) { const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v); auto target = find_first_non_full(ctrl_, hash, capacity_); SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); emplace_at(target.offset, v); infoz().RecordInsert(hash, target.probe_length); } size_ = that.size(); growth_left() -= that.size(); } raw_hash_set(raw_hash_set&& that) noexcept( std::is_nothrow_copy_constructible::value&& std::is_nothrow_copy_constructible::value&& std::is_nothrow_copy_constructible::value) : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())), slots_(absl::exchange(that.slots_, nullptr)), size_(absl::exchange(that.size_, 0)), capacity_(absl::exchange(that.capacity_, 0)), // Hash, equality and allocator are copied instead of moved because // `that` must be left valid. If Hash is std::function, moving it // would create a nullptr functor that cannot be called. settings_(absl::exchange(that.growth_left(), 0), absl::exchange(that.infoz(), HashtablezInfoHandle()), that.hash_ref(), that.eq_ref(), that.alloc_ref()) {} raw_hash_set(raw_hash_set&& that, const allocator_type& a) : ctrl_(EmptyGroup()), slots_(nullptr), size_(0), capacity_(0), settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(), a) { if (a == that.alloc_ref()) { std::swap(ctrl_, that.ctrl_); std::swap(slots_, that.slots_); std::swap(size_, that.size_); std::swap(capacity_, that.capacity_); std::swap(growth_left(), that.growth_left()); std::swap(infoz(), that.infoz()); } else { reserve(that.size()); // Note: this will copy elements of dense_set and unordered_set instead of // moving them. This can be fixed if it ever becomes an issue. for (auto& elem : that) insert(std::move(elem)); } } raw_hash_set& operator=(const raw_hash_set& that) { raw_hash_set tmp(that, AllocTraits::propagate_on_container_copy_assignment::value ? 
that.alloc_ref() : alloc_ref()); swap(tmp); return *this; } raw_hash_set& operator=(raw_hash_set&& that) noexcept( absl::allocator_traits::is_always_equal::value&& std::is_nothrow_move_assignable::value&& std::is_nothrow_move_assignable::value) { // TODO(sbenza): We should only use the operations from the noexcept clause // to make sure we actually adhere to that contract. return move_assign( std::move(that), typename AllocTraits::propagate_on_container_move_assignment()); } ~raw_hash_set() { destroy_slots(); } iterator begin() { auto it = iterator_at(0); it.skip_empty_or_deleted(); return it; } iterator end() { return {}; } const_iterator begin() const { return const_cast(this)->begin(); } const_iterator end() const { return {}; } const_iterator cbegin() const { return begin(); } const_iterator cend() const { return end(); } bool empty() const { return !size(); } size_t size() const { return size_; } size_t capacity() const { return capacity_; } size_t max_size() const { return (std::numeric_limits::max)(); } ABSL_ATTRIBUTE_REINITIALIZES void clear() { // Iterating over this container is O(bucket_count()). When bucket_count() // is much greater than size(), iteration becomes prohibitively expensive. // For clear() it is more important to reuse the allocated array when the // container is small because allocation takes comparatively long time // compared to destruction of the elements of the container. So we pick the // largest bucket_count() threshold for which iteration is still fast and // past that we simply deallocate the array. 
if (capacity_ > 127) { destroy_slots(); infoz().RecordClearedReservation(); } else if (capacity_) { for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { PolicyTraits::destroy(&alloc_ref(), slots_ + i); } } size_ = 0; ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); reset_growth_left(); } assert(empty()); infoz().RecordStorageChanged(0, capacity_); } // This overload kicks in when the argument is an rvalue of insertable and // decomposable type other than init_type. // // flat_hash_map m; // m.insert(std::make_pair("abc", 42)); // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> std::pair insert(T&& value) { return emplace(std::forward(value)); } // This overload kicks in when the argument is a bitfield or an lvalue of // insertable and decomposable type. // // union { int n : 1; }; // flat_hash_set s; // s.insert(n); // // flat_hash_set s; // const char* p = "hello"; // s.insert(p); // // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace // RequiresInsertable with RequiresInsertable. // We are hitting this bug: https://godbolt.org/g/1Vht4f. template < class T, RequiresInsertable = 0, typename std::enable_if::value, int>::type = 0> std::pair insert(const T& value) { return emplace(value); } // This overload kicks in when the argument is an rvalue of init_type. Its // purpose is to handle brace-init-list arguments. // // flat_hash_map s; // s.insert({"abc", 42}); std::pair insert(init_type&& value) { return emplace(std::move(value)); } // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc // bug. template = 0, class T2 = T, typename std::enable_if::value, int>::type = 0, T* = nullptr> iterator insert(const_iterator, T&& value) { return insert(std::forward(value)).first; } // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace // RequiresInsertable with RequiresInsertable. 
// We are hitting this bug: https://godbolt.org/g/1Vht4f. template < class T, RequiresInsertable = 0, typename std::enable_if::value, int>::type = 0> iterator insert(const_iterator, const T& value) { return insert(value).first; } iterator insert(const_iterator, init_type&& value) { return insert(std::move(value)).first; } template void insert(InputIt first, InputIt last) { for (; first != last; ++first) emplace(*first); } template = 0, RequiresInsertable = 0> void insert(std::initializer_list ilist) { insert(ilist.begin(), ilist.end()); } void insert(std::initializer_list ilist) { insert(ilist.begin(), ilist.end()); } insert_return_type insert(node_type&& node) { if (!node) return {end(), false, node_type()}; const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node)); auto res = PolicyTraits::apply( InsertSlot{*this, std::move(*CommonAccess::GetSlot(node))}, elem); if (res.second) { CommonAccess::Reset(&node); return {res.first, true, node_type()}; } else { return {res.first, false, std::move(node)}; } } iterator insert(const_iterator, node_type&& node) { auto res = insert(std::move(node)); node = std::move(res.node); return res.position; } // This overload kicks in if we can deduce the key from args. This enables us // to avoid constructing value_type if an entry with the same key already // exists. // // For example: // // flat_hash_map m = {{"abc", "def"}}; // // Creates no std::string copies and makes no heap allocations. // m.emplace("abc", "xyz"); template ::value, int>::type = 0> std::pair emplace(Args&&... args) { return PolicyTraits::apply(EmplaceDecomposable{*this}, std::forward(args)...); } // This overload kicks in if we cannot deduce the key from args. It constructs // value_type unconditionally and then either moves it into the table or // destroys. template ::value, int>::type = 0> std::pair emplace(Args&&... 
args) { alignas(slot_type) unsigned char raw[sizeof(slot_type)]; slot_type* slot = reinterpret_cast(&raw); PolicyTraits::construct(&alloc_ref(), slot, std::forward(args)...); const auto& elem = PolicyTraits::element(slot); return PolicyTraits::apply(InsertSlot{*this, std::move(*slot)}, elem); } template iterator emplace_hint(const_iterator, Args&&... args) { return emplace(std::forward(args)...).first; } // Extension API: support for lazy emplace. // // Looks up key in the table. If found, returns the iterator to the element. // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`. // // `f` must abide by several restrictions: // - it MUST call `raw_hash_set::constructor` with arguments as if a // `raw_hash_set::value_type` is constructed, // - it MUST NOT access the container before the call to // `raw_hash_set::constructor`, and // - it MUST NOT erase the lazily emplaced element. // Doing any of these is undefined behavior. // // For example: // // std::unordered_set s; // // Makes ArenaStr even if "abc" is in the map. // s.insert(ArenaString(&arena, "abc")); // // flat_hash_set s; // // Makes ArenaStr only if "abc" is not in the map. // s.lazy_emplace("abc", [&](const constructor& ctor) { // ctor(&arena, "abc"); // }); // // WARNING: This API is currently experimental. If there is a way to implement // the same thing with the rest of the API, prefer that. class constructor { friend class raw_hash_set; public: template void operator()(Args&&... 
args) const { assert(*slot_); PolicyTraits::construct(alloc_, *slot_, std::forward(args)...); *slot_ = nullptr; } private: constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {} allocator_type* alloc_; slot_type** slot_; }; template iterator lazy_emplace(const key_arg& key, F&& f) { auto res = find_or_prepare_insert(key); if (res.second) { slot_type* slot = slots_ + res.first; std::forward(f)(constructor(&alloc_ref(), &slot)); assert(!slot); } return iterator_at(res.first); } // Extension API: support for heterogeneous keys. // // std::unordered_set s; // // Turns "abc" into std::string. // s.erase("abc"); // // flat_hash_set s; // // Uses "abc" directly without copying it into std::string. // s.erase("abc"); template size_type erase(const key_arg& key) { auto it = find(key); if (it == end()) return 0; erase(it); return 1; } // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`, // this method returns void to reduce algorithmic complexity to O(1). The // iterator is invalidated, so any increment should be done before calling // erase. In order to erase while iterating across a map, use the following // idiom (which also works for standard containers): // // for (auto it = m.begin(), end = m.end(); it != end;) { // // `erase()` will invalidate `it`, so advance `it` first. // auto copy_it = it++; // if () { // m.erase(copy_it); // } // } void erase(const_iterator cit) { erase(cit.inner_); } // This overload is necessary because otherwise erase(const K&) would be // a better match if non-const iterator is passed as an argument. void erase(iterator it) { ABSL_INTERNAL_ASSERT_IS_FULL(it.ctrl_, "erase() called on invalid iterator."); PolicyTraits::destroy(&alloc_ref(), it.slot_); erase_meta_only(it); } iterator erase(const_iterator first, const_iterator last) { while (first != last) { erase(first++); } return last.inner_; } // Moves elements from `src` into `this`. 
// If the element already exists in `this`, it is left unmodified in `src`. template void merge(raw_hash_set& src) { // NOLINT assert(this != &src); for (auto it = src.begin(), e = src.end(); it != e;) { auto next = std::next(it); if (PolicyTraits::apply(InsertSlot{*this, std::move(*it.slot_)}, PolicyTraits::element(it.slot_)) .second) { src.erase_meta_only(it); } it = next; } } template void merge(raw_hash_set&& src) { merge(src); } node_type extract(const_iterator position) { ABSL_INTERNAL_ASSERT_IS_FULL(position.inner_.ctrl_, "extract() called on invalid iterator."); auto node = CommonAccess::Transfer(alloc_ref(), position.inner_.slot_); erase_meta_only(position); return node; } template < class K = key_type, typename std::enable_if::value, int>::type = 0> node_type extract(const key_arg& key) { auto it = find(key); return it == end() ? node_type() : extract(const_iterator{it}); } void swap(raw_hash_set& that) noexcept( IsNoThrowSwappable() && IsNoThrowSwappable() && IsNoThrowSwappable( typename AllocTraits::propagate_on_container_swap{})) { using std::swap; swap(ctrl_, that.ctrl_); swap(slots_, that.slots_); swap(size_, that.size_); swap(capacity_, that.capacity_); swap(growth_left(), that.growth_left()); swap(hash_ref(), that.hash_ref()); swap(eq_ref(), that.eq_ref()); swap(infoz(), that.infoz()); SwapAlloc(alloc_ref(), that.alloc_ref(), typename AllocTraits::propagate_on_container_swap{}); } void rehash(size_t n) { if (n == 0 && capacity_ == 0) return; if (n == 0 && size_ == 0) { destroy_slots(); infoz().RecordStorageChanged(0, 0); infoz().RecordClearedReservation(); return; } // bitor is a faster way of doing `max` here. We will round up to the next // power-of-2-minus-1, so bitor is good enough. auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size())); // n == 0 unconditionally rehashes as per the standard. 
if (n == 0 || m > capacity_) { resize(m); // This is after resize, to ensure that we have completed the allocation // and have potentially sampled the hashtable. infoz().RecordReservation(n); } } void reserve(size_t n) { if (n > size() + growth_left()) { size_t m = GrowthToLowerboundCapacity(n); resize(NormalizeCapacity(m)); // This is after resize, to ensure that we have completed the allocation // and have potentially sampled the hashtable. infoz().RecordReservation(n); } } // Extension API: support for heterogeneous keys. // // std::unordered_set s; // // Turns "abc" into std::string. // s.count("abc"); // // ch_set s; // // Uses "abc" directly without copying it into std::string. // s.count("abc"); template size_t count(const key_arg& key) const { return find(key) == end() ? 0 : 1; } // Issues CPU prefetch instructions for the memory needed to find or insert // a key. Like all lookup functions, this support heterogeneous keys. // // NOTE: This is a very low level operation and should not be used without // specific benchmarks indicating its importance. template void prefetch(const key_arg& key) const { (void)key; // Avoid probing if we won't be able to prefetch the addresses received. #ifdef ABSL_INTERNAL_HAVE_PREFETCH prefetch_heap_block(); auto seq = probe(ctrl_, hash_ref()(key), capacity_); base_internal::PrefetchT0(ctrl_ + seq.offset()); base_internal::PrefetchT0(slots_ + seq.offset()); #endif // ABSL_INTERNAL_HAVE_PREFETCH } // The API of find() has two extensions. // // 1. The hash can be passed by the user. It must be equal to the hash of the // key. // // 2. The type of the key argument doesn't have to be key_type. This is so // called heterogeneous key support. 
template iterator find(const key_arg& key, size_t hash) { auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return iterator_at(seq.offset(i)); } if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return end(); seq.next(); assert(seq.index() <= capacity_ && "full table!"); } } template iterator find(const key_arg& key) { prefetch_heap_block(); return find(key, hash_ref()(key)); } template const_iterator find(const key_arg& key, size_t hash) const { return const_cast(this)->find(key, hash); } template const_iterator find(const key_arg& key) const { prefetch_heap_block(); return find(key, hash_ref()(key)); } template bool contains(const key_arg& key) const { return find(key) != end(); } template std::pair equal_range(const key_arg& key) { auto it = find(key); if (it != end()) return {it, std::next(it)}; return {it, it}; } template std::pair equal_range( const key_arg& key) const { auto it = find(key); if (it != end()) return {it, std::next(it)}; return {it, it}; } size_t bucket_count() const { return capacity_; } float load_factor() const { return capacity_ ? static_cast(size()) / capacity_ : 0.0; } float max_load_factor() const { return 1.0f; } void max_load_factor(float) { // Does nothing. 
} hasher hash_function() const { return hash_ref(); } key_equal key_eq() const { return eq_ref(); } allocator_type get_allocator() const { return alloc_ref(); } friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) { if (a.size() != b.size()) return false; const raw_hash_set* outer = &a; const raw_hash_set* inner = &b; if (outer->capacity() > inner->capacity()) std::swap(outer, inner); for (const value_type& elem : *outer) if (!inner->has_element(elem)) return false; return true; } friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) { return !(a == b); } template friend typename std::enable_if::value, H>::type AbslHashValue(H h, const raw_hash_set& s) { return H::combine(H::combine_unordered(std::move(h), s.begin(), s.end()), s.size()); } friend void swap(raw_hash_set& a, raw_hash_set& b) noexcept(noexcept(a.swap(b))) { a.swap(b); } private: template friend struct absl::container_internal::hashtable_debug_internal:: HashtableDebugAccess; struct FindElement { template const_iterator operator()(const K& key, Args&&...) const { return s.find(key); } const raw_hash_set& s; }; struct HashElement { template size_t operator()(const K& key, Args&&...) const { return h(key); } const hasher& h; }; template struct EqualElement { template bool operator()(const K2& lhs, Args&&...) const { return eq(lhs, rhs); } const K1& rhs; const key_equal& eq; }; struct EmplaceDecomposable { template std::pair operator()(const K& key, Args&&... args) const { auto res = s.find_or_prepare_insert(key); if (res.second) { s.emplace_at(res.first, std::forward(args)...); } return {s.iterator_at(res.first), res.second}; } raw_hash_set& s; }; template struct InsertSlot { template std::pair operator()(const K& key, Args&&...) 
&& { auto res = s.find_or_prepare_insert(key); if (res.second) { PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot); } else if (do_destroy) { PolicyTraits::destroy(&s.alloc_ref(), &slot); } return {s.iterator_at(res.first), res.second}; } raw_hash_set& s; // Constructed slot. Either moved into place or destroyed. slot_type&& slot; }; // Erases, but does not destroy, the value pointed to by `it`. // // This merely updates the pertinent control byte. This can be used in // conjunction with Policy::transfer to move the object to another place. void erase_meta_only(const_iterator it) { assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator"); --size_; const size_t index = static_cast(it.inner_.ctrl_ - ctrl_); const size_t index_before = (index - Group::kWidth) & capacity_; const auto empty_after = Group(it.inner_.ctrl_).MaskEmpty(); const auto empty_before = Group(ctrl_ + index_before).MaskEmpty(); // We count how many consecutive non empties we have to the right and to the // left of `it`. If the sum is >= kWidth then there is at least one probe // window that might have seen a full group. bool was_never_full = empty_before && empty_after && static_cast(empty_after.TrailingZeros() + empty_before.LeadingZeros()) < Group::kWidth; SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted, capacity_, ctrl_, slots_, sizeof(slot_type)); growth_left() += was_never_full; infoz().RecordErase(); } // Allocates a backing array for `self` and initializes its control bytes. // This reads `capacity_` and updates all other fields based on the result of // the allocation. // // This does not free the currently held array; `capacity_` must be nonzero. void initialize_slots() { assert(capacity_); // Folks with custom allocators often make unwarranted assumptions about the // behavior of their classes vis-a-vis trivial destructability and what // calls they will or wont make. 
Avoid sampling for people with custom // allocators to get us out of this mess. This is not a hard guarantee but // a workaround while we plan the exact guarantee we want to provide. // // People are often sloppy with the exact type of their allocator (sometimes // it has an extra const or is missing the pair, but rebinds made it work // anyway). To avoid the ambiguity, we work off SlotAlloc which we have // bound more carefully. if (std::is_same>::value && slots_ == nullptr) { infoz() = Sample(sizeof(slot_type)); } char* mem = static_cast(Allocate( &alloc_ref(), AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)))); ctrl_ = reinterpret_cast(mem); slots_ = reinterpret_cast( mem + SlotOffset(capacity_, alignof(slot_type))); ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type)); reset_growth_left(); infoz().RecordStorageChanged(size_, capacity_); } // Destroys all slots in the backing array, frees the backing array, and // clears all top-level book-keeping data. // // This essentially implements `map = raw_hash_set();`. void destroy_slots() { if (!capacity_) return; for (size_t i = 0; i != capacity_; ++i) { if (IsFull(ctrl_[i])) { PolicyTraits::destroy(&alloc_ref(), slots_ + i); } } // Unpoison before returning the memory to the allocator. 
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_); Deallocate( &alloc_ref(), ctrl_, AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))); ctrl_ = EmptyGroup(); slots_ = nullptr; size_ = 0; capacity_ = 0; growth_left() = 0; } void resize(size_t new_capacity) { assert(IsValidCapacity(new_capacity)); auto* old_ctrl = ctrl_; auto* old_slots = slots_; const size_t old_capacity = capacity_; capacity_ = new_capacity; initialize_slots(); size_t total_probe_length = 0; for (size_t i = 0; i != old_capacity; ++i) { if (IsFull(old_ctrl[i])) { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, PolicyTraits::element(old_slots + i)); auto target = find_first_non_full(ctrl_, hash, capacity_); size_t new_i = target.offset; total_probe_length += target.probe_length; SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i); } } if (old_capacity) { SanitizerUnpoisonMemoryRegion(old_slots, sizeof(slot_type) * old_capacity); Deallocate( &alloc_ref(), old_ctrl, AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type))); } infoz().RecordRehash(total_probe_length); } // Prunes control bytes to remove as many tombstones as possible. // // See the comment on `rehash_and_grow_if_necessary()`. 
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE { assert(IsValidCapacity(capacity_)); assert(!is_small(capacity_)); // Algorithm: // - mark all DELETED slots as EMPTY // - mark all FULL slots as DELETED // - for each slot marked as DELETED // hash = Hash(element) // target = find_first_non_full(hash) // if target is in the same group // mark slot as FULL // else if target is EMPTY // transfer element to target // mark slot as EMPTY // mark target as FULL // else if target is DELETED // swap current element with target element // mark target as FULL // repeat procedure for current slot with moved from element (target) ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_); alignas(slot_type) unsigned char raw[sizeof(slot_type)]; size_t total_probe_length = 0; slot_type* slot = reinterpret_cast(&raw); for (size_t i = 0; i != capacity_; ++i) { if (!IsDeleted(ctrl_[i])) continue; const size_t hash = PolicyTraits::apply( HashElement{hash_ref()}, PolicyTraits::element(slots_ + i)); const FindInfo target = find_first_non_full(ctrl_, hash, capacity_); const size_t new_i = target.offset; total_probe_length += target.probe_length; // Verify if the old and new i fall within the same group wrt the hash. // If they do, we don't need to move the object as it falls already in the // best probe we can. const size_t probe_offset = probe(ctrl_, hash, capacity_).offset(); const auto probe_index = [probe_offset, this](size_t pos) { return ((pos - probe_offset) & capacity_) / Group::kWidth; }; // Element doesn't move. if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) { SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); continue; } if (IsEmpty(ctrl_[new_i])) { // Transfer element to the empty spot. // SetCtrl poisons/unpoisons the slots so we have to call it at the // right time. 
SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i); SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type)); } else { assert(IsDeleted(ctrl_[new_i])); SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); // Until we are done rehashing, DELETED marks previously FULL slots. // Swap i and new_i elements. PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i); PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i); PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot); --i; // repeat } } reset_growth_left(); infoz().RecordRehash(total_probe_length); } // Called whenever the table *might* need to conditionally grow. // // This function is an optimization opportunity to perform a rehash even when // growth is unnecessary, because vacating tombstones is beneficial for // performance in the long-run. void rehash_and_grow_if_necessary() { if (capacity_ == 0) { resize(1); } else if (capacity_ > Group::kWidth && // Do these calcuations in 64-bit to avoid overflow. size() * uint64_t{32} <= capacity_ * uint64_t{25}) { // Squash DELETED without growing if there is enough capacity. // // Rehash in place if the current size is <= 25/32 of capacity_. // Rationale for such a high factor: 1) drop_deletes_without_resize() is // faster than resize, and 2) it takes quite a bit of work to add // tombstones. In the worst case, seems to take approximately 4 // insert/erase pairs to create a single tombstone and so if we are // rehashing because of tombstones, we can afford to rehash-in-place as // long as we are reclaiming at least 1/8 the capacity without doing more // than 2X the work. (Where "work" is defined to be size() for rehashing // or rehashing in place, and 1 for an insert or erase.) 
But rehashing in // place is faster per operation than inserting or even doubling the size // of the table, so we actually afford to reclaim even less space from a // resize-in-place. The decision is to rehash in place if we can reclaim // at about 1/8th of the usable capacity (specifically 3/28 of the // capacity) which means that the total cost of rehashing will be a small // fraction of the total work. // // Here is output of an experiment using the BM_CacheInSteadyState // benchmark running the old case (where we rehash-in-place only if we can // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place // if we can recover 3/32*capacity_). // // Note that although in the worst-case number of rehashes jumped up from // 15 to 190, but the number of operations per second is almost the same. // // Abridged output of running BM_CacheInSteadyState benchmark from // raw_hash_set_benchmark. N is the number of insert/erase operations. // // | OLD (recover >= 7/16 | NEW (recover >= 3/32) // size | N/s LoadFactor NRehashes | N/s LoadFactor NRehashes // 448 | 145284 0.44 18 | 140118 0.44 19 // 493 | 152546 0.24 11 | 151417 0.48 28 // 538 | 151439 0.26 11 | 151152 0.53 38 // 583 | 151765 0.28 11 | 150572 0.57 50 // 628 | 150241 0.31 11 | 150853 0.61 66 // 672 | 149602 0.33 12 | 150110 0.66 90 // 717 | 149998 0.35 12 | 149531 0.70 129 // 762 | 149836 0.37 13 | 148559 0.74 190 // 807 | 149736 0.39 14 | 151107 0.39 14 // 852 | 150204 0.42 15 | 151019 0.42 15 drop_deletes_without_resize(); } else { // Otherwise grow the container. 
resize(capacity_ * 2 + 1); } } bool has_element(const value_type& elem) const { size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem); auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) == elem)) return true; } if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false; seq.next(); assert(seq.index() <= capacity_ && "full table!"); } return false; } // TODO(alkis): Optimize this assuming *this and that don't overlap. raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) { raw_hash_set tmp(std::move(that)); swap(tmp); return *this; } raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) { raw_hash_set tmp(std::move(that), alloc_ref()); swap(tmp); return *this; } protected: // Attempts to find `key` in the table; if it isn't found, returns a slot that // the value can be inserted into, with the control byte already set to // `key`'s H2. template std::pair find_or_prepare_insert(const K& key) { prefetch_heap_block(); auto hash = hash_ref()(key); auto seq = probe(ctrl_, hash, capacity_); while (true) { Group g{ctrl_ + seq.offset()}; for (uint32_t i : g.Match(H2(hash))) { if (ABSL_PREDICT_TRUE(PolicyTraits::apply( EqualElement{key, eq_ref()}, PolicyTraits::element(slots_ + seq.offset(i))))) return {seq.offset(i), false}; } if (ABSL_PREDICT_TRUE(g.MaskEmpty())) break; seq.next(); assert(seq.index() <= capacity_ && "full table!"); } return {prepare_insert(hash), true}; } // Given the hash of a value not currently in the table, finds the next // viable slot index to insert it at. // // REQUIRES: At least one non-full slot available. 
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE { auto target = find_first_non_full(ctrl_, hash, capacity_); if (ABSL_PREDICT_FALSE(growth_left() == 0 && !IsDeleted(ctrl_[target.offset]))) { rehash_and_grow_if_necessary(); target = find_first_non_full(ctrl_, hash, capacity_); } ++size_; growth_left() -= IsEmpty(ctrl_[target.offset]); SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type)); infoz().RecordInsert(hash, target.probe_length); return target.offset; } // Constructs the value in the space pointed by the iterator. This only works // after an unsuccessful find_or_prepare_insert() and before any other // modifications happen in the raw_hash_set. // // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where // k is the key decomposed from `forward(args)...`, and the bool // returned by find_or_prepare_insert(k) was true. // POSTCONDITION: *m.iterator_at(i) == value_type(forward(args)...). template void emplace_at(size_t i, Args&&... args) { PolicyTraits::construct(&alloc_ref(), slots_ + i, std::forward(args)...); assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) == iterator_at(i) && "constructed value does not match the lookup key"); } iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; } const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; } private: friend struct RawHashSetTestOnlyAccess; void reset_growth_left() { growth_left() = CapacityToGrowth(capacity()) - size_; } // The number of slots we can still fill without needing to rehash. // // This is stored separately due to tombstones: we do not include tombstones // in the growth capacity, because we'd like to rehash when the table is // otherwise filled with tombstones: otherwise, probe sequences might get // unacceptably long without triggering a rehash. Callers can also force a // rehash via the standard `rehash(0)`, which will recompute this value as a // side-effect. // // See `CapacityToGrowth()`. 
size_t& growth_left() { return settings_.template get<0>(); } // Prefetch the heap-allocated memory region to resolve potential TLB misses. // This is intended to overlap with execution of calculating the hash for a // key. void prefetch_heap_block() const { base_internal::PrefetchT2(ctrl_); } HashtablezInfoHandle& infoz() { return settings_.template get<1>(); } hasher& hash_ref() { return settings_.template get<2>(); } const hasher& hash_ref() const { return settings_.template get<2>(); } key_equal& eq_ref() { return settings_.template get<3>(); } const key_equal& eq_ref() const { return settings_.template get<3>(); } allocator_type& alloc_ref() { return settings_.template get<4>(); } const allocator_type& alloc_ref() const { return settings_.template get<4>(); } // TODO(alkis): Investigate removing some of these fields: // - ctrl/slots can be derived from each other // - size can be moved into the slot array // The control bytes (and, also, a pointer to the base of the backing array). // // This contains `capacity_ + 1 + NumClonedBytes()` entries, even // when the table is empty (hence EmptyGroup). ctrl_t* ctrl_ = EmptyGroup(); // The beginning of the slots, located at `SlotOffset()` bytes after // `ctrl_`. May be null for empty tables. slot_type* slots_ = nullptr; // The number of filled slots. size_t size_ = 0; // The total number of available slots. size_t capacity_ = 0; absl::container_internal::CompressedTuple settings_{0u, HashtablezInfoHandle{}, hasher{}, key_equal{}, allocator_type{}}; }; // Erases all elements that satisfy the predicate `pred` from the container `c`. 
template typename raw_hash_set::size_type EraseIf( Predicate& pred, raw_hash_set* c) { const auto initial_size = c->size(); for (auto it = c->begin(), last = c->end(); it != last;) { if (pred(*it)) { c->erase(it++); } else { ++it; } } return initial_size - c->size(); } namespace hashtable_debug_internal { template struct HashtableDebugAccess> { using Traits = typename Set::PolicyTraits; using Slot = typename Traits::slot_type; static size_t GetNumProbes(const Set& set, const typename Set::key_type& key) { size_t num_probes = 0; size_t hash = set.hash_ref()(key); auto seq = probe(set.ctrl_, hash, set.capacity_); while (true) { container_internal::Group g{set.ctrl_ + seq.offset()}; for (uint32_t i : g.Match(container_internal::H2(hash))) { if (Traits::apply( typename Set::template EqualElement{ key, set.eq_ref()}, Traits::element(set.slots_ + seq.offset(i)))) return num_probes; ++num_probes; } if (g.MaskEmpty()) return num_probes; seq.next(); ++num_probes; } } static size_t AllocatedByteSize(const Set& c) { size_t capacity = c.capacity_; if (capacity == 0) return 0; size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot)); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { m += per_slot * c.size(); } else { for (size_t i = 0; i != capacity; ++i) { if (container_internal::IsFull(c.ctrl_[i])) { m += Traits::space_used(c.slots_ + i); } } } return m; } static size_t LowerBoundAllocatedByteSize(size_t size) { size_t capacity = GrowthToLowerboundCapacity(size); if (capacity == 0) return 0; size_t m = AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot)); size_t per_slot = Traits::space_used(static_cast(nullptr)); if (per_slot != ~size_t{}) { m += per_slot * size; } return m; } }; } // namespace hashtable_debug_internal } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl #undef ABSL_INTERNAL_ASSERT_IS_FULL #endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_ 
abseil-20220623.1/absl/container/internal/raw_hash_set_allocator_test.cc000066400000000000000000000330641430371345100261550ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include #include #include "gtest/gtest.h" #include "absl/container/internal/raw_hash_set.h" #include "absl/container/internal/tracked.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { namespace { enum AllocSpec { kPropagateOnCopy = 1, kPropagateOnMove = 2, kPropagateOnSwap = 4, }; struct AllocState { size_t num_allocs = 0; std::set owned; }; template class CheckedAlloc { public: template friend class CheckedAlloc; using value_type = T; CheckedAlloc() {} explicit CheckedAlloc(size_t id) : id_(id) {} CheckedAlloc(const CheckedAlloc&) = default; CheckedAlloc& operator=(const CheckedAlloc&) = default; template CheckedAlloc(const CheckedAlloc& that) : id_(that.id_), state_(that.state_) {} template struct rebind { using other = CheckedAlloc; }; using propagate_on_container_copy_assignment = std::integral_constant; using propagate_on_container_move_assignment = std::integral_constant; using propagate_on_container_swap = std::integral_constant; CheckedAlloc select_on_container_copy_construction() const { if (Spec & kPropagateOnCopy) return *this; return {}; } T* allocate(size_t n) { T* ptr = std::allocator().allocate(n); track_alloc(ptr); return ptr; } void deallocate(T* ptr, size_t n) { memset(ptr, 0, n * sizeof(T)); 
// The freed memory must be unpoisoned. track_dealloc(ptr); return std::allocator().deallocate(ptr, n); } friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) { return a.id_ == b.id_; } friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) { return !(a == b); } size_t num_allocs() const { return state_->num_allocs; } void swap(CheckedAlloc& that) { using std::swap; swap(id_, that.id_); swap(state_, that.state_); } friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); } friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) { return o << "alloc(" << a.id_ << ")"; } private: void track_alloc(void* ptr) { AllocState* state = state_.get(); ++state->num_allocs; if (!state->owned.insert(ptr).second) ADD_FAILURE() << *this << " got previously allocated memory: " << ptr; } void track_dealloc(void* ptr) { if (state_->owned.erase(ptr) != 1) ADD_FAILURE() << *this << " deleting memory owned by another allocator: " << ptr; } size_t id_ = std::numeric_limits::max(); std::shared_ptr state_ = std::make_shared(); }; struct Identity { int32_t operator()(int32_t v) const { return v; } }; struct Policy { using slot_type = Tracked; using init_type = Tracked; using key_type = int32_t; template static void construct(allocator_type* alloc, slot_type* slot, Args&&... 
args) { std::allocator_traits::construct( *alloc, slot, std::forward(args)...); } template static void destroy(allocator_type* alloc, slot_type* slot) { std::allocator_traits::destroy(*alloc, slot); } template static void transfer(allocator_type* alloc, slot_type* new_slot, slot_type* old_slot) { construct(alloc, new_slot, std::move(*old_slot)); destroy(alloc, old_slot); } template static auto apply(F&& f, int32_t v) -> decltype(std::forward(f)(v, v)) { return std::forward(f)(v, v); } template static auto apply(F&& f, const slot_type& v) -> decltype(std::forward(f)(v.val(), v)) { return std::forward(f)(v.val(), v); } template static auto apply(F&& f, slot_type&& v) -> decltype(std::forward(f)(v.val(), std::move(v))) { return std::forward(f)(v.val(), std::move(v)); } static slot_type& element(slot_type* slot) { return *slot; } }; template struct PropagateTest : public ::testing::Test { using Alloc = CheckedAlloc, Spec>; using Table = raw_hash_set, Alloc>; PropagateTest() { EXPECT_EQ(a1, t1.get_allocator()); EXPECT_NE(a2, t1.get_allocator()); } Alloc a1 = Alloc(1); Table t1 = Table(0, a1); Alloc a2 = Alloc(2); }; using PropagateOnAll = PropagateTest; using NoPropagateOnCopy = PropagateTest; using NoPropagateOnMove = PropagateTest; TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); } TEST_F(PropagateOnAll, InsertAllocates) { auto it = t1.insert(0).first; EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, InsertDecomposes) { auto it = t1.insert(0).first; EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); EXPECT_FALSE(t1.insert(0).second); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, RehashMoves) { auto it = t1.insert(0).first; EXPECT_EQ(0, it->num_moves()); t1.rehash(2 * t1.capacity()); EXPECT_EQ(2, a1.num_allocs()); it = t1.find(0); EXPECT_EQ(1, it->num_moves()); 
EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, CopyConstructor) { auto it = t1.insert(0).first; Table u(t1); EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(NoPropagateOnCopy, CopyConstructor) { auto it = t1.insert(0).first; Table u(t1); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, u.get_allocator().num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) { auto it = t1.insert(0).first; Table u(t1, a1); EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) { auto it = t1.insert(0).first; Table u(t1, a1); EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(t1, a2); EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(t1, a2); EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(PropagateOnAll, MoveConstructor) { auto it = t1.insert(0).first; Table u(std::move(t1)); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(NoPropagateOnMove, MoveConstructor) { auto it = t1.insert(0).first; Table u(std::move(t1)); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) { auto it = t1.insert(0).first; Table u(std::move(t1), a1); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } 
TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) { auto it = t1.insert(0).first; Table u(std::move(t1), a1); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(std::move(t1), a2); it = u.find(0); EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(1, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(std::move(t1), a2); it = u.find(0); EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(1, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) { auto it = t1.insert(0).first; Table u(0, a1); u = t1; EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) { auto it = t1.insert(0).first; Table u(0, a1); u = t1; EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(0, a2); u = t1; EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(2, a1.num_allocs()); EXPECT_EQ(0, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(0, a2); u = t1; EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(1, it->num_copies()); } TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) { auto it = t1.insert(0).first; Table u(0, a1); u = std::move(t1); EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, 
it->num_copies()); } TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) { auto it = t1.insert(0).first; Table u(0, a1); u = std::move(t1); EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(0, a2); u = std::move(t1); EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) { auto it = t1.insert(0).first; Table u(0, a2); u = std::move(t1); it = u.find(0); EXPECT_EQ(a2, u.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(1, a2.num_allocs()); EXPECT_EQ(1, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } TEST_F(PropagateOnAll, Swap) { auto it = t1.insert(0).first; Table u(0, a2); u.swap(t1); EXPECT_EQ(a1, u.get_allocator()); EXPECT_EQ(a2, t1.get_allocator()); EXPECT_EQ(1, a1.num_allocs()); EXPECT_EQ(0, a2.num_allocs()); EXPECT_EQ(0, it->num_moves()); EXPECT_EQ(0, it->num_copies()); } // This allocator is similar to std::pmr::polymorphic_allocator. // Note the disabled assignment. 
template class PAlloc { template friend class PAlloc; public: // types using value_type = T; // traits using propagate_on_container_swap = std::false_type; PAlloc() noexcept = default; explicit PAlloc(size_t id) noexcept : id_(id) {} PAlloc(const PAlloc&) noexcept = default; PAlloc& operator=(const PAlloc&) noexcept = delete; template PAlloc(const PAlloc& that) noexcept : id_(that.id_) {} // NOLINT template struct rebind { using other = PAlloc; }; constexpr PAlloc select_on_container_copy_construction() const { return {}; } // public member functions T* allocate(size_t) { return new T; } void deallocate(T* p, size_t) { delete p; } friend bool operator==(const PAlloc& a, const PAlloc& b) { return a.id_ == b.id_; } friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); } private: size_t id_ = std::numeric_limits::max(); }; // This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handing. #if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \ __GNUC_MINOR__ != 5) TEST(NoPropagateOn, Swap) { using PA = PAlloc; using Table = raw_hash_set, PA>; Table t1(PA{1}), t2(PA{2}); swap(t1, t2); EXPECT_EQ(t1.get_allocator(), PA(1)); EXPECT_EQ(t2.get_allocator(), PA(2)); } #endif TEST(NoPropagateOn, CopyConstruct) { using PA = PAlloc; using Table = raw_hash_set, PA>; Table t1(PA{1}), t2(t1); EXPECT_EQ(t1.get_allocator(), PA(1)); EXPECT_EQ(t2.get_allocator(), PA()); } TEST(NoPropagateOn, Assignment) { using PA = PAlloc; using Table = raw_hash_set, PA>; Table t1(PA{1}), t2(PA{2}); t1 = t2; EXPECT_EQ(t1.get_allocator(), PA(1)); EXPECT_EQ(t2.get_allocator(), PA(2)); } } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl abseil-20220623.1/absl/container/internal/raw_hash_set_benchmark.cc000066400000000000000000000317531430371345100250730ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "absl/container/internal/raw_hash_set.h" #include #include #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/hash_function_defaults.h" #include "absl/strings/str_format.h" #include "benchmark/benchmark.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { struct RawHashSetTestOnlyAccess { template static auto GetSlots(const C& c) -> decltype(c.slots_) { return c.slots_; } }; namespace { struct IntPolicy { using slot_type = int64_t; using key_type = int64_t; using init_type = int64_t; static void construct(void*, int64_t* slot, int64_t v) { *slot = v; } static void destroy(void*, int64_t*) {} static void transfer(void*, int64_t* new_slot, int64_t* old_slot) { *new_slot = *old_slot; } static int64_t& element(slot_type* slot) { return *slot; } template static auto apply(F&& f, int64_t x) -> decltype(std::forward(f)(x, x)) { return std::forward(f)(x, x); } }; class StringPolicy { template ::value>::type> decltype(std::declval()( std::declval(), std::piecewise_construct, std::declval>(), std::declval())) static apply_impl(F&& f, std::pair, V> p) { const absl::string_view& key = std::get<0>(p.first); return std::forward(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second)); } public: struct slot_type { struct ctor {}; template slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) 
{} std::pair pair; }; using key_type = std::string; using init_type = std::pair; template static void construct(allocator_type* alloc, slot_type* slot, Args... args) { std::allocator_traits::construct( *alloc, slot, typename slot_type::ctor(), std::forward(args)...); } template static void destroy(allocator_type* alloc, slot_type* slot) { std::allocator_traits::destroy(*alloc, slot); } template static void transfer(allocator_type* alloc, slot_type* new_slot, slot_type* old_slot) { construct(alloc, new_slot, std::move(old_slot->pair)); destroy(alloc, old_slot); } static std::pair& element(slot_type* slot) { return slot->pair; } template static auto apply(F&& f, Args&&... args) -> decltype(apply_impl(std::forward(f), PairArgs(std::forward(args)...))) { return apply_impl(std::forward(f), PairArgs(std::forward(args)...)); } }; struct StringHash : container_internal::hash_default_hash { using is_transparent = void; }; struct StringEq : std::equal_to { using is_transparent = void; }; struct StringTable : raw_hash_set> { using Base = typename StringTable::raw_hash_set; StringTable() {} using Base::Base; }; struct IntTable : raw_hash_set, std::equal_to, std::allocator> { using Base = typename IntTable::raw_hash_set; IntTable() {} using Base::Base; }; struct string_generator { template std::string operator()(RNG& rng) const { std::string res; res.resize(12); std::uniform_int_distribution printable_ascii(0x20, 0x7E); std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); }); return res; } size_t size; }; // Model a cache in steady state. // // On a table of size N, keep deleting the LRU entry and add a random one. 
void BM_CacheInSteadyState(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); string_generator gen{12}; StringTable t; std::deque keys; while (t.size() < state.range(0)) { auto x = t.emplace(gen(rng), gen(rng)); if (x.second) keys.push_back(x.first->first); } ABSL_RAW_CHECK(state.range(0) >= 10, ""); while (state.KeepRunning()) { // Some cache hits. std::deque::const_iterator it; for (int i = 0; i != 90; ++i) { if (i % 10 == 0) it = keys.end(); ::benchmark::DoNotOptimize(t.find(*--it)); } // Some cache misses. for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng))); ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str()); keys.pop_front(); while (true) { auto x = t.emplace(gen(rng), gen(rng)); if (x.second) { keys.push_back(x.first->first); break; } } } state.SetItemsProcessed(state.iterations()); state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor())); } template void CacheInSteadyStateArgs(Benchmark* bm) { // The default. const float max_load_factor = 0.875; // When the cache is at the steady state, the probe sequence will equal // capacity if there is no reclamation of deleted slots. Pick a number large // enough to make the benchmark slow for that case. const size_t capacity = 1 << 10; // Check N data points to cover load factors in [0.4, 0.8). 
const size_t kNumPoints = 10; for (size_t i = 0; i != kNumPoints; ++i) bm->Arg(std::ceil( capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2)); } BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs); void BM_EndComparison(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); string_generator gen{12}; StringTable t; while (t.size() < state.range(0)) { t.emplace(gen(rng), gen(rng)); } for (auto _ : state) { for (auto it = t.begin(); it != t.end(); ++it) { benchmark::DoNotOptimize(it); benchmark::DoNotOptimize(t); benchmark::DoNotOptimize(it != t.end()); } } } BENCHMARK(BM_EndComparison)->Arg(400); void BM_CopyCtor(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); IntTable t; std::uniform_int_distribution dist(0, ~uint64_t{}); while (t.size() < state.range(0)) { t.emplace(dist(rng)); } for (auto _ : state) { IntTable t2 = t; benchmark::DoNotOptimize(t2); } } BENCHMARK(BM_CopyCtor)->Range(128, 4096); void BM_CopyAssign(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); IntTable t; std::uniform_int_distribution dist(0, ~uint64_t{}); while (t.size() < state.range(0)) { t.emplace(dist(rng)); } IntTable t2; for (auto _ : state) { t2 = t; benchmark::DoNotOptimize(t2); } } BENCHMARK(BM_CopyAssign)->Range(128, 4096); void BM_RangeCtor(benchmark::State& state) { std::random_device rd; std::mt19937 rng(rd()); std::uniform_int_distribution dist(0, ~uint64_t{}); std::vector values; const size_t desired_size = state.range(0); while (values.size() < desired_size) { values.emplace_back(dist(rng)); } for (auto unused : state) { IntTable t{values.begin(), values.end()}; benchmark::DoNotOptimize(t); } } BENCHMARK(BM_RangeCtor)->Range(128, 65536); void BM_NoOpReserveIntTable(benchmark::State& state) { IntTable t; t.reserve(100000); for (auto _ : state) { benchmark::DoNotOptimize(t); t.reserve(100000); } } BENCHMARK(BM_NoOpReserveIntTable); void BM_NoOpReserveStringTable(benchmark::State& 
state) { StringTable t; t.reserve(100000); for (auto _ : state) { benchmark::DoNotOptimize(t); t.reserve(100000); } } BENCHMARK(BM_NoOpReserveStringTable); void BM_ReserveIntTable(benchmark::State& state) { int reserve_size = state.range(0); for (auto _ : state) { state.PauseTiming(); IntTable t; state.ResumeTiming(); benchmark::DoNotOptimize(t); t.reserve(reserve_size); } } BENCHMARK(BM_ReserveIntTable)->Range(128, 4096); void BM_ReserveStringTable(benchmark::State& state) { int reserve_size = state.range(0); for (auto _ : state) { state.PauseTiming(); StringTable t; state.ResumeTiming(); benchmark::DoNotOptimize(t); t.reserve(reserve_size); } } BENCHMARK(BM_ReserveStringTable)->Range(128, 4096); // Like std::iota, except that ctrl_t doesn't support operator++. template void Iota(CtrlIter begin, CtrlIter end, int value) { for (; begin != end; ++begin, ++value) { *begin = static_cast(value); } } void BM_Group_Match(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -4); Group g{group.data()}; h2_t h = 1; for (auto _ : state) { ::benchmark::DoNotOptimize(h); ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.Match(h)); } } BENCHMARK(BM_Group_Match); void BM_Group_MaskEmpty(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -4); Group g{group.data()}; for (auto _ : state) { ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.MaskEmpty()); } } BENCHMARK(BM_Group_MaskEmpty); void BM_Group_MaskEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -4); Group g{group.data()}; for (auto _ : state) { ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted()); } } BENCHMARK(BM_Group_MaskEmptyOrDeleted); void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -2); Group g{group.data()}; for (auto _ : state) { ::benchmark::DoNotOptimize(g); 
::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted()); } } BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted); void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) { std::array group; Iota(group.begin(), group.end(), -2); Group g{group.data()}; for (auto _ : state) { ::benchmark::DoNotOptimize(g); ::benchmark::DoNotOptimize(g.MaskEmptyOrDeleted().LowestBitSet()); } } BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted); void BM_DropDeletes(benchmark::State& state) { constexpr size_t capacity = (1 << 20) - 1; std::vector ctrl(capacity + 1 + Group::kWidth); ctrl[capacity] = ctrl_t::kSentinel; std::vector pattern = {ctrl_t::kEmpty, static_cast(2), ctrl_t::kDeleted, static_cast(2), ctrl_t::kEmpty, static_cast(1), ctrl_t::kDeleted}; for (size_t i = 0; i != capacity; ++i) { ctrl[i] = pattern[i % pattern.size()]; } while (state.KeepRunning()) { state.PauseTiming(); std::vector ctrl_copy = ctrl; state.ResumeTiming(); ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity); ::benchmark::DoNotOptimize(ctrl_copy[capacity]); } } BENCHMARK(BM_DropDeletes); } // namespace } // namespace container_internal ABSL_NAMESPACE_END } // namespace absl // These methods are here to make it easy to examine the assembly for targeted // parts of the API. 
auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table, int64_t key) -> decltype(table->find(key)) { return table->find(key); } bool CodegenAbslRawHashSetInt64FindNeEnd( absl::container_internal::IntTable* table, int64_t key) { return table->find(key) != table->end(); } auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table, int64_t key) -> decltype(table->insert(key)) { return table->insert(key); } bool CodegenAbslRawHashSetInt64Contains( absl::container_internal::IntTable* table, int64_t key) { return table->contains(key); } void CodegenAbslRawHashSetInt64Iterate( absl::container_internal::IntTable* table) { for (auto x : *table) benchmark::DoNotOptimize(x); } int odr = (::benchmark::DoNotOptimize(std::make_tuple( &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd, &CodegenAbslRawHashSetInt64Insert, &CodegenAbslRawHashSetInt64Contains, &CodegenAbslRawHashSetInt64Iterate)), 1); abseil-20220623.1/absl/container/internal/raw_hash_set_probe_benchmark.cc000066400000000000000000000406221430371345100262550ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Generates probe length statistics for many combinations of key types and key // distributions, all using the default hash function for swisstable. 
#include #include // NOLINT #include #include "absl/container/flat_hash_map.h" #include "absl/container/internal/hash_function_defaults.h" #include "absl/container/internal/hashtable_debug.h" #include "absl/container/internal/raw_hash_set.h" #include "absl/random/distributions.h" #include "absl/random/random.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/strings/strip.h" namespace { enum class OutputStyle { kRegular, kBenchmark }; // The --benchmark command line flag. // This is populated from main(). // When run in "benchmark" mode, we have different output. This allows // A/B comparisons with tools like `benchy`. absl::string_view benchmarks; OutputStyle output() { return !benchmarks.empty() ? OutputStyle::kBenchmark : OutputStyle::kRegular; } template struct Policy { using slot_type = T; using key_type = T; using init_type = T; template static void construct(allocator_type* alloc, slot_type* slot, const Arg& arg) { std::allocator_traits::construct(*alloc, slot, arg); } template static void destroy(allocator_type* alloc, slot_type* slot) { std::allocator_traits::destroy(*alloc, slot); } static slot_type& element(slot_type* slot) { return *slot; } template static auto apply(F&& f, const slot_type& arg) -> decltype(std::forward(f)(arg, arg)) { return std::forward(f)(arg, arg); } }; absl::BitGen& GlobalBitGen() { static auto* value = new absl::BitGen; return *value; } // Keeps a pool of allocations and randomly gives one out. // This introduces more randomization to the addresses given to swisstable and // should help smooth out this factor from probe length calculation. 
template class RandomizedAllocator { public: using value_type = T; RandomizedAllocator() = default; template RandomizedAllocator(RandomizedAllocator) {} // NOLINT static T* allocate(size_t n) { auto& pointers = GetPointers(n); // Fill the pool while (pointers.size() < kRandomPool) { pointers.push_back(std::allocator{}.allocate(n)); } // Choose a random one. size_t i = absl::Uniform(GlobalBitGen(), 0, pointers.size()); T* result = pointers[i]; pointers[i] = pointers.back(); pointers.pop_back(); return result; } static void deallocate(T* p, size_t n) { // Just put it back on the pool. No need to release the memory. GetPointers(n).push_back(p); } private: // We keep at least kRandomPool allocations for each size. static constexpr size_t kRandomPool = 20; static std::vector& GetPointers(size_t n) { static auto* m = new absl::flat_hash_map>(); return (*m)[n]; } }; template struct DefaultHash { using type = absl::container_internal::hash_default_hash; }; template using DefaultHashT = typename DefaultHash::type; template struct Table : absl::container_internal::raw_hash_set< Policy, DefaultHashT, absl::container_internal::hash_default_eq, RandomizedAllocator> {}; struct LoadSizes { size_t min_load; size_t max_load; }; LoadSizes GetMinMaxLoadSizes() { static const auto sizes = [] { Table t; // First, fill enough to have a good distribution. constexpr size_t kMinSize = 10000; while (t.size() < kMinSize) t.insert(t.size()); const auto reach_min_load_factor = [&] { const double lf = t.load_factor(); while (lf <= t.load_factor()) t.insert(t.size()); }; // Then, insert until we reach min load factor. reach_min_load_factor(); const size_t min_load_size = t.size(); // Keep going until we hit min load factor again, then go back one. 
t.insert(t.size()); reach_min_load_factor(); return LoadSizes{min_load_size, t.size() - 1}; }(); return sizes; } struct Ratios { double min_load; double avg_load; double max_load; }; // See absl/container/internal/hashtable_debug.h for details on // probe length calculation. template Ratios CollectMeanProbeLengths() { const auto min_max_sizes = GetMinMaxLoadSizes(); ElemFn elem; using Key = decltype(elem()); Table t; Ratios result; while (t.size() < min_max_sizes.min_load) t.insert(elem()); result.min_load = absl::container_internal::GetHashtableDebugProbeSummary(t).mean; while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2) t.insert(elem()); result.avg_load = absl::container_internal::GetHashtableDebugProbeSummary(t).mean; while (t.size() < min_max_sizes.max_load) t.insert(elem()); result.max_load = absl::container_internal::GetHashtableDebugProbeSummary(t).mean; return result; } template uintptr_t PointerForAlignment() { alignas(Align) static constexpr uintptr_t kInitPointer = 0; return reinterpret_cast(&kInitPointer); } // This incomplete type is used for testing hash of pointers of different // alignments. // NOTE: We are generating invalid pointer values on the fly with // reinterpret_cast. There are not "safely derived" pointers so using them is // technically UB. It is unlikely to be a problem, though. template struct Ptr; template Ptr* MakePtr(uintptr_t v) { if (sizeof(v) == 8) { constexpr int kCopyBits = 16; // Ensure high bits are all the same. 
v = static_cast(static_cast(v << kCopyBits) >> kCopyBits); } return reinterpret_cast*>(v); } struct IntIdentity { uint64_t i; friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; } IntIdentity operator++(int) { return IntIdentity{i++}; } }; template struct PtrIdentity { explicit PtrIdentity(uintptr_t val = PointerForAlignment()) : i(val) {} uintptr_t i; friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; } PtrIdentity operator++(int) { PtrIdentity p(i); i += Align; return p; } }; constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt"; template struct String { std::string value; static std::string Make(uint32_t v) { return {small ? absl::StrCat(v) : absl::StrFormat(kStringFormat, v)}; } }; template <> struct DefaultHash { struct type { size_t operator()(IntIdentity t) const { return t.i; } }; }; template struct DefaultHash> { struct type { size_t operator()(PtrIdentity t) const { return t.i; } }; }; template struct Sequential { T operator()() const { return current++; } mutable T current{}; }; template struct Sequential*> { Ptr* operator()() const { auto* result = MakePtr(current); current += Align; return result; } mutable uintptr_t current = PointerForAlignment(); }; template struct Sequential> { std::string operator()() const { return String::Make(current++); } mutable uint32_t current = 0; }; template struct Sequential> { mutable Sequential tseq; mutable Sequential useq; using RealT = decltype(tseq()); using RealU = decltype(useq()); mutable std::vector ts; mutable std::vector us; mutable size_t ti = 0, ui = 0; std::pair operator()() const { std::pair value{get_t(), get_u()}; if (ti == 0) { ti = ui + 1; ui = 0; } else { --ti; ++ui; } return value; } RealT get_t() const { while (ti >= ts.size()) ts.push_back(tseq()); return ts[ti]; } RealU get_u() const { while (ui >= us.size()) us.push_back(useq()); return us[ui]; } }; template struct AlmostSequential { mutable Sequential current; auto operator()() 
const -> decltype(current()) { while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.) current(); return current(); } }; struct Uniform { template T operator()(T) const { return absl::Uniform(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0}); } }; struct Gaussian { template T operator()(T) const { double d; do { d = absl::Gaussian(GlobalBitGen(), 1e6, 1e4); } while (d <= 0 || d > std::numeric_limits::max() / 2); return static_cast(d); } }; struct Zipf { template T operator()(T) const { return absl::Zipf(GlobalBitGen(), std::numeric_limits::max(), 1.6); } }; template struct Random { T operator()() const { return Dist{}(T{}); } }; template struct Random*, Dist> { Ptr* operator()() const { return MakePtr(Random{}() * Align); } }; template struct Random { IntIdentity operator()() const { return IntIdentity{Random{}()}; } }; template struct Random, Dist> { PtrIdentity operator()() const { return PtrIdentity{Random{}() * Align}; } }; template struct Random, Dist> { std::string operator()() const { return String::Make(Random{}()); } }; template struct Random, Dist> { auto operator()() const -> decltype(std::make_pair(Random{}(), Random{}())) { return std::make_pair(Random{}(), Random{}()); } }; template std::string Name(); std::string Name(uint32_t*) { return "u32"; } std::string Name(uint64_t*) { return "u64"; } std::string Name(IntIdentity*) { return "IntIdentity"; } template std::string Name(Ptr**) { return absl::StrCat("Ptr", Align); } template std::string Name(PtrIdentity*) { return absl::StrCat("PtrIdentity", Align); } template std::string Name(String*) { return small ? 
"StrS" : "StrL"; } template std::string Name(std::pair*) { if (output() == OutputStyle::kBenchmark) return absl::StrCat("P_", Name(), "_", Name()); return absl::StrCat("P<", Name(), ",", Name(), ">"); } template std::string Name(Sequential*) { return "Sequential"; } template std::string Name(AlmostSequential*) { return absl::StrCat("AlmostSeq_", P); } template std::string Name(Random*) { return "UnifRand"; } template std::string Name(Random*) { return "GausRand"; } template std::string Name(Random*) { return "ZipfRand"; } template std::string Name() { return Name(static_cast(nullptr)); } constexpr int kNameWidth = 15; constexpr int kDistWidth = 16; bool CanRunBenchmark(absl::string_view name) { static std::regex* const filter = []() -> std::regex* { return benchmarks.empty() || benchmarks == "all" ? nullptr : new std::regex(std::string(benchmarks)); }(); return filter == nullptr || std::regex_search(std::string(name), *filter); } struct Result { std::string name; std::string dist_name; Ratios ratios; }; template void RunForTypeAndDistribution(std::vector& results) { std::string name = absl::StrCat(Name(), "/", Name()); // We have to check against all three names (min/avg/max) before we run it. // If any of them is enabled, we run it. if (!CanRunBenchmark(absl::StrCat(name, "/min")) && !CanRunBenchmark(absl::StrCat(name, "/avg")) && !CanRunBenchmark(absl::StrCat(name, "/max"))) { return; } results.push_back({Name(), Name(), CollectMeanProbeLengths()}); } template void RunForType(std::vector& results) { RunForTypeAndDistribution>(results); RunForTypeAndDistribution>(results); RunForTypeAndDistribution>(results); RunForTypeAndDistribution>(results); #ifdef NDEBUG // Disable these in non-opt mode because they take too long. RunForTypeAndDistribution>(results); RunForTypeAndDistribution>(results); #endif // NDEBUG } } // namespace int main(int argc, char** argv) { // Parse the benchmark flags. Ignore all of them except the regex pattern. 
for (int i = 1; i < argc; ++i) { absl::string_view arg = argv[i]; const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; }; if (absl::ConsumePrefix(&arg, "--benchmark_filter")) { if (arg == "") { // --benchmark_filter X benchmarks = next(); } else if (absl::ConsumePrefix(&arg, "=")) { // --benchmark_filter=X benchmarks = arg; } } // Any --benchmark flag turns on the mode. if (absl::ConsumePrefix(&arg, "--benchmark")) { if (benchmarks.empty()) benchmarks="all"; } } std::vector results; RunForType(results); RunForType(results); RunForType*>(results); RunForType*>(results); RunForType*>(results); RunForType*>(results); RunForType>(results); RunForType>(results); RunForType>(results); RunForType>(results); RunForType>(results); RunForType>(results); RunForType>(results); RunForType>>(results); RunForType, uint64_t>>(results); RunForType>>(results); RunForType, uint64_t>>(results); switch (output()) { case OutputStyle::kRegular: absl::PrintF("%-*s%-*s Min Avg Max\n%s\n", kNameWidth, "Type", kDistWidth, "Distribution", std::string(kNameWidth + kDistWidth + 10 * 3, '-')); for (const auto& result : results) { absl::PrintF("%-*s%-*s %8.4f %8.4f %8.4f\n", kNameWidth, result.name, kDistWidth, result.dist_name, result.ratios.min_load, result.ratios.avg_load, result.ratios.max_load); } break; case OutputStyle::kBenchmark: { absl::PrintF("{\n"); absl::PrintF(" \"benchmarks\": [\n"); absl::string_view comma; for (const auto& result : results) { auto print = [&](absl::string_view stat, double Ratios::*val) { std::string name = absl::StrCat(result.name, "/", result.dist_name, "/", stat); // Check the regex again. We might had have enabled only one of the // stats for the benchmark. 
if (!CanRunBenchmark(name)) return; absl::PrintF(" %s{\n", comma); absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val); absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val); absl::PrintF(" \"iterations\": 1,\n"); absl::PrintF(" \"name\": \"%s\",\n", name); absl::PrintF(" \"time_unit\": \"ns\"\n"); absl::PrintF(" }\n"); comma = ","; }; print("min", &Ratios::min_load); print("avg", &Ratios::avg_load); print("max", &Ratios::max_load); } absl::PrintF(" ],\n"); absl::PrintF(" \"context\": {\n"); absl::PrintF(" }\n"); absl::PrintF("}\n"); break; } } return 0; } abseil-20220623.1/absl/container/internal/raw_hash_set_test.cc000066400000000000000000002050261430371345100241140ustar00rootroot00000000000000// Copyright 2018 The Abseil Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
#include "absl/container/internal/raw_hash_set.h" #include #include #include #include #include #include #include #include #include #include #include #include "gmock/gmock.h" #include "gtest/gtest.h" #include "absl/base/attributes.h" #include "absl/base/config.h" #include "absl/base/internal/cycleclock.h" #include "absl/base/internal/prefetch.h" #include "absl/base/internal/raw_logging.h" #include "absl/container/internal/container_memory.h" #include "absl/container/internal/hash_function_defaults.h" #include "absl/container/internal/hash_policy_testing.h" #include "absl/container/internal/hashtable_debug.h" #include "absl/strings/string_view.h" namespace absl { ABSL_NAMESPACE_BEGIN namespace container_internal { struct RawHashSetTestOnlyAccess { template static auto GetSlots(const C& c) -> decltype(c.slots_) { return c.slots_; } }; namespace { using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Ge; using ::testing::Lt; using ::testing::Pair; using ::testing::UnorderedElementsAre; // Convenience function to static cast to ctrl_t. ctrl_t CtrlT(int i) { return static_cast(i); } TEST(Util, NormalizeCapacity) { EXPECT_EQ(1, NormalizeCapacity(0)); EXPECT_EQ(1, NormalizeCapacity(1)); EXPECT_EQ(3, NormalizeCapacity(2)); EXPECT_EQ(3, NormalizeCapacity(3)); EXPECT_EQ(7, NormalizeCapacity(4)); EXPECT_EQ(7, NormalizeCapacity(7)); EXPECT_EQ(15, NormalizeCapacity(8)); EXPECT_EQ(15, NormalizeCapacity(15)); EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1)); EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2)); } TEST(Util, GrowthAndCapacity) { // Verify that GrowthToCapacity gives the minimum capacity that has enough // growth. for (size_t growth = 0; growth < 10000; ++growth) { SCOPED_TRACE(growth); size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth)); // The capacity is large enough for `growth`. EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth)); // For (capacity+1) < kWidth, growth should equal capacity. 
if (capacity + 1 < Group::kWidth) { EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity)); } else { EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity)); } if (growth != 0 && capacity > 1) { // There is no smaller capacity that works. EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth)); } } for (size_t capacity = Group::kWidth - 1; capacity < 10000; capacity = 2 * capacity + 1) { SCOPED_TRACE(capacity); size_t growth = CapacityToGrowth(capacity); EXPECT_THAT(growth, Lt(capacity)); EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity); EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity); } } TEST(Util, probe_seq) { probe_seq<16> seq(0, 127); auto gen = [&]() { size_t res = seq.offset(); seq.next(); return res; }; std::vector offsets(8); std::generate_n(offsets.begin(), 8, gen); EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); seq = probe_seq<16>(128, 127); std::generate_n(offsets.begin(), 8, gen); EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64)); } TEST(BitMask, Smoke) { EXPECT_FALSE((BitMask(0))); EXPECT_TRUE((BitMask(5))); EXPECT_THAT((BitMask(0)), ElementsAre()); EXPECT_THAT((BitMask(0x1)), ElementsAre(0)); EXPECT_THAT((BitMask(0x2)), ElementsAre(1)); EXPECT_THAT((BitMask(0x3)), ElementsAre(0, 1)); EXPECT_THAT((BitMask(0x4)), ElementsAre(2)); EXPECT_THAT((BitMask(0x5)), ElementsAre(0, 2)); EXPECT_THAT((BitMask(0x55)), ElementsAre(0, 2, 4, 6)); EXPECT_THAT((BitMask(0xAA)), ElementsAre(1, 3, 5, 7)); } TEST(BitMask, WithShift) { // See the non-SSE version of Group for details on what this math is for. 
uint64_t ctrl = 0x1716151413121110; uint64_t hash = 0x12; constexpr uint64_t msbs = 0x8080808080808080ULL; constexpr uint64_t lsbs = 0x0101010101010101ULL; auto x = ctrl ^ (lsbs * hash); uint64_t mask = (x - lsbs) & ~x & msbs; EXPECT_EQ(0x0000000080800000, mask); BitMask b(mask); EXPECT_EQ(*b, 2); } TEST(BitMask, LeadingTrailing) { EXPECT_EQ((BitMask(0x00001a40).LeadingZeros()), 3); EXPECT_EQ((BitMask(0x00001a40).TrailingZeros()), 6); EXPECT_EQ((BitMask(0x00000001).LeadingZeros()), 15); EXPECT_EQ((BitMask(0x00000001).TrailingZeros()), 0); EXPECT_EQ((BitMask(0x00008000).LeadingZeros()), 0); EXPECT_EQ((BitMask(0x00008000).TrailingZeros()), 15); EXPECT_EQ((BitMask(0x0000008080808000).LeadingZeros()), 3); EXPECT_EQ((BitMask(0x0000008080808000).TrailingZeros()), 1); EXPECT_EQ((BitMask(0x0000000000000080).LeadingZeros()), 7); EXPECT_EQ((BitMask(0x0000000000000080).TrailingZeros()), 0); EXPECT_EQ((BitMask(0x8000000000000000).LeadingZeros()), 0); EXPECT_EQ((BitMask(0x8000000000000000).TrailingZeros()), 7); } TEST(Group, EmptyGroup) { for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h)); } TEST(Group, Match) { if (Group::kWidth == 16) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; EXPECT_THAT(Group{group}.Match(0), ElementsAre()); EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15)); EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10)); EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9)); EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8)); } else if (Group::kWidth == 8) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), ctrl_t::kDeleted, CtrlT(2), CtrlT(1), ctrl_t::kSentinel, CtrlT(1)}; EXPECT_THAT(Group{group}.Match(0), ElementsAre()); EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7)); EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4)); } else { FAIL() 
<< "No test coverage for Group::kWidth==" << Group::kWidth; } } TEST(Group, MaskEmpty) { if (Group::kWidth == 16) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3), ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 4); } else if (Group::kWidth == 8) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), ctrl_t::kDeleted, CtrlT(2), CtrlT(1), ctrl_t::kSentinel, CtrlT(1)}; EXPECT_THAT(Group{group}.MaskEmpty().LowestBitSet(), 0); EXPECT_THAT(Group{group}.MaskEmpty().HighestBitSet(), 0); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } } TEST(Group, MaskEmptyOrDeleted) { if (Group::kWidth == 16) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3), ctrl_t::kDeleted, CtrlT(5), ctrl_t::kSentinel, CtrlT(7), CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)}; EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 4); } else if (Group::kWidth == 8) { ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2), ctrl_t::kDeleted, CtrlT(2), CtrlT(1), ctrl_t::kSentinel, CtrlT(1)}; EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().LowestBitSet(), 0); EXPECT_THAT(Group{group}.MaskEmptyOrDeleted().HighestBitSet(), 3); } else { FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth; } } TEST(Batch, DropDeletes) { constexpr size_t kCapacity = 63; constexpr size_t kGroupWidth = container_internal::Group::kWidth; std::vector ctrl(kCapacity + 1 + kGroupWidth); ctrl[kCapacity] = ctrl_t::kSentinel; std::vector pattern = { ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2), ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted}; for (size_t i = 0; i != kCapacity; ++i) { ctrl[i] = pattern[i % pattern.size()]; if (i < kGroupWidth - 1) 
ctrl[i + kCapacity + 1] = pattern[i % pattern.size()]; } ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity); ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel); for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) { ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()]; if (i == kCapacity) expected = ctrl_t::kSentinel; if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty; if (IsFull(expected)) expected = ctrl_t::kDeleted; EXPECT_EQ(ctrl[i], expected) << i << " " << static_cast(pattern[i % pattern.size()]); } } TEST(Group, CountLeadingEmptyOrDeleted) { const std::vector empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted}; const std::vector full_examples = { CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3), CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel}; for (ctrl_t empty : empty_examples) { std::vector e(Group::kWidth, empty); EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted()); for (ctrl_t full : full_examples) { for (size_t i = 0; i != Group::kWidth; ++i) { std::vector f(Group::kWidth, empty); f[i] = full; EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted()); } std::vector f(Group::kWidth, empty); f[Group::kWidth * 2 / 3] = full; f[Group::kWidth / 2] = full; EXPECT_EQ( Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted()); } } } template struct ValuePolicy { using slot_type = T; using key_type = T; using init_type = T; template static void construct(Allocator* alloc, slot_type* slot, Args&&... 
args) { absl::allocator_traits::construct(*alloc, slot, std::forward(args)...); } template static void destroy(Allocator* alloc, slot_type* slot) { absl::allocator_traits::destroy(*alloc, slot); } template static void transfer(Allocator* alloc, slot_type* new_slot, slot_type* old_slot) { construct(alloc, new_slot, std::move(*old_slot)); destroy(alloc, old_slot); } static T& element(slot_type* slot) { return *slot; } template static decltype(absl::container_internal::DecomposeValue( std::declval(), std::declval()...)) apply(F&& f, Args&&... args) { return absl::container_internal::DecomposeValue( std::forward(f), std::forward(args)...); } }; using IntPolicy = ValuePolicy; using Uint8Policy = ValuePolicy; class StringPolicy { template ::value>::type> decltype(std::declval()( std::declval(), std::piecewise_construct, std::declval>(), std::declval())) static apply_impl(F&& f, std::pair, V> p) { const absl::string_view& key = std::get<0>(p.first); return std::forward(f)(key, std::piecewise_construct, std::move(p.first), std::move(p.second)); } public: struct slot_type { struct ctor {}; template slot_type(ctor, Ts&&... ts) : pair(std::forward(ts)...) {} std::pair pair; }; using key_type = std::string; using init_type = std::pair; template static void construct(allocator_type* alloc, slot_type* slot, Args... args) { std::allocator_traits::construct( *alloc, slot, typename slot_type::ctor(), std::forward(args)...); } template static void destroy(allocator_type* alloc, slot_type* slot) { std::allocator_traits::destroy(*alloc, slot); } template static void transfer(allocator_type* alloc, slot_type* new_slot, slot_type* old_slot) { construct(alloc, new_slot, std::move(old_slot->pair)); destroy(alloc, old_slot); } static std::pair& element(slot_type* slot) { return slot->pair; } template static auto apply(F&& f, Args&&... 
args) -> decltype(apply_impl(std::forward(f), PairArgs(std::forward(args)...))) { return apply_impl(std::forward(f), PairArgs(std::forward(args)...)); } }; struct StringHash : absl::Hash { using is_transparent = void; }; struct StringEq : std::equal_to { using is_transparent = void; }; struct StringTable : raw_hash_set> { using Base = typename StringTable::raw_hash_set; StringTable() {} using Base::Base; }; struct IntTable : raw_hash_set, std::equal_to, std::allocator> { using Base = typename IntTable::raw_hash_set; using Base::Base; }; struct Uint8Table : raw_hash_set, std::equal_to, std::allocator> { using Base = typename Uint8Table::raw_hash_set; using Base::Base; }; template struct CustomAlloc : std::allocator { CustomAlloc() {} template CustomAlloc(const CustomAlloc& other) {} template struct rebind { using other = CustomAlloc; }; }; struct CustomAllocIntTable : raw_hash_set, std::equal_to, CustomAlloc> { using Base = typename CustomAllocIntTable::raw_hash_set; using Base::Base; }; struct BadFastHash { template size_t operator()(const T&) const { return 0; } }; struct BadTable : raw_hash_set, std::allocator> { using Base = typename BadTable::raw_hash_set; BadTable() {} using Base::Base; }; TEST(Table, EmptyFunctorOptimization) { static_assert(std::is_empty>::value, ""); static_assert(std::is_empty>::value, ""); struct MockTable { void* ctrl; void* slots; size_t size; size_t capacity; size_t growth_left; void* infoz; }; struct MockTableInfozDisabled { void* ctrl; void* slots; size_t size; size_t capacity; size_t growth_left; }; struct StatelessHash { size_t operator()(absl::string_view) const { return 0; } }; struct StatefulHash : StatelessHash { size_t dummy; }; if (std::is_empty::value) { EXPECT_EQ(sizeof(MockTableInfozDisabled), sizeof(raw_hash_set, std::allocator>)); EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash), sizeof(raw_hash_set, std::allocator>)); } else { EXPECT_EQ(sizeof(MockTable), sizeof(raw_hash_set, std::allocator>)); 
EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash), sizeof(raw_hash_set, std::allocator>)); } } TEST(Table, Empty) { IntTable t; EXPECT_EQ(0, t.size()); EXPECT_TRUE(t.empty()); } TEST(Table, LookupEmpty) { IntTable t; auto it = t.find(0); EXPECT_TRUE(it == t.end()); } TEST(Table, Insert1) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_THAT(*res.first, 0); EXPECT_EQ(1, t.size()); EXPECT_THAT(*t.find(0), 0); } TEST(Table, Insert2) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_THAT(*res.first, 0); EXPECT_EQ(1, t.size()); EXPECT_TRUE(t.find(1) == t.end()); res = t.emplace(1); EXPECT_TRUE(res.second); EXPECT_THAT(*res.first, 1); EXPECT_EQ(2, t.size()); EXPECT_THAT(*t.find(0), 0); EXPECT_THAT(*t.find(1), 1); } TEST(Table, InsertCollision) { BadTable t; EXPECT_TRUE(t.find(1) == t.end()); auto res = t.emplace(1); EXPECT_TRUE(res.second); EXPECT_THAT(*res.first, 1); EXPECT_EQ(1, t.size()); EXPECT_TRUE(t.find(2) == t.end()); res = t.emplace(2); EXPECT_THAT(*res.first, 2); EXPECT_TRUE(res.second); EXPECT_EQ(2, t.size()); EXPECT_THAT(*t.find(1), 1); EXPECT_THAT(*t.find(2), 2); } // Test that we do not add existent element in case we need to search through // many groups with deleted elements TEST(Table, InsertCollisionAndFindAfterDelete) { BadTable t; // all elements go to the same group. // Have at least 2 groups with Group::kWidth collisions // plus some extra collisions in the last group. constexpr size_t kNumInserts = Group::kWidth * 2 + 5; for (size_t i = 0; i < kNumInserts; ++i) { auto res = t.emplace(i); EXPECT_TRUE(res.second); EXPECT_THAT(*res.first, i); EXPECT_EQ(i + 1, t.size()); } // Remove elements one by one and check // that we still can find all other elements. 
for (size_t i = 0; i < kNumInserts; ++i) { EXPECT_EQ(1, t.erase(i)) << i; for (size_t j = i + 1; j < kNumInserts; ++j) { EXPECT_THAT(*t.find(j), j); auto res = t.emplace(j); EXPECT_FALSE(res.second) << i << " " << j; EXPECT_THAT(*res.first, j); EXPECT_EQ(kNumInserts - i - 1, t.size()); } } EXPECT_TRUE(t.empty()); } TEST(Table, InsertWithinCapacity) { IntTable t; t.reserve(10); const size_t original_capacity = t.capacity(); const auto addr = [&](int i) { return reinterpret_cast(&*t.find(i)); }; // Inserting an element does not change capacity. t.insert(0); EXPECT_THAT(t.capacity(), original_capacity); const uintptr_t original_addr_0 = addr(0); // Inserting another element does not rehash. t.insert(1); EXPECT_THAT(t.capacity(), original_capacity); EXPECT_THAT(addr(0), original_addr_0); // Inserting lots of duplicate elements does not rehash. for (int i = 0; i < 100; ++i) { t.insert(i % 10); } EXPECT_THAT(t.capacity(), original_capacity); EXPECT_THAT(addr(0), original_addr_0); // Inserting a range of duplicate elements does not rehash. 
std::vector dup_range; for (int i = 0; i < 100; ++i) { dup_range.push_back(i % 10); } t.insert(dup_range.begin(), dup_range.end()); EXPECT_THAT(t.capacity(), original_capacity); EXPECT_THAT(addr(0), original_addr_0); } TEST(Table, LazyEmplace) { StringTable t; bool called = false; auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { called = true; f("abc", "ABC"); }); EXPECT_TRUE(called); EXPECT_THAT(*it, Pair("abc", "ABC")); called = false; it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) { called = true; f("abc", "DEF"); }); EXPECT_FALSE(called); EXPECT_THAT(*it, Pair("abc", "ABC")); } TEST(Table, ContainsEmpty) { IntTable t; EXPECT_FALSE(t.contains(0)); } TEST(Table, Contains1) { IntTable t; EXPECT_TRUE(t.insert(0).second); EXPECT_TRUE(t.contains(0)); EXPECT_FALSE(t.contains(1)); EXPECT_EQ(1, t.erase(0)); EXPECT_FALSE(t.contains(0)); } TEST(Table, Contains2) { IntTable t; EXPECT_TRUE(t.insert(0).second); EXPECT_TRUE(t.contains(0)); EXPECT_FALSE(t.contains(1)); t.clear(); EXPECT_FALSE(t.contains(0)); } int decompose_constructed; int decompose_copy_constructed; int decompose_copy_assigned; int decompose_move_constructed; int decompose_move_assigned; struct DecomposeType { DecomposeType(int i = 0) : i(i) { // NOLINT ++decompose_constructed; } explicit DecomposeType(const char* d) : DecomposeType(*d) {} DecomposeType(const DecomposeType& other) : i(other.i) { ++decompose_copy_constructed; } DecomposeType& operator=(const DecomposeType& other) { ++decompose_copy_assigned; i = other.i; return *this; } DecomposeType(DecomposeType&& other) : i(other.i) { ++decompose_move_constructed; } DecomposeType& operator=(DecomposeType&& other) { ++decompose_move_assigned; i = other.i; return *this; } int i; }; struct DecomposeHash { using is_transparent = void; size_t operator()(const DecomposeType& a) const { return a.i; } size_t operator()(int a) const { return a; } size_t operator()(const char* a) const { return *a; } }; struct DecomposeEq { 
using is_transparent = void; bool operator()(const DecomposeType& a, const DecomposeType& b) const { return a.i == b.i; } bool operator()(const DecomposeType& a, int b) const { return a.i == b; } bool operator()(const DecomposeType& a, const char* b) const { return a.i == *b; } }; struct DecomposePolicy { using slot_type = DecomposeType; using key_type = DecomposeType; using init_type = DecomposeType; template static void construct(void*, DecomposeType* slot, T&& v) { ::new (slot) DecomposeType(std::forward(v)); } static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); } static DecomposeType& element(slot_type* slot) { return *slot; } template static auto apply(F&& f, const T& x) -> decltype(std::forward(f)(x, x)) { return std::forward(f)(x, x); } }; template void TestDecompose(bool construct_three) { DecomposeType elem{0}; const int one = 1; const char* three_p = "3"; const auto& three = three_p; const int elem_vector_count = 256; std::vector elem_vector(elem_vector_count, DecomposeType{0}); std::iota(elem_vector.begin(), elem_vector.end(), 0); using DecomposeSet = raw_hash_set>; DecomposeSet set1; decompose_constructed = 0; int expected_constructed = 0; EXPECT_EQ(expected_constructed, decompose_constructed); set1.insert(elem); EXPECT_EQ(expected_constructed, decompose_constructed); set1.insert(1); EXPECT_EQ(++expected_constructed, decompose_constructed); set1.emplace("3"); EXPECT_EQ(++expected_constructed, decompose_constructed); EXPECT_EQ(expected_constructed, decompose_constructed); { // insert(T&&) set1.insert(1); EXPECT_EQ(expected_constructed, decompose_constructed); } { // insert(const T&) set1.insert(one); EXPECT_EQ(expected_constructed, decompose_constructed); } { // insert(hint, T&&) set1.insert(set1.begin(), 1); EXPECT_EQ(expected_constructed, decompose_constructed); } { // insert(hint, const T&) set1.insert(set1.begin(), one); EXPECT_EQ(expected_constructed, decompose_constructed); } { // emplace(...) 
set1.emplace(1); EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace("3"); expected_constructed += construct_three; EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace(one); EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace(three); expected_constructed += construct_three; EXPECT_EQ(expected_constructed, decompose_constructed); } { // emplace_hint(...) set1.emplace_hint(set1.begin(), 1); EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace_hint(set1.begin(), "3"); expected_constructed += construct_three; EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace_hint(set1.begin(), one); EXPECT_EQ(expected_constructed, decompose_constructed); set1.emplace_hint(set1.begin(), three); expected_constructed += construct_three; EXPECT_EQ(expected_constructed, decompose_constructed); } decompose_copy_constructed = 0; decompose_copy_assigned = 0; decompose_move_constructed = 0; decompose_move_assigned = 0; int expected_copy_constructed = 0; int expected_move_constructed = 0; { // raw_hash_set(first, last) with random-access iterators DecomposeSet set2(elem_vector.begin(), elem_vector.end()); // Expect exactly one copy-constructor call for each element if no // rehashing is done. expected_copy_constructed += elem_vector_count; EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); EXPECT_EQ(expected_move_constructed, decompose_move_constructed); EXPECT_EQ(0, decompose_move_assigned); EXPECT_EQ(0, decompose_copy_assigned); } { // raw_hash_set(first, last) with forward iterators std::list elem_list(elem_vector.begin(), elem_vector.end()); expected_copy_constructed = decompose_copy_constructed; DecomposeSet set2(elem_list.begin(), elem_list.end()); // Expect exactly N elements copied into set, expect at most 2*N elements // moving internally for all resizing needed (for a growth factor of 2). 
expected_copy_constructed += elem_vector_count; EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); expected_move_constructed += elem_vector_count; EXPECT_LT(expected_move_constructed, decompose_move_constructed); expected_move_constructed += elem_vector_count; EXPECT_GE(expected_move_constructed, decompose_move_constructed); EXPECT_EQ(0, decompose_move_assigned); EXPECT_EQ(0, decompose_copy_assigned); expected_copy_constructed = decompose_copy_constructed; expected_move_constructed = decompose_move_constructed; } { // insert(first, last) DecomposeSet set2; set2.insert(elem_vector.begin(), elem_vector.end()); // Expect exactly N elements copied into set, expect at most 2*N elements // moving internally for all resizing needed (for a growth factor of 2). const int expected_new_elements = elem_vector_count; const int expected_max_element_moves = 2 * elem_vector_count; expected_copy_constructed += expected_new_elements; EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed); expected_move_constructed += expected_max_element_moves; EXPECT_GE(expected_move_constructed, decompose_move_constructed); EXPECT_EQ(0, decompose_move_assigned); EXPECT_EQ(0, decompose_copy_assigned); expected_copy_constructed = decompose_copy_constructed; expected_move_constructed = decompose_move_constructed; } } TEST(Table, Decompose) { TestDecompose(false); struct TransparentHashIntOverload { size_t operator()(const DecomposeType& a) const { return a.i; } size_t operator()(int a) const { return a; } }; struct TransparentEqIntOverload { bool operator()(const DecomposeType& a, const DecomposeType& b) const { return a.i == b.i; } bool operator()(const DecomposeType& a, int b) const { return a.i == b; } }; TestDecompose(true); TestDecompose(true); TestDecompose(true); } // Returns the largest m such that a table with m elements has the same number // of buckets as a table with n elements. 
size_t MaxDensitySize(size_t n) { IntTable t; t.reserve(n); for (size_t i = 0; i != n; ++i) t.emplace(i); const size_t c = t.bucket_count(); while (c == t.bucket_count()) t.emplace(n++); return t.size() - 1; } struct Modulo1000Hash { size_t operator()(int x) const { return x % 1000; } }; struct Modulo1000HashTable : public raw_hash_set, std::allocator> {}; // Test that rehash with no resize happen in case of many deleted slots. TEST(Table, RehashWithNoResize) { Modulo1000HashTable t; // Adding the same length (and the same hash) strings // to have at least kMinFullGroups groups // with Group::kWidth collisions. Then fill up to MaxDensitySize; const size_t kMinFullGroups = 7; std::vector keys; for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) { int k = i * 1000; t.emplace(k); keys.push_back(k); } const size_t capacity = t.capacity(); // Remove elements from all groups except the first and the last one. // All elements removed from full groups will be marked as ctrl_t::kDeleted. const size_t erase_begin = Group::kWidth / 2; const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth; for (size_t i = erase_begin; i < erase_end; ++i) { EXPECT_EQ(1, t.erase(keys[i])) << i; } keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end); auto last_key = keys.back(); size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key); // Make sure that we have to make a lot of probes for last key. ASSERT_GT(last_key_num_probes, kMinFullGroups); int x = 1; // Insert and erase one element, before inplace rehash happen. while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) { t.emplace(x); ASSERT_EQ(capacity, t.capacity()); // All elements should be there. 
ASSERT_TRUE(t.find(x) != t.end()) << x; for (const auto& k : keys) { ASSERT_TRUE(t.find(k) != t.end()) << k; } t.erase(x); ++x; } } TEST(Table, InsertEraseStressTest) { IntTable t; const size_t kMinElementCount = 250; std::deque keys; size_t i = 0; for (; i < MaxDensitySize(kMinElementCount); ++i) { t.emplace(i); keys.push_back(i); } const size_t kNumIterations = 1000000; for (; i < kNumIterations; ++i) { ASSERT_EQ(1, t.erase(keys.front())); keys.pop_front(); t.emplace(i); keys.push_back(i); } } TEST(Table, InsertOverloads) { StringTable t; // These should all trigger the insert(init_type) overload. t.insert({{}, {}}); t.insert({"ABC", {}}); t.insert({"DEF", "!!!"}); EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""), Pair("DEF", "!!!"))); } TEST(Table, LargeTable) { IntTable t; for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40); for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40)); } // Timeout if copy is quadratic as it was in Rust. TEST(Table, EnsureNonQuadraticAsInRust) { static const size_t kLargeSize = 1 << 15; IntTable t; for (size_t i = 0; i != kLargeSize; ++i) { t.insert(i); } // If this is quadratic, the test will timeout. IntTable t2; for (const auto& entry : t) t2.insert(entry); } TEST(Table, ClearBug) { IntTable t; constexpr size_t capacity = container_internal::Group::kWidth - 1; constexpr size_t max_size = capacity / 2 + 1; for (size_t i = 0; i < max_size; ++i) { t.insert(i); } ASSERT_EQ(capacity, t.capacity()); intptr_t original = reinterpret_cast(&*t.find(2)); t.clear(); ASSERT_EQ(capacity, t.capacity()); for (size_t i = 0; i < max_size; ++i) { t.insert(i); } ASSERT_EQ(capacity, t.capacity()); intptr_t second = reinterpret_cast(&*t.find(2)); // We are checking that original and second are close enough to each other // that they are probably still in the same group. This is not strictly // guaranteed. 
EXPECT_LT(std::abs(original - second), capacity * sizeof(IntTable::value_type)); } TEST(Table, Erase) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_EQ(1, t.size()); t.erase(res.first); EXPECT_EQ(0, t.size()); EXPECT_TRUE(t.find(0) == t.end()); } TEST(Table, EraseMaintainsValidIterator) { IntTable t; const int kNumElements = 100; for (int i = 0; i < kNumElements; i ++) { EXPECT_TRUE(t.emplace(i).second); } EXPECT_EQ(t.size(), kNumElements); int num_erase_calls = 0; auto it = t.begin(); while (it != t.end()) { t.erase(it++); num_erase_calls++; } EXPECT_TRUE(t.empty()); EXPECT_EQ(num_erase_calls, kNumElements); } // Collect N bad keys by following algorithm: // 1. Create an empty table and reserve it to 2 * N. // 2. Insert N random elements. // 3. Take first Group::kWidth - 1 to bad_keys array. // 4. Clear the table without resize. // 5. Go to point 2 while N keys not collected std::vector CollectBadMergeKeys(size_t N) { static constexpr int kGroupSize = Group::kWidth - 1; auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector { for (size_t i = b; i != e; ++i) { t->emplace(i); } std::vector res; res.reserve(kGroupSize); auto it = t->begin(); for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) { res.push_back(*it); } return res; }; std::vector bad_keys; bad_keys.reserve(N); IntTable t; t.reserve(N * 2); for (size_t b = 0; bad_keys.size() < N; b += N) { auto keys = topk_range(b, b + N, &t); bad_keys.insert(bad_keys.end(), keys.begin(), keys.end()); t.erase(t.begin(), t.end()); EXPECT_TRUE(t.empty()); } return bad_keys; } struct ProbeStats { // Number of elements with specific probe length over all tested tables. std::vector all_probes_histogram; // Ratios total_probe_length/size for every tested table. 
std::vector single_table_ratios; friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) { ProbeStats res = a; res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(), b.all_probes_histogram.size())); std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(), res.all_probes_histogram.begin(), res.all_probes_histogram.begin(), std::plus()); res.single_table_ratios.insert(res.single_table_ratios.end(), b.single_table_ratios.begin(), b.single_table_ratios.end()); return res; } // Average ratio total_probe_length/size over tables. double AvgRatio() const { return std::accumulate(single_table_ratios.begin(), single_table_ratios.end(), 0.0) / single_table_ratios.size(); } // Maximum ratio total_probe_length/size over tables. double MaxRatio() const { return *std::max_element(single_table_ratios.begin(), single_table_ratios.end()); } // Percentile ratio total_probe_length/size over tables. double PercentileRatio(double Percentile = 0.95) const { auto r = single_table_ratios; auto mid = r.begin() + static_cast(r.size() * Percentile); if (mid != r.end()) { std::nth_element(r.begin(), mid, r.end()); return *mid; } else { return MaxRatio(); } } // Maximum probe length over all elements and all tables. size_t MaxProbe() const { return all_probes_histogram.size(); } // Fraction of elements with specified probe length. 
std::vector ProbeNormalizedHistogram() const { double total_elements = std::accumulate(all_probes_histogram.begin(), all_probes_histogram.end(), 0ull); std::vector res; for (size_t p : all_probes_histogram) { res.push_back(p / total_elements); } return res; } size_t PercentileProbe(double Percentile = 0.99) const { size_t idx = 0; for (double p : ProbeNormalizedHistogram()) { if (Percentile > p) { Percentile -= p; ++idx; } else { return idx; } } return idx; } friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) { out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio() << ", PercentileRatio:" << s.PercentileRatio() << ", MaxProbe:" << s.MaxProbe() << ", Probes=["; for (double p : s.ProbeNormalizedHistogram()) { out << p << ","; } out << "]}"; return out; } }; struct ExpectedStats { double avg_ratio; double max_ratio; std::vector> pecentile_ratios; std::vector> pecentile_probes; friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) { out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio << ", PercentileRatios: ["; for (auto el : s.pecentile_ratios) { out << el.first << ":" << el.second << ", "; } out << "], PercentileProbes: ["; for (auto el : s.pecentile_probes) { out << el.first << ":" << el.second << ", "; } out << "]}"; return out; } }; void VerifyStats(size_t size, const ExpectedStats& exp, const ProbeStats& stats) { EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats; EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats; for (auto pr : exp.pecentile_ratios) { EXPECT_LE(stats.PercentileRatio(pr.first), pr.second) << size << " " << pr.first << " " << stats; } for (auto pr : exp.pecentile_probes) { EXPECT_LE(stats.PercentileProbe(pr.first), pr.second) << size << " " << pr.first << " " << stats; } } using ProbeStatsPerSize = std::map; // Collect total ProbeStats on num_iters iterations of the following algorithm: // 1. 
Create new table and reserve it to keys.size() * 2 // 2. Insert all keys xored with seed // 3. Collect ProbeStats from final table. ProbeStats CollectProbeStatsOnKeysXoredWithSeed( const std::vector& keys, size_t num_iters) { const size_t reserve_size = keys.size() * 2; ProbeStats stats; int64_t seed = 0x71b1a19b907d6e33; while (num_iters--) { seed = static_cast(static_cast(seed) * 17 + 13); IntTable t1; t1.reserve(reserve_size); for (const auto& key : keys) { t1.emplace(key ^ seed); } auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); stats.all_probes_histogram.resize( std::max(stats.all_probes_histogram.size(), probe_histogram.size())); std::transform(probe_histogram.begin(), probe_histogram.end(), stats.all_probes_histogram.begin(), stats.all_probes_histogram.begin(), std::plus()); size_t total_probe_seq_length = 0; for (size_t i = 0; i < probe_histogram.size(); ++i) { total_probe_seq_length += i * probe_histogram[i]; } stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / keys.size()); t1.erase(t1.begin(), t1.end()); } return stats; } ExpectedStats XorSeedExpectedStats() { constexpr bool kRandomizesInserts = #ifdef NDEBUG false; #else // NDEBUG true; #endif // NDEBUG // The effective load factor is larger in non-opt mode because we insert // elements out of order. switch (container_internal::Group::kWidth) { case 8: if (kRandomizesInserts) { return {0.05, 1.0, {{0.95, 0.5}}, {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; } else { return {0.05, 2.0, {{0.95, 0.1}}, {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}}; } case 16: if (kRandomizesInserts) { return {0.1, 2.0, {{0.95, 0.1}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { return {0.05, 1.0, {{0.95, 0.05}}, {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}}; } } ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); return {}; } // TODO(b/80415403): Figure out why this test is so flaky, esp. 
on MSVC TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; for (size_t size : sizes) { stats[size] = CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200); } auto expected = XorSeedExpectedStats(); for (size_t size : sizes) { auto& stat = stats[size]; VerifyStats(size, expected, stat); } } // Collect total ProbeStats on num_iters iterations of the following algorithm: // 1. Create new table // 2. Select 10% of keys and insert 10 elements key * 17 + j * 13 // 3. Collect ProbeStats from final table ProbeStats CollectProbeStatsOnLinearlyTransformedKeys( const std::vector& keys, size_t num_iters) { ProbeStats stats; std::random_device rd; std::mt19937 rng(rd()); auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; }; std::uniform_int_distribution dist(0, keys.size()-1); while (num_iters--) { IntTable t1; size_t num_keys = keys.size() / 10; size_t start = dist(rng); for (size_t i = 0; i != num_keys; ++i) { for (size_t j = 0; j != 10; ++j) { t1.emplace(linear_transform(keys[(i + start) % keys.size()], j)); } } auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1); stats.all_probes_histogram.resize( std::max(stats.all_probes_histogram.size(), probe_histogram.size())); std::transform(probe_histogram.begin(), probe_histogram.end(), stats.all_probes_histogram.begin(), stats.all_probes_histogram.begin(), std::plus()); size_t total_probe_seq_length = 0; for (size_t i = 0; i < probe_histogram.size(); ++i) { total_probe_seq_length += i * probe_histogram[i]; } stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 / t1.size()); t1.erase(t1.begin(), t1.end()); } return stats; } ExpectedStats LinearTransformExpectedStats() { constexpr bool kRandomizesInserts = #ifdef NDEBUG false; #else // NDEBUG true; #endif // NDEBUG // The effective load factor is larger in non-opt mode because we insert // elements out of order. 
switch (container_internal::Group::kWidth) { case 8: if (kRandomizesInserts) { return {0.1, 0.5, {{0.95, 0.3}}, {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}}; } else { return {0.4, 0.6, {{0.95, 0.5}}, {{0.95, 1}, {0.99, 14}, {0.999, 23}, {0.9999, 26}}}; } case 16: if (kRandomizesInserts) { return {0.1, 0.4, {{0.95, 0.3}}, {{0.95, 1}, {0.99, 2}, {0.999, 9}, {0.9999, 15}}}; } else { return {0.05, 0.2, {{0.95, 0.1}}, {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}}; } } ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width"); return {}; } // TODO(b/80415403): Figure out why this test is so flaky. TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) { ProbeStatsPerSize stats; std::vector sizes = {Group::kWidth << 5, Group::kWidth << 10}; for (size_t size : sizes) { stats[size] = CollectProbeStatsOnLinearlyTransformedKeys( CollectBadMergeKeys(size), 300); } auto expected = LinearTransformExpectedStats(); for (size_t size : sizes) { auto& stat = stats[size]; VerifyStats(size, expected, stat); } } TEST(Table, EraseCollision) { BadTable t; // 1 2 3 t.emplace(1); t.emplace(2); t.emplace(3); EXPECT_THAT(*t.find(1), 1); EXPECT_THAT(*t.find(2), 2); EXPECT_THAT(*t.find(3), 3); EXPECT_EQ(3, t.size()); // 1 DELETED 3 t.erase(t.find(2)); EXPECT_THAT(*t.find(1), 1); EXPECT_TRUE(t.find(2) == t.end()); EXPECT_THAT(*t.find(3), 3); EXPECT_EQ(2, t.size()); // DELETED DELETED 3 t.erase(t.find(1)); EXPECT_TRUE(t.find(1) == t.end()); EXPECT_TRUE(t.find(2) == t.end()); EXPECT_THAT(*t.find(3), 3); EXPECT_EQ(1, t.size()); // DELETED DELETED DELETED t.erase(t.find(3)); EXPECT_TRUE(t.find(1) == t.end()); EXPECT_TRUE(t.find(2) == t.end()); EXPECT_TRUE(t.find(3) == t.end()); EXPECT_EQ(0, t.size()); } TEST(Table, EraseInsertProbing) { BadTable t(100); // 1 2 3 4 t.emplace(1); t.emplace(2); t.emplace(3); t.emplace(4); // 1 DELETED 3 DELETED t.erase(t.find(2)); t.erase(t.find(4)); // 1 10 3 11 12 t.emplace(10); t.emplace(11); t.emplace(12); EXPECT_EQ(5, t.size()); EXPECT_THAT(t, 
UnorderedElementsAre(1, 10, 3, 11, 12)); } TEST(Table, Clear) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); t.clear(); EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_EQ(1, t.size()); t.clear(); EXPECT_EQ(0, t.size()); EXPECT_TRUE(t.find(0) == t.end()); } TEST(Table, Swap) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); auto res = t.emplace(0); EXPECT_TRUE(res.second); EXPECT_EQ(1, t.size()); IntTable u; t.swap(u); EXPECT_EQ(0, t.size()); EXPECT_EQ(1, u.size()); EXPECT_TRUE(t.find(0) == t.end()); EXPECT_THAT(*u.find(0), 0); } TEST(Table, Rehash) { IntTable t; EXPECT_TRUE(t.find(0) == t.end()); t.emplace(0); t.emplace(1); EXPECT_EQ(2, t.size()); t.rehash(128); EXPECT_EQ(2, t.size()); EXPECT_THAT(*t.find(0), 0); EXPECT_THAT(*t.find(1), 1); } TEST(Table, RehashDoesNotRehashWhenNotNecessary) { IntTable t; t.emplace(0); t.emplace(1); auto* p = &*t.find(0); t.rehash(1); EXPECT_EQ(p, &*t.find(0)); } TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) { IntTable t; t.rehash(0); EXPECT_EQ(0, t.bucket_count()); } TEST(Table, RehashZeroDeallocatesEmptyTable) { IntTable t; t.emplace(0); t.clear(); EXPECT_NE(0, t.bucket_count()); t.rehash(0); EXPECT_EQ(0, t.bucket_count()); } TEST(Table, RehashZeroForcesRehash) { IntTable t; t.emplace(0); t.emplace(1); auto* p = &*t.find(0); t.rehash(0); EXPECT_NE(p, &*t.find(0)); } TEST(Table, ConstructFromInitList) { using P = std::pair; struct Q { operator P() const { return {}; } }; StringTable t = {P(), Q(), {}, {{}, {}}}; } TEST(Table, CopyConstruct) { IntTable t; t.emplace(0); EXPECT_EQ(1, t.size()); { IntTable u(t); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } { IntTable u{t}; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } { IntTable u = t; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find(0), 0); } } TEST(Table, CopyConstructWithAlloc) { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(t, Alloc>()); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), 
Pair("a", "b")); } struct ExplicitAllocIntTable : raw_hash_set, std::equal_to, Alloc> { ExplicitAllocIntTable() {} }; TEST(Table, AllocWithExplicitCtor) { ExplicitAllocIntTable t; EXPECT_EQ(0, t.size()); } TEST(Table, MoveConstruct) { { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(std::move(t)); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u{std::move(t)}; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u = std::move(t); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } } TEST(Table, MoveConstructWithAlloc) { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u(std::move(t), Alloc>()); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, CopyAssign) { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u; u = t; EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, CopySelfAssign) { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); t = *&t; EXPECT_EQ(1, t.size()); EXPECT_THAT(*t.find("a"), Pair("a", "b")); } TEST(Table, MoveAssign) { StringTable t; t.emplace("a", "b"); EXPECT_EQ(1, t.size()); StringTable u; u = std::move(t); EXPECT_EQ(1, u.size()); EXPECT_THAT(*u.find("a"), Pair("a", "b")); } TEST(Table, Equality) { StringTable t; std::vector> v = {{"a", "b"}, {"aa", "bb"}}; t.insert(std::begin(v), std::end(v)); StringTable u = t; EXPECT_EQ(u, t); } TEST(Table, Equality2) { StringTable t; std::vector> v1 = {{"a", "b"}, {"aa", "bb"}}; t.insert(std::begin(v1), std::end(v1)); StringTable u; std::vector> v2 = {{"a", "a"}, {"aa", "aa"}}; u.insert(std::begin(v2), std::end(v2)); EXPECT_NE(u, t); } TEST(Table, Equality3) { StringTable t; std::vector> v1 = {{"b", "b"}, {"bb", "bb"}}; t.insert(std::begin(v1), std::end(v1)); 
StringTable u; std::vector> v2 = {{"a", "a"}, {"aa", "aa"}}; u.insert(std::begin(v2), std::end(v2)); EXPECT_NE(u, t); } TEST(Table, NumDeletedRegression) { IntTable t; t.emplace(0); t.erase(t.find(0)); // construct over a deleted slot. t.emplace(0); t.clear(); } TEST(Table, FindFullDeletedRegression) { IntTable t; for (int i = 0; i < 1000; ++i) { t.emplace(i); t.erase(t.find(i)); } EXPECT_EQ(0, t.size()); } TEST(Table, ReplacingDeletedSlotDoesNotRehash) { size_t n; { // Compute n such that n is the maximum number of elements before rehash. IntTable t; t.emplace(0); size_t c = t.bucket_count(); for (n = 1; c == t.bucket_count(); ++n) t.emplace(n); --n; } IntTable t; t.rehash(n); const size_t c = t.bucket_count(); for (size_t i = 0; i != n; ++i) t.emplace(i); EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n; t.erase(0); t.emplace(0); EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n; } TEST(Table, NoThrowMoveConstruct) { ASSERT_TRUE( std::is_nothrow_copy_constructible>::value); ASSERT_TRUE(std::is_nothrow_copy_constructible< std::equal_to>::value); ASSERT_TRUE(std::is_nothrow_copy_constructible>::value); EXPECT_TRUE(std::is_nothrow_move_constructible::value); } TEST(Table, NoThrowMoveAssign) { ASSERT_TRUE( std::is_nothrow_move_assignable>::value); ASSERT_TRUE( std::is_nothrow_move_assignable>::value); ASSERT_TRUE(std::is_nothrow_move_assignable>::value); ASSERT_TRUE( absl::allocator_traits>::is_always_equal::value); EXPECT_TRUE(std::is_nothrow_move_assignable::value); } TEST(Table, NoThrowSwappable) { ASSERT_TRUE( container_internal::IsNoThrowSwappable>()); ASSERT_TRUE(container_internal::IsNoThrowSwappable< std::equal_to>()); ASSERT_TRUE(container_internal::IsNoThrowSwappable>()); EXPECT_TRUE(container_internal::IsNoThrowSwappable()); } TEST(Table, HeterogeneousLookup) { struct Hash { size_t operator()(int64_t i) const { return i; } size_t operator()(double i) const { ADD_FAILURE(); return i; } }; struct Eq { bool operator()(int64_t a, 
int64_t b) const { return a == b; } bool operator()(double a, int64_t b) const { ADD_FAILURE(); return a == b; } bool operator()(int64_t a, double b) const { ADD_FAILURE(); return a == b; } bool operator()(double a, double b) const { ADD_FAILURE(); return a == b; } }; struct THash { using is_transparent = void; size_t operator()(int64_t i) const { return i; } size_t operator()(double i) const { return i; } }; struct TEq { using is_transparent = void; bool operator()(int64_t a, int64_t b) const { return a == b; } bool operator()(double a, int64_t b) const { return a == b; } bool operator()(int64_t a, double b) const { return a == b; } bool operator()(double a, double b) const { return a == b; } }; raw_hash_set> s{0, 1, 2}; // It will convert to int64_t before the query. EXPECT_EQ(1, *s.find(double{1.1})); raw_hash_set> ts{0, 1, 2}; // It will try to use the double, and fail to find the object. EXPECT_TRUE(ts.find(1.1) == ts.end()); } template using CallFind = decltype(std::declval().find(17)); template using CallErase = decltype(std::declval().erase(17)); template using CallExtract = decltype(std::declval().extract(17)); template using CallPrefetch = decltype(std::declval().prefetch(17)); template using CallCount = decltype(std::declval().count(17)); template