pax_global_header00006660000000000000000000000064150151043200014501gustar00rootroot0000000000000052 comment=8df8212e53577e1d8477a5c901457cd61d88afc7 seastar-25.05.0/000077500000000000000000000000001501510432000133145ustar00rootroot00000000000000seastar-25.05.0/.dockerignore000066400000000000000000000000131501510432000157620ustar00rootroot00000000000000.git build seastar-25.05.0/.gitattributes000066400000000000000000000000341501510432000162040ustar00rootroot00000000000000*.cc diff=cpp *.hh diff=cpp seastar-25.05.0/.github/000077500000000000000000000000001501510432000146545ustar00rootroot00000000000000seastar-25.05.0/.github/workflows/000077500000000000000000000000001501510432000167115ustar00rootroot00000000000000seastar-25.05.0/.github/workflows/alpinelinux.yaml000066400000000000000000000031551501510432000221310ustar00rootroot00000000000000name: Alpine Linux on: pull_request: workflow_dispatch: # Allows manual triggering concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} jobs: build-and-test: runs-on: ubuntu-latest container: image: alpine:latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Install build dependencies run: | apk update apk add --no-cache \ boost-dev \ bsd-compat-headers \ c-ares-dev \ cmake \ crypto++-dev \ gcc \ g++ \ fmt-dev \ gnutls-dev \ hwloc-dev \ libpciaccess-dev \ libucontext-dev \ libunwind-dev \ liburing-dev \ lksctp-tools-dev \ lz4-dev \ numactl-dev \ openssl \ openssl-dev \ protobuf-dev \ py3-yaml \ ragel \ samurai \ util-linux-dev \ valgrind-dev \ xfsprogs-dev \ yaml-cpp-dev - name: Configure build run: | cmake -B build -G Ninja \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DSeastar_DOCS=OFF - name: Build Seastar run: | cmake --build build - name: Run unit tests run: | ctest --test-dir build --output-on-failure -j2 seastar-25.05.0/.github/workflows/docker.yaml000066400000000000000000000015041501510432000210440ustar00rootroot00000000000000name: 
Verify Dockerfile Build on: pull_request: paths: - 'docker/dev/Dockerfile' - 'install-dependencies.sh' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} jobs: build: timeout-minutes: 20 runs-on: ubuntu-24.04 steps: - name: Checkout repository uses: actions/checkout@v4 with: sparse-checkout: | docker/dev/Dockerfile install-dependencies.sh - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Build Docker image uses: docker/build-push-action@v6 with: context: . file: docker/dev/Dockerfile push: false cache-from: type=gha cache-to: type=gha,mode=max seastar-25.05.0/.github/workflows/python-lint.yaml000066400000000000000000000006441501510432000220660ustar00rootroot00000000000000name: Python format on: [push, pull_request] jobs: python-format: name: Enforce python format runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: psf/black@24.8.0 with: version: "24.8.0" src: ./scripts # override options so that we can specify only specific files for now options: "--check --diff --include=.*addr2line.*" seastar-25.05.0/.github/workflows/test.yaml000066400000000000000000000062211501510432000205550ustar00rootroot00000000000000name: Test permissions: contents: read on: workflow_call: inputs: compiler: description: 'the C++ compiler to use' type: string required: true standard: description: 'the C++ standard to use' type: number required: true mode: description: 'build mode (debug, dev or release)' type: string required: true enables: description: 'the --enable-* option passed to configure.py' type: string default: '' required: false enable-ccache: description: 'build with ccache enabled' type: boolean default: true required: false options: description: 'additional options passed to configure.py' type: string default: '' required: false jobs: test: timeout-minutes: 40 runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 with: submodules: "${{ contains(inputs.enables, 
'dpdk') }}" - run: | sudo apt-get update - name: Install build dependencies run: | sudo ./install-dependencies.sh - name: Install clang++ if: ${{ inputs.compiler == 'clang++' }} run: | sudo apt-get -y install clang - name: Install clang-scan-deps if: ${{ contains(inputs.enables, 'cxx-modules') }} run: | sudo apt-get -y install clang-tools - name: Install ccache if: ${{ inputs.enable-ccache }} run: | sudo apt-get -y install ccache - name: Setup ccache if: ${{ inputs.enable-ccache }} uses: hendrikmuhs/ccache-action@v1 with: key: ${{ inputs.compiler }}-${{ inputs.standard }}-${{ inputs.mode }}-${{ inputs.enables }} - name: Configure run: | if [ ${{ inputs.compiler }} = "clang++" ]; then CC=clang else CC=gcc fi if ${{ inputs.enable-ccache }}; then MAYBE_CCACHE_OPT="--ccache" fi ./configure.py \ --c++-standard ${{ inputs.standard }} \ --compiler ${{ inputs.compiler }} \ --c-compiler $CC \ --mode ${{ inputs.mode }} \ $MAYBE_CCACHE_OPT \ ${{ inputs.options }} \ ${{ inputs.enables }} - name: Build run: cmake --build build/${{inputs.mode}} - name: Check Header if: ${{ inputs.mode == 'dev' && inputs.compiler == 'clang++' }} run: cmake --build build/${{ inputs.mode }} --target checkheaders - name: Check Include Style if: ${{ inputs.mode == 'dev' && inputs.compiler == 'clang++' }} run: cmake --build build/${{ inputs.mode }} --target check-include-style - name: Build with C++20 modules if: ${{ contains(inputs.enables, 'cxx-modules') }} run: cmake --build build/${{ inputs.mode }} --target hello_cxx_module - name: Test if: ${{ ! 
contains(inputs.enables, 'cxx-modules') }} run: ./test.py --mode=${{ inputs.mode }} seastar-25.05.0/.github/workflows/tests.yaml000066400000000000000000000023161501510432000207410ustar00rootroot00000000000000name: Test permissions: contents: read on: [push, pull_request] concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }} jobs: regular_test: name: "Test (${{ matrix.compiler }}, C++${{ matrix.standard}}, ${{ matrix.mode }})" uses: ./.github/workflows/test.yaml strategy: fail-fast: false matrix: compiler: [clang++, g++] standard: [20, 23] mode: [dev, debug, release] with: compiler: ${{ matrix.compiler }} standard: ${{ matrix.standard }} mode: ${{ matrix.mode }} enables: ${{ matrix.enables }} options: ${{ matrix.options }} build_with_dpdk: name: "Test with DPDK enabled" uses: ./.github/workflows/test.yaml strategy: fail-fast: false with: compiler: clang++ standard: 23 mode: release enables: --enable-dpdk options: --cook dpdk build_with_cxx_modules: name: "Test with C++20 modules enabled" uses: ./.github/workflows/test.yaml strategy: fail-fast: false with: compiler: clang++ standard: 23 mode: debug enables: --enable-cxx-modules enable-ccache: false seastar-25.05.0/.gitignore000066400000000000000000000002431501510432000153030ustar00rootroot00000000000000.cooking_memory .cproject .project .settings build* build.ninja cscope.* __pycache__/ cmake/Cooking.cmake tags .idea/ .vscode/ compile_commands.json .clangd .cacheseastar-25.05.0/.gitmodules000066400000000000000000000000571501510432000154730ustar00rootroot00000000000000[submodule "dpdk"] path = dpdk url = ../dpdk seastar-25.05.0/.gitorderfile000066400000000000000000000000261501510432000157720ustar00rootroot00000000000000*.py *.hh *.rl *.cc * seastar-25.05.0/.mailmap000066400000000000000000000004571501510432000147430ustar00rootroot00000000000000Avi Kivity Avi Kivity' via seastar-dev Raphael S. Carvalho Raphael S. 
Carvalho' via seastar-dev Pavel Emelyanov Pavel Emelyanov' via seastar-dev seastar-25.05.0/CMakeLists.txt000066400000000000000000001206201501510432000160550ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # cmake_minimum_required (VERSION 3.13) list (APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake ${CMAKE_CURRENT_BINARY_DIR}) cmake_policy (SET CMP0090 NEW) foreach (policy CMP0127 CMP0135 CMP0167) if (POLICY ${policy}) cmake_policy (SET ${policy} NEW) endif () endforeach () include (Cooking OPTIONAL) # This variable impacts the way DPDK is configured by cmake-cooking (if DPDK is enabled), so its definition needs to # come before PROJECT. set (Seastar_DPDK_MACHINE "native" CACHE STRING "Configure DPDK for this processor architecture (if `Seastar_DPDK` is enabled). It configures -march or -mcpu") project (Seastar VERSION 1.0 LANGUAGES CXX) set (Seastar_ALLOC_FAILURE_INJECTION "DEFAULT" CACHE STRING "Enable failure injection into the Seastar allocator. Can be ON, OFF or DEFAULT (which enables it for Dev mode)") option (Seastar_TASK_BACKTRACE "Collect backtrace at deferring points." 
OFF) option (Seastar_DEBUG_ALLOCATIONS "For now just writes 0xab to newly allocated memory" OFF) option (Seastar_SSTRING "Use seastar's own string implementation" ON) option (Seastar_DEPRECATED_OSTREAM_FORMATTERS "Enable operator<< for formatting standard library containers, which will be deprecated in future" ON) set (Seastar_API_LEVEL "7" CACHE STRING "Seastar compatibility API level (7=unified CPU/IO scheduling groups") set_property (CACHE Seastar_API_LEVEL PROPERTY STRINGS 7) set (Seastar_SCHEDULING_GROUPS_COUNT "16" CACHE STRING "A positive number to set Seastar's reactor number of allowed different scheduling groups.") if (NOT Seastar_SCHEDULING_GROUPS_COUNT MATCHES "^[1-9][0-9]*") message(FATAL_ERROR "Seastar_SCHEDULING_GROUPS_COUNT must be a positive number (${Seastar_SCHEDULING_GROUPS_COUNT})") endif () # # Add a dev build type. # # All pre-defined build modes include optimizations or debug info, # which make them slow to build. The dev build mode is intended for # fast build/test iteration. # if (CMAKE_CXX_COMPILER_ID MATCHES Clang) set (CMAKE_CXX_FLAGS_DEV_OPT_LEVEL "-O2") else () set (CMAKE_CXX_FLAGS_DEV_OPT_LEVEL "-O1") endif () set (CMAKE_CXX_FLAGS_DEV "${CMAKE_CXX_FLAGS_DEV_OPT_LEVEL}" CACHE STRING "Flags used by the C++ compiler during dev builds." FORCE) set (CMAKE_C_FLAGS_DEV "-O1" CACHE STRING "Flags used by the C compiler during dev builds." FORCE) set (CMAKE_EXE_LINKER_FLAGS_DEV "" CACHE STRING "Flags used for linking binaries during dev builds." FORCE) set (CMAKE_SHARED_LINKER_FLAGS_DEV "" CACHE STRING "Flags used by the shared libraries linker during builds." FORCE) mark_as_advanced ( CMAKE_CXX_FLAGS_DEV CMAKE_C_FLAGS_DEV CMAKE_EXE_LINKER_FLAGS_DEV CMAKE_SHARED_LINKER_FLAGS_DEV) set (CMAKE_CXX_FLAGS_SANITIZE "-Os -g" CACHE STRING "Flags used by the C++ compiler during sanitize builds." 
FORCE) set (CMAKE_CXX_STANDARD "23" CACHE STRING "C++ standard to build with.") include (CMakeDependentOption) cmake_dependent_option (Seastar_MODULE "Build a C++20 module instead of a traditional library" OFF "CMAKE_VERSION VERSION_GREATER_EQUAL 3.26;CMAKE_CXX_STANDARD GREATER_EQUAL 20" OFF) set (CMAKE_C_FLAGS_SANITIZE "-Os -g" CACHE STRING "Flags used by the C compiler during sanitize builds." FORCE) set (CMAKE_EXE_LINKER_FLAGS_SANITIZE "" CACHE STRING "Flags used for linking binaries during sanitize builds." FORCE) set (CMAKE_SHARED_LINKER_FLAGS_SANITIZE "" CACHE STRING "Flags used by the shared libraries linker during sanitize builds." FORCE) mark_as_advanced ( CMAKE_CXX_FLAGS_SANITIZE CMAKE_C_FLAGS_SANITIZE CMAKE_EXE_LINKER_FLAGS_SANITIZE CMAKE_SHARED_LINKER_FLAGS_SANITIZE) set (CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Dev Sanitize." FORCE) if (NOT CMAKE_BUILD_TYPE) set (CMAKE_BUILD_TYPE "Release") endif () set (Seastar_ALLOC_PAGE_SIZE "" CACHE STRING "Override the Seastar allocator page size, in bytes.") function (set_option_if_package_is_found option_name package_name) # if the package is found, set the option on behalf of user unless it is # explicitly specified, if (DEFINED ${option_name}) return () endif () if (${package_name}_FOUND) set (${option_name} ON CACHE BOOL "") endif () endfunction () # When Seastar is a top-level project, enable the non-library targets by default. # If it is embedded with `add_subdirectory`, disable them. if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) set (Seastar_MASTER_PROJECT ON) else () set (Seastar_MASTER_PROJECT OFF) endif () option (Seastar_APPS "Enable application targets." ${Seastar_MASTER_PROJECT}) set (Seastar_CXX_FLAGS "" CACHE STRING "Semicolon-separated list of extra compilation flags for Seastar itself.") option (Seastar_DEMOS "Enable demonstration targets." 
${Seastar_MASTER_PROJECT}) option (Seastar_DOCS "Enable documentation targets." ${Seastar_MASTER_PROJECT}) option (Seastar_DPDK "Enable DPDK support." OFF) option (Seastar_EXCLUDE_APPS_FROM_ALL "When enabled alongside Seastar_APPS, do not build applications by default." OFF) option (Seastar_EXCLUDE_DEMOS_FROM_ALL "When enabled alongside Seastar_DEMOS, do not build demonstrations by default." OFF) option (Seastar_EXCLUDE_TESTS_FROM_ALL "When enabled alongside Seastar_TESTING, do not build tests by default." OFF) option (Seastar_EXECUTE_ONLY_FAST_TESTS "Only execute tests which run quickly." OFF) option (Seastar_HWLOC "Enable hwloc support." ON) if (DEFINED Seastar_IO_URING) option (Seastar_IO_URING "Enable io_uring support." ON) endif () set (Seastar_JENKINS "" CACHE STRING "If non-empty, the prefix for XML files containing the results of running tests (for Jenkins).") set (Seastar_LD_FLAGS "" CACHE STRING "Semicolon-separated list of extra linking flags for Seastar itself.") option (Seastar_INSTALL "Install targets." ${Seastar_MASTER_PROJECT}) option (Seastar_TESTING "Enable testing targets." ${Seastar_MASTER_PROJECT}) include (CMakeDependentOption) cmake_dependent_option (Seastar_ENABLE_TESTS_ACCESSING_INTERNET "Enable tests accessing internet." ON "Seastar_TESTING" OFF) option (Seastar_COMPRESS_DEBUG "Compress debug info." ON) option (Seastar_SPLIT_DWARF "Use split dwarf." OFF) option (Seastar_HEAP_PROFILING "Enable heap profiling. No effect when Seastar is compiled with the default allocator." OFF) option (Seastar_DEFERRED_ACTION_REQUIRE_NOEXCEPT "Enable noexcept requirement for deferred actions." 
ON) set (Seastar_TEST_TIMEOUT "300" CACHE STRING "Maximum allowed time for a test to run, in seconds.") option (BUILD_SHARED_LIBS "Build seastar library as shared libraries instead of static" OFF) # We set the following environment variables # * ASAN_OPTIONS=disable_coredump=0:abort_on_error=1:detect_stack_use_after_return=1:verify_asan_link_order=0 # By default ASan disables core dumps because they used to be # huge. This is no longer the case since the shadow memory is # excluded, so it is safe to enable them. # * UBSAN_OPTIONS=halt_on_error=1:abort_on_error=1 # Fail the test if any undefined behavior is found and use abort # instead of exit. Using abort is what causes core dumps to be # produced. # * BOOST_TEST_CATCH_SYSTEM_ERRORS=no # Normally the boost test library handles SIGABRT and prevents core # dumps from being produced. # This works great with clang and gcc 10.2, but unfortunately not any # previous gcc. set (Seastar_ASAN_OPTIONS "disable_coredump=0:abort_on_error=1") if ((NOT (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")) OR (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 10.2)) string (APPEND Seastar_ASAN_OPTIONS ":detect_stack_use_after_return=1") endif () set (Seastar_TEST_ENVIRONMENT "ASAN_OPTIONS=${Seastar_ASAN_OPTIONS};UBSAN_OPTIONS=halt_on_error=1:abort_on_error=1;BOOST_TEST_CATCH_SYSTEM_ERRORS=no" CACHE STRING "Environment variables for running tests") option (Seastar_UNUSED_RESULT_ERROR "Make [[nodiscard]] violations an error (instead of a warning)." OFF) set (Seastar_STACK_GUARDS "DEFAULT" CACHE STRING "Enable stack guards. Can be ON, OFF or DEFAULT (which enables it for non release builds)") set (Seastar_SANITIZE "DEFAULT" CACHE STRING "Enable ASAN and UBSAN. Can be ON, OFF or DEFAULT (which enables it for Debug and Sanitize)") set (Seastar_DEBUG_SHARED_PTR "DEFAULT" CACHE STRING "Enable shared_ptr debugging. Can be ON, OFF or DEFAULT (which enables it for Debug and Sanitize)") # # Useful (non-cache) variables. 
# set (Seastar_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set (Seastar_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) set (Seastar_GEN_BINARY_DIR ${Seastar_BINARY_DIR}/gen) # # Dependencies. # include (SeastarDependencies) seastar_find_dependencies () # Private build dependencies not visible to consumers find_package (ragel 6.10 REQUIRED) find_package (Threads REQUIRED) find_package (PthreadSetName REQUIRED) find_package (Valgrind REQUIRED) cmake_dependent_option (Seastar_LOGGER_COMPILE_TIME_FMT "Enable the compile-time {fmt} check when formatting logging messages" ON "fmt_VERSION VERSION_GREATER_EQUAL 8.0.0" OFF) # # Code generation helpers. # function (seastar_generate_protobuf) set (one_value_args TARGET VAR IN_FILE OUT_DIR) cmake_parse_arguments (args "" "${one_value_args}" "" ${ARGN}) get_filename_component (in_file_name ${args_IN_FILE} NAME_WE) get_filename_component (in_file_dir ${args_IN_FILE} DIRECTORY) set (header_out ${args_OUT_DIR}/${in_file_name}.pb.h) set (source_out ${args_OUT_DIR}/${in_file_name}.pb.cc) add_custom_command ( DEPENDS ${args_IN_FILE} protobuf::protoc OUTPUT ${header_out} ${source_out} COMMAND ${CMAKE_COMMAND} -E make_directory ${args_OUT_DIR} COMMAND protobuf::protoc ARGS --cpp_out=${args_OUT_DIR} -I${in_file_dir} ${args_IN_FILE}) add_custom_target (${args_TARGET} DEPENDS ${header_out} ${source_out}) set (${args_VAR} ${header_out} ${source_out} PARENT_SCOPE) endfunction () function (seastar_generate_ragel) set (one_value_args TARGET VAR IN_FILE OUT_FILE) cmake_parse_arguments (args "" "${one_value_args}" "" ${ARGN}) get_filename_component (out_dir ${args_OUT_FILE} DIRECTORY) add_custom_command ( DEPENDS ${args_IN_FILE} OUTPUT ${args_OUT_FILE} COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir} COMMAND ${ragel_RAGEL_EXECUTABLE} -G2 -o ${args_OUT_FILE} ${args_IN_FILE} COMMAND sed -i -e "'1h;2,$$H;$$!d;g'" -re "'s/static const char _nfa[^;]*;//g'" ${args_OUT_FILE}) add_custom_target (${args_TARGET} DEPENDS ${args_OUT_FILE}) set (${args_VAR} 
${args_OUT_FILE} PARENT_SCOPE) endfunction () function (seastar_generate_swagger) set (one_value_args TARGET VAR IN_FILE OUT_DIR) cmake_parse_arguments (args "" "${one_value_args}" "" ${ARGN}) get_filename_component (in_file_name ${args_IN_FILE} NAME) set (generator ${Seastar_SOURCE_DIR}/scripts/seastar-json2code.py) set (header_out ${args_OUT_DIR}/${in_file_name}.hh) set (source_out ${args_OUT_DIR}/${in_file_name}.cc) add_custom_command ( DEPENDS ${args_IN_FILE} ${generator} OUTPUT ${header_out} ${source_out} COMMAND ${CMAKE_COMMAND} -E make_directory ${args_OUT_DIR} COMMAND ${generator} --create-cc -f ${args_IN_FILE} -o ${header_out}) add_custom_target (${args_TARGET} DEPENDS ${header_out} ${source_out}) set (${args_VAR} ${header_out} ${source_out} PARENT_SCOPE) endfunction () # # The `seastar` library. # seastar_generate_ragel ( TARGET seastar_http_chunk_parsers VAR http_chunk_parsers_file IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/http/chunk_parsers.rl OUT_FILE ${Seastar_GEN_BINARY_DIR}/include/seastar/http/chunk_parsers.hh) seastar_generate_ragel ( TARGET seastar_http_request_parser VAR http_request_parser_file IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/http/request_parser.rl OUT_FILE ${Seastar_GEN_BINARY_DIR}/include/seastar/http/request_parser.hh) seastar_generate_ragel ( TARGET seastar_http_response_parser VAR http_response_parser_file IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/http/response_parser.rl OUT_FILE ${Seastar_GEN_BINARY_DIR}/include/seastar/http/response_parser.hh) seastar_generate_protobuf ( TARGET seastar_proto_metrics2 VAR proto_metrics2_files IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/src/proto/metrics2.proto OUT_DIR ${Seastar_GEN_BINARY_DIR}/src/proto) add_library (seastar ${http_chunk_parsers_file} ${http_request_parser_file} ${proto_metrics2_files} ${seastar_dpdk_obj} include/seastar/core/abort_source.hh include/seastar/core/alien.hh include/seastar/core/align.hh include/seastar/core/aligned_buffer.hh include/seastar/core/app-template.hh 
include/seastar/core/array_map.hh include/seastar/core/bitops.hh include/seastar/core/bitset-iter.hh include/seastar/core/byteorder.hh include/seastar/core/cacheline.hh include/seastar/core/checked_ptr.hh include/seastar/core/chunked_fifo.hh include/seastar/core/circular_buffer.hh include/seastar/core/circular_buffer_fixed_capacity.hh include/seastar/core/condition-variable.hh include/seastar/core/deleter.hh include/seastar/core/distributed.hh include/seastar/core/do_with.hh include/seastar/core/dpdk_rte.hh include/seastar/core/enum.hh include/seastar/core/exception_hacks.hh include/seastar/core/execution_stage.hh include/seastar/core/expiring_fifo.hh include/seastar/core/fair_queue.hh include/seastar/core/file.hh include/seastar/core/file-types.hh include/seastar/core/fsqual.hh include/seastar/core/fstream.hh include/seastar/core/function_traits.hh include/seastar/core/future-util.hh include/seastar/core/future.hh include/seastar/core/gate.hh include/seastar/core/iostream-impl.hh include/seastar/core/iostream.hh include/seastar/util/later.hh include/seastar/core/layered_file.hh include/seastar/core/linux-aio.hh include/seastar/core/loop.hh include/seastar/core/lowres_clock.hh include/seastar/core/manual_clock.hh include/seastar/core/map_reduce.hh include/seastar/core/memory.hh include/seastar/core/metrics.hh include/seastar/core/metrics_api.hh include/seastar/core/metrics_registration.hh include/seastar/core/metrics_types.hh include/seastar/core/pipe.hh include/seastar/core/posix.hh include/seastar/core/preempt.hh include/seastar/core/prefetch.hh include/seastar/core/print.hh include/seastar/core/prometheus.hh include/seastar/core/queue.hh include/seastar/core/ragel.hh include/seastar/core/reactor.hh include/seastar/core/report_exception.hh include/seastar/core/resource.hh include/seastar/core/rwlock.hh include/seastar/core/scattered_message.hh include/seastar/core/scheduling.hh include/seastar/core/scollectd.hh include/seastar/core/scollectd_api.hh 
include/seastar/core/seastar.hh include/seastar/core/semaphore.hh include/seastar/core/shard_id.hh include/seastar/core/sharded.hh include/seastar/core/shared_future.hh include/seastar/core/shared_mutex.hh include/seastar/core/shared_ptr.hh include/seastar/core/shared_ptr_debug_helper.hh include/seastar/core/shared_ptr_incomplete.hh include/seastar/core/simple-stream.hh include/seastar/core/signal.hh include/seastar/core/slab.hh include/seastar/core/sleep.hh include/seastar/core/sstring.hh include/seastar/core/stall_sampler.hh include/seastar/core/stream.hh include/seastar/core/systemwide_memory_barrier.hh include/seastar/core/task.hh include/seastar/core/temporary_buffer.hh include/seastar/core/thread.hh include/seastar/core/thread_cputime_clock.hh include/seastar/core/thread_impl.hh include/seastar/core/timed_out_error.hh include/seastar/core/timer-set.hh include/seastar/core/timer.hh include/seastar/core/transfer.hh include/seastar/core/unaligned.hh include/seastar/core/units.hh include/seastar/core/vector-data-sink.hh include/seastar/core/weak_ptr.hh include/seastar/core/when_all.hh include/seastar/core/with_scheduling_group.hh include/seastar/core/with_timeout.hh include/seastar/http/api_docs.hh include/seastar/http/common.hh include/seastar/http/exception.hh include/seastar/http/file_handler.hh include/seastar/http/function_handlers.hh include/seastar/http/handlers.hh include/seastar/http/httpd.hh include/seastar/http/json_path.hh include/seastar/http/matcher.hh include/seastar/http/matchrules.hh include/seastar/http/mime_types.hh include/seastar/http/reply.hh include/seastar/http/request.hh include/seastar/http/routes.hh include/seastar/http/short_streams.hh include/seastar/http/transformers.hh include/seastar/http/client.hh include/seastar/json/formatter.hh include/seastar/json/json_elements.hh include/seastar/net/api.hh include/seastar/net/arp.hh include/seastar/net/byteorder.hh include/seastar/net/config.hh include/seastar/net/const.hh 
include/seastar/net/dhcp.hh include/seastar/net/dns.hh include/seastar/net/dpdk.hh include/seastar/net/ethernet.hh include/seastar/net/inet_address.hh include/seastar/net/ip.hh include/seastar/net/ip_checksum.hh include/seastar/net/native-stack.hh include/seastar/net/net.hh include/seastar/net/packet-data-source.hh include/seastar/net/packet-util.hh include/seastar/net/packet.hh include/seastar/net/posix-stack.hh include/seastar/net/proxy.hh include/seastar/net/socket_defs.hh include/seastar/net/stack.hh include/seastar/net/tcp-stack.hh include/seastar/net/tcp.hh include/seastar/net/tls.hh include/seastar/net/toeplitz.hh include/seastar/net/udp.hh include/seastar/net/unix_address.hh include/seastar/net/virtio-interface.hh include/seastar/net/virtio.hh include/seastar/rpc/lz4_compressor.hh include/seastar/rpc/lz4_fragmented_compressor.hh include/seastar/rpc/multi_algo_compressor_factory.hh include/seastar/rpc/rpc.hh include/seastar/rpc/rpc_impl.hh include/seastar/rpc/rpc_types.hh include/seastar/util/alloc_failure_injector.hh include/seastar/util/backtrace.hh include/seastar/util/concepts.hh include/seastar/util/bool_class.hh include/seastar/util/conversions.hh include/seastar/util/defer.hh include/seastar/util/eclipse.hh include/seastar/util/function_input_iterator.hh include/seastar/util/indirect.hh include/seastar/util/is_smart_ptr.hh include/seastar/util/lazy.hh include/seastar/util/log-cli.hh include/seastar/util/log-impl.hh include/seastar/util/log.hh include/seastar/util/noncopyable_function.hh include/seastar/util/optimized_optional.hh include/seastar/util/print_safe.hh include/seastar/util/process.hh include/seastar/util/program-options.hh include/seastar/util/read_first_line.hh include/seastar/util/reference_wrapper.hh include/seastar/util/spinlock.hh include/seastar/util/std-compat.hh include/seastar/util/transform_iterator.hh include/seastar/util/tuple_utils.hh include/seastar/util/variant_utils.hh include/seastar/util/closeable.hh 
include/seastar/util/source_location-compat.hh include/seastar/util/short_streams.hh include/seastar/websocket/common.hh include/seastar/websocket/server.hh src/core/alien.cc src/core/file.cc src/core/fair_queue.cc src/core/reactor_backend.cc src/core/thread_pool.cc src/core/app-template.cc src/core/dpdk_rte.cc src/core/exception_hacks.cc src/core/execution_stage.cc src/core/file-impl.hh src/core/fsnotify.cc src/core/fsqual.cc src/core/fstream.cc src/core/future.cc src/core/future-util.cc src/core/linux-aio.cc src/core/memory.cc src/core/metrics.cc src/core/on_internal_error.cc src/core/posix.cc src/core/prometheus.cc src/core/program_options.cc src/core/reactor.cc src/core/resource.cc src/core/sharded.cc src/core/scollectd.cc src/core/scollectd-impl.hh src/core/signal.cc src/core/systemwide_memory_barrier.cc src/core/smp.cc src/core/sstring.cc src/core/thread.cc src/core/uname.cc src/core/vla.hh src/core/io_queue.cc src/core/semaphore.cc src/core/condition-variable.cc src/http/api_docs.cc src/http/common.cc src/http/file_handler.cc src/http/httpd.cc src/http/json_path.cc src/http/matcher.cc src/http/mime_types.cc src/http/reply.cc src/http/routes.cc src/http/transformers.cc src/http/url.cc src/http/client.cc src/http/request.cc src/json/formatter.cc src/json/json_elements.cc src/net/arp.cc src/net/config.cc src/net/dhcp.cc src/net/dns.cc src/net/dpdk.cc src/net/ethernet.cc src/net/inet_address.cc src/net/ip.cc src/net/ip_checksum.cc src/net/native-stack-impl.hh src/net/native-stack.cc src/net/net.cc src/net/packet.cc src/net/posix-stack.cc src/net/proxy.cc src/net/socket_address.cc src/net/stack.cc src/net/tcp.cc src/net/tls.cc src/net/udp.cc src/net/unix_address.cc src/net/virtio.cc src/rpc/lz4_compressor.cc src/rpc/lz4_fragmented_compressor.cc src/rpc/rpc.cc src/util/alloc_failure_injector.cc src/util/backtrace.cc src/util/conversions.cc src/util/exceptions.cc src/util/file.cc src/util/log.cc src/util/process.cc src/util/program-options.cc 
src/util/read_first_line.cc src/util/tmp_file.cc src/util/short_streams.cc src/websocket/parser.cc src/websocket/common.cc src/websocket/server.cc ) add_library (Seastar::seastar ALIAS seastar) add_dependencies (seastar seastar_http_chunk_parsers seastar_http_request_parser seastar_http_response_parser seastar_proto_metrics2) target_include_directories (seastar PUBLIC $ $ $ $ PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/src) set (Seastar_PRIVATE_CXX_FLAGS -fno-semantic-interposition -Wall -Werror -Wimplicit-fallthrough -Wdeprecated -Wno-error=deprecated) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") include (CheckGcc107852) if (NOT Cxx_Compiler_BZ107852_Free) list (APPEND Seastar_PRIVATE_CXX_FLAGS -Wno-error=stringop-overflow -Wno-error=array-bounds) endif () list (APPEND Seastar_PRIVATE_CXX_FLAGS -Wdeprecated-declarations -Wno-error=deprecated-declarations) endif () if (CMAKE_CXX_STANDARD GREATER_EQUAL 23) include (CheckP2582R1) if (Cxx_Compiler_IMPLEMENTS_P2581R1) target_compile_definitions (seastar PUBLIC SEASTAR_P2581R1) endif () endif () if (BUILD_SHARED_LIBS) # use initial-exec TLS, as it puts the TLS variables in the static TLS space # instead of allocating them using malloc. otherwise intercepting mallocs and # friends could lead to recursive call of malloc functions when a dlopen'ed # shared object references a TLS variable and it in turn uses malloc. the # downside of this workaround is that the static TLS space is used, and it is # a global resource. list (APPEND Seastar_PRIVATE_CXX_FLAGS $<$,RelWithDebInfo;Dev>:-ftls-model=initial-exec>) else () list (APPEND Seastar_PRIVATE_CXX_FLAGS -fvisibility=hidden) endif () if (Seastar_COMPRESS_DEBUG) # -gz doesn't imply -g, so it is safe to add it regardless of debug # info being enabled. 
list (APPEND Seastar_PRIVATE_CXX_FLAGS -gz) endif () target_link_libraries (seastar PUBLIC Boost::boost Boost::program_options Boost::thread c-ares::cares fmt::fmt lz4::lz4 SourceLocation::source_location PRIVATE ${CMAKE_DL_LIBS} GnuTLS::gnutls StdAtomic::atomic lksctp-tools::lksctp-tools protobuf::libprotobuf rt::rt ucontext::ucontext yaml-cpp::yaml-cpp Threads::Threads) if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.26) target_link_libraries (seastar PRIVATE "$") else () target_link_libraries (seastar PRIVATE "$") endif () if (Seastar_DPDK) target_link_libraries (seastar PRIVATE DPDK::dpdk) endif () include (TriStateOption) tri_state_option (${Seastar_SANITIZE} DEFAULT_BUILD_TYPES "Debug" "Sanitize" CONDITION condition) if (condition) if (NOT Sanitizers_FOUND) message (FATAL_ERROR "Sanitizers not found!") endif () set (Seastar_Sanitizers_OPTIONS ${Sanitizers_COMPILE_OPTIONS}) target_link_libraries (seastar PUBLIC $<${condition}:Sanitizers::address> $<${condition}:Sanitizers::undefined_behavior>) endif () # We only need valgrind to find uninitialized memory uses, so disable # the leak sanitizer. # To test with valgrind run "ctest -T memcheck" set( MEMORYCHECK_COMMAND_OPTIONS "--error-exitcode=1 --leak-check=no --trace-children=yes" ) include (CTest) # # To disable -Werror, pass -Wno-error to Seastar_CXX_FLAGS. # target_compile_definitions(seastar PUBLIC SEASTAR_API_LEVEL=${Seastar_API_LEVEL} $<$:SEASTAR_BUILD_SHARED_LIBS>) target_compile_features(seastar PUBLIC cxx_std_${CMAKE_CXX_STANDARD}) include (CheckCXXCompilerFlag) check_cxx_compiler_flag ("-Wno-maybe-uninitialized -Werror" MaybeUninitialized_FOUND) if (MaybeUninitialized_FOUND) target_compile_options (seastar PUBLIC # With std::experimental::optional it is easy to hit # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88897. We disable # -Wmaybe-uninitialized in here since otherwise we would have to # disable it on many types used inside optional<>. 
-Wno-maybe-uninitialized) endif () if (Seastar_SSTRING) target_compile_definitions (seastar PUBLIC SEASTAR_SSTRING) endif () if (Seastar_DEPRECATED_OSTREAM_FORMATTERS) target_compile_definitions (seastar PUBLIC SEASTAR_DEPRECATED_OSTREAM_FORMATTERS) endif () if (LinuxMembarrier_FOUND) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_HAS_MEMBARRIER) target_link_libraries (seastar PRIVATE LinuxMembarrier::membarrier) endif () tri_state_option (${Seastar_ALLOC_FAILURE_INJECTION} DEFAULT_BUILD_TYPES "Dev" CONDITION condition) if (condition) target_compile_definitions (seastar PUBLIC $<${condition}:SEASTAR_ENABLE_ALLOC_FAILURE_INJECTION>) endif () if (Seastar_TASK_BACKTRACE) target_compile_definitions (seastar PUBLIC SEASTAR_TASK_BACKTRACE) endif () if (Seastar_DEBUG_ALLOCATIONS) target_compile_definitions (seastar PRIVATE SEASTAR_DEBUG_ALLOCATIONS) endif () if (Sanitizers_FIBER_SUPPORT) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_HAVE_ASAN_FIBER_SUPPORT) endif () if (Seastar_ALLOC_PAGE_SIZE) target_compile_definitions (seastar PUBLIC SEASTAR_OVERRIDE_ALLOCATOR_PAGE_SIZE=${Seastar_ALLOC_PAGE_SIZE}) endif () if (Seastar_LOGGER_COMPILE_TIME_FMT) target_compile_definitions (seastar PUBLIC SEASTAR_LOGGER_COMPILE_TIME_FMT) endif () target_compile_definitions (seastar PUBLIC SEASTAR_SCHEDULING_GROUPS_COUNT=${Seastar_SCHEDULING_GROUPS_COUNT}) if (Seastar_CXX_FLAGS) list (APPEND Seastar_PRIVATE_CXX_FLAGS ${Seastar_CXX_FLAGS}) endif () # When using split dwarf --gdb-index is effectively required since # otherwise gdb is just too slow. We also want to use split dwarf in # as many compilation units as possible. So while these flags don't # have to be public, we don't expect anyone to want to build seastar # with them and some client code without. 
if (Seastar_SPLIT_DWARF) set (Seastar_SPLIT_DWARF_FLAG "-Wl,--gdb-index") target_link_libraries (seastar PUBLIC $<$>:${Seastar_SPLIT_DWARF_FLAG}>) target_compile_options (seastar PUBLIC $<$>:-gsplit-dwarf>) endif () if (Seastar_HEAP_PROFILING) set_property ( SOURCE "src/core/memory.cc" PROPERTY COMPILE_DEFINITIONS SEASTAR_HEAPPROF) set_property ( SOURCE "src/core/reactor.cc" PROPERTY COMPILE_DEFINITIONS SEASTAR_HEAPPROF) endif () if (Seastar_DEFERRED_ACTION_REQUIRE_NOEXCEPT) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_DEFERRED_ACTION_REQUIRE_NOEXCEPT) endif () if (Seastar_DPDK) if (CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64") target_compile_options (seastar PUBLIC -mcpu=${Seastar_DPDK_MACHINE} -mtune=${Seastar_DPDK_MACHINE}) else() target_compile_options (seastar PUBLIC -march=${Seastar_DPDK_MACHINE}) endif () target_compile_definitions (seastar PUBLIC SEASTAR_HAVE_DPDK) endif () if (Seastar_HWLOC) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_HAVE_HWLOC) target_link_libraries (seastar PRIVATE hwloc::hwloc) endif () set_option_if_package_is_found (Seastar_IO_URING LibUring) if (Seastar_IO_URING) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_HAVE_URING) target_link_libraries (seastar PRIVATE URING::uring) endif () if (Seastar_LD_FLAGS) target_link_options (seastar PRIVATE ${Seastar_LD_FLAGS}) endif () if (SystemTap-SDT_FOUND) list (APPEND Seastar_PRIVATE_COMPILE_DEFINITIONS SEASTAR_HAVE_SYSTEMTAP_SDT) target_link_libraries (seastar PRIVATE SystemTap::SDT) endif () check_cxx_compiler_flag ("-Werror=unused-result" ErrorUnused_FOUND) if (ErrorUnused_FOUND) if (Seastar_UNUSED_RESULT_ERROR) target_compile_options (seastar PUBLIC -Werror=unused-result) else() target_compile_options (seastar PUBLIC -Wno-error=unused-result) endif () endif () check_cxx_compiler_flag ("-Wno-error=#warnings" ErrorWarnings_FOUND) if (ErrorWarnings_FOUND) target_compile_options (seastar PRIVATE "-Wno-error=#warnings") endif () foreach (definition SEASTAR_DEBUG 
SEASTAR_DEFAULT_ALLOCATOR SEASTAR_SHUFFLE_TASK_QUEUE) target_compile_definitions (seastar PUBLIC $<$,Debug;Sanitize>:${definition}>) endforeach () tri_state_option (${Seastar_DEBUG_SHARED_PTR} DEFAULT_BUILD_TYPES "Debug" "Sanitize" CONDITION condition) if (condition) target_compile_definitions (seastar PUBLIC $<${condition}:SEASTAR_DEBUG_SHARED_PTR>) endif () tri_state_option (${Seastar_DEBUG_SHARED_PTR} DEFAULT_BUILD_TYPES "Debug" "Sanitize" CONDITION condition) if (condition) target_compile_definitions (seastar PUBLIC $<${condition}:SEASTAR_DEBUG_PROMISE>) endif () include (CheckLibc) tri_state_option (${Seastar_STACK_GUARDS} DEFAULT_BUILD_TYPES "Debug" "Sanitize" "Dev" CONDITION condition) if (condition) # check for -fstack-clash-protection together with -Werror, because # otherwise clang can soft-fail (return 0 but emit a warning) instead. check_cxx_compiler_flag ("-fstack-clash-protection -Werror" StackClashProtection_FOUND) if (StackClashProtection_FOUND) target_compile_options (seastar PUBLIC $<${condition}:-fstack-clash-protection>) endif () target_compile_definitions (seastar PRIVATE $<${condition}:SEASTAR_THREAD_STACK_GUARDS>) endif () target_compile_definitions (seastar PUBLIC $<$,Dev;Debug>:SEASTAR_TYPE_ERASE_MORE>) target_compile_definitions (seastar PRIVATE ${Seastar_PRIVATE_COMPILE_DEFINITIONS}) target_compile_options (seastar PRIVATE ${Seastar_PRIVATE_CXX_FLAGS}) set_target_properties (seastar PROPERTIES CXX_STANDARD ${CMAKE_CXX_STANDARD} CXX_EXTENSIONS ON) add_library (seastar_private INTERFACE) target_compile_definitions (seastar_private INTERFACE ${Seastar_PRIVATE_COMPILE_DEFINITIONS}) target_compile_options (seastar_private INTERFACE ${Seastar_PRIVATE_CXX_FLAGS}) target_link_libraries (seastar_private INTERFACE seastar) # # The testing library. 
# if (Seastar_INSTALL OR Seastar_TESTING) add_library (seastar_testing include/seastar/testing/entry_point.hh include/seastar/testing/exchanger.hh include/seastar/testing/random.hh include/seastar/testing/seastar_test.hh include/seastar/testing/test_case.hh include/seastar/testing/test_runner.hh include/seastar/testing/thread_test_case.hh src/testing/entry_point.cc src/testing/random.cc src/testing/seastar_test.cc src/testing/test_runner.cc) add_library (Seastar::seastar_testing ALIAS seastar_testing) target_compile_definitions (seastar_testing PRIVATE ${Seastar_PRIVATE_COMPILE_DEFINITIONS}) target_compile_options (seastar_testing PRIVATE ${Seastar_PRIVATE_CXX_FLAGS}) target_link_libraries (seastar_testing PUBLIC Boost::unit_test_framework Boost::dynamic_linking seastar) add_library(seastar_perf_testing src/testing/random.cc include/seastar/testing/perf_tests.hh tests/perf/perf_tests.cc tests/perf/linux_perf_event.cc) add_library (Seastar::seastar_perf_testing ALIAS seastar_perf_testing) target_compile_definitions (seastar_perf_testing PRIVATE ${Seastar_PRIVATE_COMPILE_DEFINITIONS}) target_compile_options (seastar_perf_testing PRIVATE ${Seastar_PRIVATE_CXX_FLAGS}) target_link_libraries (seastar_perf_testing PUBLIC seastar) endif () if (Seastar_MODULE) if (POLICY CMP0155) cmake_policy (SET CMP0155 NEW) endif () include (CxxModulesRules) add_subdirectory (src) endif () # # The tests themselves. # if (Seastar_TESTING) enable_testing () if (Seastar_EXCLUDE_TESTS_FROM_ALL) set (exclude EXCLUDE_FROM_ALL) else () set (exclude "") endif () add_subdirectory (tests ${exclude}) endif () # # Demonstrations. # if (Seastar_DEMOS) if (Seastar_EXCLUDE_DEMOS_FROM_ALL) set (exclude EXCLUDE_FROM_ALL) else () set (exclude "") endif () add_subdirectory (demos ${exclude}) endif () # # Documentation. # if (Seastar_DOCS) add_subdirectory (doc) endif () # # Applications. 
# if (Seastar_APPS) if (Seastar_EXCLUDE_APPS_FROM_ALL) set (exclude EXCLUDE_FROM_ALL) else () set (exclude "") endif () add_subdirectory (apps ${exclude}) endif () if (CMAKE_BUILD_TYPE STREQUAL "Dev") include (CheckHeaders) include (CheckIncludeStyle) add_custom_target (checkheaders) add_custom_target (check-include-style) foreach (lib seastar seastar_testing seastar_perf_testing) if (TARGET ${lib}) seastar_check_self_contained (checkheaders ${lib} INCLUDE "\\.hh$" # impl.hh headers are internal implementations of .hh, so they are not # compilable. let's exclude them from the files to be checked. EXCLUDE "_impl.hh$|-impl.hh$") seastar_check_include_style (check-include-style ${lib}) endif () endforeach () endif () # # Installation and export. # if (Seastar_INSTALL) # # pkg-config generation. # # Note that unlike the CMake "config module", this description is not relocatable because # some dependencies do not natively support pkg-config. # # Necessary here for pkg-config. include (GNUInstallDirs) # Set paths in pkg-config files for installation. 
set (Seastar_PKG_CONFIG_PREFIX ${CMAKE_INSTALL_PREFIX}) set (Seastar_PKG_CONFIG_LIBDIR ${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}) set (Seastar_PKG_CONFIG_SEASTAR_INCLUDE_FLAGS "-I${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_INCLUDEDIR}") get_property (_is_Multi_Config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if (_is_Multi_Config) # use different library names for each config set (Seastar_PC "_$.pc") else () set (Seastar_PC ".pc") endif () if(CMAKE_CXX_EXTENSIONS) set(Seastar_CXX_COMPILE_OPTION ${CMAKE_CXX${CMAKE_CXX_STANDARD}_EXTENSION_COMPILE_OPTION}) else() set(Seastar_CXX_COMPILE_OPTION ${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}) endif() configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig/seastar.pc.in ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-install${Seastar_PC}.in @ONLY) configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig/seastar-testing.pc.in ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-testing-install.pc.in @ONLY) # Set paths in pkg-config files for direct use in the build directory. 
set (Seastar_PKG_CONFIG_PREFIX ${CMAKE_CURRENT_BINARY_DIR}) set (Seastar_PKG_CONFIG_LIBDIR ${CMAKE_CURRENT_BINARY_DIR}) set (Seastar_PKG_CONFIG_SEASTAR_INCLUDE_FLAGS "-I${CMAKE_CURRENT_SOURCE_DIR}/include -I${CMAKE_CURRENT_BINARY_DIR}/gen/include") configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig/seastar.pc.in ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar${Seastar_PC}.in @ONLY) configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig/seastar-testing.pc.in ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-testing.pc.in @ONLY) file (GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/seastar${Seastar_PC} INPUT ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar${Seastar_PC}.in) file (GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/seastar-testing.pc INPUT ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-testing.pc.in) file (GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/seastar-install${Seastar_PC} INPUT ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-install${Seastar_PC}.in) file (GENERATE OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/seastar-testing-install.pc INPUT ${CMAKE_CURRENT_BINARY_DIR}/pkgconfig/seastar-testing-install.pc.in) include (CMakePackageConfigHelpers) set (install_cmakedir ${CMAKE_INSTALL_LIBDIR}/cmake/Seastar) install ( DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install ( DIRECTORY ${Seastar_GEN_BINARY_DIR}/include/ DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install ( PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/scripts/seastar-json2code.py DESTINATION ${CMAKE_INSTALL_BINDIR}) install ( TARGETS seastar seastar_testing seastar_perf_testing EXPORT seastar-export LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}) install ( EXPORT seastar-export FILE SeastarTargets.cmake NAMESPACE Seastar:: DESTINATION ${install_cmakedir}) write_basic_package_version_file ( ${CMAKE_CURRENT_BINARY_DIR}/SeastarConfigVersion.cmake VERSION ${PROJECT_VERSION} COMPATIBILITY ExactVersion) configure_package_config_file ( 
${CMAKE_CURRENT_LIST_DIR}/cmake/SeastarConfig.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/SeastarConfig.cmake INSTALL_DESTINATION ${install_cmakedir}) install ( FILES ${CMAKE_CURRENT_BINARY_DIR}/SeastarConfig.cmake ${CMAKE_CURRENT_BINARY_DIR}/SeastarConfigVersion.cmake DESTINATION ${install_cmakedir}) install ( FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindGnuTLS.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindLinuxMembarrier.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSanitizers.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSourceLocation.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindStdAtomic.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findc-ares.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Finddpdk.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findhwloc.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findlksctp-tools.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findlz4.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findragel.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findrt.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Finducontext.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Findyaml-cpp.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/SeastarDependencies.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindLibUring.cmake ${CMAKE_CURRENT_SOURCE_DIR}/cmake/FindSystemTap-SDT.cmake DESTINATION ${install_cmakedir}) install ( DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/cmake/code_tests DESTINATION ${install_cmakedir}) install ( FILES ${CMAKE_CURRENT_BINARY_DIR}/seastar-install${Seastar_PC} DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig RENAME seastar${Seastar_PC}) install ( FILES ${CMAKE_CURRENT_BINARY_DIR}/seastar-testing-install.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig RENAME seastar-testing.pc) # # Export targets from the build tree for the user package registry. # export ( EXPORT seastar-export FILE ${CMAKE_CURRENT_BINARY_DIR}/SeastarTargets.cmake NAMESPACE Seastar::) export (PACKAGE Seastar) # # Packaging. 
# set (CPACK_PACKAGE_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) set (CPACK_PACKAGE_VERSION_MINOR ${PROJECT_VERSION_MINOR}) set (CPACK_PACKAGE_VERSION_PATCH ${PROJECT_VERSION_PATCH}) include (CPack) endif () seastar-25.05.0/CONTRIBUTING.md000066400000000000000000000013271501510432000155500ustar00rootroot00000000000000# Contributing Code to Seastar There are two ways to contribute code to Seastar: * send your changes as [patches](https://github.com/scylladb/scylla/wiki/Formatting-and-sending-patches) to the [mailing list](https://groups.google.com/forum/#!forum/seastar-dev). * alternatively, open a [github pull request](https://github.com/scylladb/seastar/pulls). # Asking questions or requesting help Use the [Seastar mailing list](https://groups.google.com/forum/#!forum/seastar-dev) for general questions and help. # Reporting an issue Please use the [Issue Tracker](https://github.com/scylladb/seastar/issues/) to report issues. Supply as much information about your environment as possible, especially for performance problems. seastar-25.05.0/HACKING.md000066400000000000000000000077141501510432000147130ustar00rootroot00000000000000# Developing and using Seastar ## Configuring the project There are multiple ways to configure Seastar and its dependencies. ### Use system-packages for most dependencies See the instructions in [README.md](./README.md). ### Download and install all external dependencies in a project-specific location - First pull the git submodules using `git submodule update --init --recursive` - Use `cmake-cooking` to prepare a development environment with all dependencies. 
This allows for reproducible development environments, but means that approximately 3 GiB of dependencies get installed to `build/_cooking_`: ``` ./cooking.sh ``` - The same as above, and enable DPDK support: ``` ./cooking.sh -- -DSeastar_DPDK=ON ``` - Use system packages for all dependencies except `dpdk`, which is provided by `cmake-cooking` (and not yet widely available via system package-managers): ``` ./cooking.sh -i dpdk ``` - Use `cmake-cooking` for all dependencies except for Boost: ``` ./cooking.sh -e Boost ``` - The same, but compile in "release" mode: ``` ./cooking.sh -e Boost -t Release ``` ## Using an IDE with CMake support If you use `configure.py` or `cooking.sh` to to configure Seastar, then the easiest way to use an IDE (such as Qt Creator, or CLion) for development is to instruct the IDE, when it invokes CMake, to include the following option: ``` -DCMAKE_PREFIX_PATH=${source_dir}/build/_cooking/installed ``` where `${source_dir}` is the root of the Seastar source tree on your file-system. This will allow the IDE to also index Seastar's dependencies. ## Building the project ``` cd $my_build_dir ninja ``` If you used `configure.py` to configure Seastar, then the build directory will be `build/$mode`. For example, `build/release`. If you use `cooking.sh`, then the build directory will just be `build`. ## Running tests Make sure you are in the "build" directory. - Run unit tests: ``` ninja test_unit ``` - Run all tests: ``` ninja test ``` - Build and run a specific test: ``` ninja test_unit_thread_run ``` ## Building documentation Make sure you are in the "build" directory. 
- Build all documentation: ``` ninja docs ``` - Build the tutorial in HTML form: ``` ninja doc_tutorial_html ``` - Build the tutorial in HTML form (one file per chapter): ``` ninja doc_tutorial_html_split ``` - Build the Doxygen documentation: ``` ninja doc_api ``` ## Installing the project Choose the install path: With `configure.py`: ``` ./configure.py --mode=release --prefix=/my/install/path ``` With `cooking.sh`: ``` ./cooking.sh -- -DCMAKE_INSTALL_PREFIX=/my/install/path ``` ``` ninja -C build install ``` ## Using Seastar in an application ### CMake Once Seastar has been installed, it is sufficient to add a dependency on Seastar with ``` find_package (Seastar ${VERSION} REQUIRED) add_executable (my_program my_program.cc) target_link_libraries (my_program PRIVATE Seastar::seastar) ``` where `VERSION` is the desired version. If you'd like to use `cmake-cooking` to set up a development environment which includes Seastar and its dependencies (a "recipe"), you can include Seastar as follows: ``` cooking_ingredient (Seastar COOKING_RECIPE COOKING_CMAKE_ARGS -DSeastar_APPS=OFF -DSeastar_DEMOS=OFF -DSeastar_DOCS=OFF -DSeastar_TESTING=OFF EXTERNAL_PROJECT_ARGS SOURCE_DIR ${MY_SEASTAR_SOURCE_DIR}) ``` ### pkg-config Seastar includes a `seastar.pc` file. It can be used from both the install and build directories. Compiling a single file: ``` g++ foo.cc -o foo $(pkg-config --libs --cflags --static /path/to/seastar.pc) ``` Compiling multiple files: ``` # Compiling sources into object files g++ -c $(pkg-config --cflags /path/to/seastar.pc) foo.cc -o foo.o g++ -c $(pkg-config --cflags /path/to/seastar.pc) bar.cc -o bar.o # Linking object files into an executable g++ -o foo_bar foo.o bar.o $(pkg-config --libs --static /path/to/seastar.pc) ``` The `--static` flag is needed to include transitive (private) dependencies of `libseastar.a`. 
seastar-25.05.0/LICENSE000066400000000000000000000236761501510432000143370ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
END OF TERMS AND CONDITIONS seastar-25.05.0/NOTICE000066400000000000000000000007141501510432000142220ustar00rootroot00000000000000Seastar Framework Copyright 2015 Cloudius Systems This works contains software from the OSv project (http://osv.io), licensed under the BSD license. This work contains software from the DPDK project (http://dpdk.org), licensed under the BSD license. The software is under the dpdk/ directory. This work contains software from the Android Open Source Project, licensed under the Apache2 license. The software is in the include/seastar/util/sampler.hh file. seastar-25.05.0/README-DPDK.md000066400000000000000000000024551501510432000153210ustar00rootroot00000000000000Seastar and DPDK ================ Seastar uses the Data Plane Development Kit to drive NIC hardware directly. This provides an enormous performance boost. To enable DPDK, specify `--enable-dpdk` to `./configure.py`, and `--dpdk-pmd` as a run-time parameter. This will use the DPDK package provided as a git submodule with the seastar sources. Please note, if `--enable-dpdk` is used to build DPDK on an aarch64 machine, you need to specify [target architecture](https://gcc.gnu.org/onlinedocs/gcc/AArch64-Options.html) with optional [feature modifiers](https://gcc.gnu.org/onlinedocs/gcc/AArch64-Options.html#aarch64-feature-modifiers) with the `--cflags` option as well, like: ```console $ ./configure.py --mode debug --enable-dpdk --cflags='-march=armv8-a+crc+crypto' ``` To use your own self-compiled DPDK package, follow this procedure: 1. Setup host to compile DPDK: - Ubuntu `sudo apt-get install -y build-essential linux-image-extra-$(uname -r)` 2. Prepare a DPDK SDK: - Download the latest DPDK release: `wget https://fast.dpdk.org/rel/dpdk-23.07.tar.xz` - Untar it. - Follow the [Quick Start Guide](https://core.dpdk.org/doc/quick-start/) - Pass `-Dmbuf_refcnt_atomic=false` to meson. 3. Modify the CMake cache (`CMakeCache.txt`) to inform CMake of the location of the installed DPDK SDK. 
seastar-25.05.0/README.md000066400000000000000000000202351501510432000145750ustar00rootroot00000000000000Seastar ======= [![Test](https://github.com/scylladb/seastar/actions/workflows/tests.yaml/badge.svg)](https://github.com/scylladb/seastar/actions/workflows/tests.yaml) [![Version](https://img.shields.io/github/tag/scylladb/seastar.svg?label=version&colorB=green)](https://github.com/scylladb/seastar/releases) [![License: Apache2](https://img.shields.io/github/license/scylladb/seastar.svg)](https://github.com/scylladb/seastar/blob/master/LICENSE) [![n00b issues](https://img.shields.io/github/issues/scylladb/seastar/n00b.svg?colorB=green)](https://github.com/scylladb/seastar/labels/n00b) Introduction ------------ SeaStar is an event-driven framework allowing you to write non-blocking, asynchronous code in a relatively straightforward manner (once understood). It is based on [futures](http://en.wikipedia.org/wiki/Futures_and_promises). Building Seastar -------------------- For more details and alternative work-flows, read [HACKING.md](./HACKING.md). Assuming that you would like to use system packages (RPMs or DEBs) for Seastar's dependencies, first install them: ``` $ sudo ./install-dependencies.sh ``` then configure (in "release" mode): ``` $ ./configure.py --mode=release ``` then compile: ``` $ ninja -C build/release ``` In case there are compilation issues, especially like ```g++: internal compiler error: Killed (program cc1plus)``` try giving more memory to gcc, either by limiting the amount of threads ( -j1 ) and/or allowing at least 4g ram to your machine. If you're missing a dependency of Seastar, then it is possible to have the configuration process fetch a version of the dependency locally for development. For example, to fetch `fmt` locally, configure Seastar like this: ``` $ ./configure.py --mode=dev --cook fmt ``` `--cook` can be repeated many times for selecting multiple dependencies. 
Build modes ---------------------------------------------------------------------------- The configure.py script is a wrapper around cmake. The --mode argument maps to CMAKE_BUILD_TYPE, and supports the following modes | | CMake mode | Debug info | Optimi­zations | Sanitizers | Allocator | Checks | Use for | | -------- | ------------------- | ---------- | ------------------ |------------- | --------- | -------- | -------------------------------------- | | debug | `Debug` | Yes | `-O0` | ASAN, UBSAN | System | All | gdb | | release | `RelWithDebInfo` | Yes | `-O3` | None | Seastar | Asserts | production | | dev | `Dev` (Custom) | No | `-O1` | None | Seastar | Asserts | build and test cycle | | sanitize | `Sanitize` (Custom) | Yes | `-Os` | ASAN, UBSAN | System | All | second level of tests, track down bugs | Note that seastar is more sensitive to allocators and optimizations than usual. A quick rule of the thumb of the relative performances is that release is 2 times faster than dev, 150 times faster than sanitize and 300 times faster than debug. Using Seastar from its build directory (without installation) ---------------------------------------------------------------------------- It's possible to consume Seastar directly from its build directory with CMake or `pkg-config`. We'll assume that the Seastar repository is located in a directory at `$seastar_dir`. 
Via `pkg-config`: ``` $ g++ my_app.cc $(pkg-config --libs --cflags --static $seastar_dir/build/release/seastar.pc) -o my_app ``` and with CMake using the `Seastar` package: `CMakeLists.txt` for `my_app`: ``` set (CMAKE_CXX_STANDARD 23) find_package (Seastar REQUIRED) add_executable (my_app my_app.cc) target_link_libraries (my_app Seastar::seastar) ``` ``` $ mkdir $my_app_dir/build $ cd $my_app_dir/build $ cmake -DCMAKE_PREFIX_PATH="$seastar_dir/build/release;$seastar_dir/build/release/_cooking/installed" -DCMAKE_MODULE_PATH=$seastar_dir/cmake $my_app_dir ``` The `CMAKE_PREFIX_PATH` values ensure that CMake can locate Seastar and its compiled submodules. The `CMAKE_MODULE_PATH` value ensures that CMake can uses Seastar's CMake scripts for locating its dependencies. Using an installed Seastar -------------------------------- You can also consume Seastar after it has been installed to the file-system. **Important:** - Seastar works with a customized version of DPDK, so by default builds and installs the DPDK submodule to `$build_dir/_cooking/installed` First, configure the installation path: ``` $ ./configure.py --mode=release --prefix=/usr/local ``` then run the `install` target: ``` $ ninja -C build/release install ``` then consume it from `pkg-config`: ``` $ g++ my_app.cc $(pkg-config --libs --cflags --static seastar) -o my_app ``` or consume it with the same `CMakeLists.txt` as before but with a simpler CMake invocation: ``` $ cmake .. ``` (If Seastar has not been installed to a "standard" location like `/usr` or `/usr/local`, then you can invoke CMake with `-DCMAKE_PREFIX_PATH=$my_install_root`.) There are also instructions for building on any host that supports [Docker](doc/building-docker.md). Use of the [DPDK](http://dpdk.org) is [optional](doc/building-dpdk.md). #### Seastar's C++ standard: C++20 or C++23 Seastar supports both C++20, and C++23. 
The build defaults to the latest standard supported by your compiler, but can be explicitly selected with the `--c++-standard` configure option, e.g., `--c++-standard=20`, or if using CMake directly, by setting on the `CMAKE_CXX_STANDARD` CMake variable. See the [compatibity statement](doc/compatibility.md) for more information. Getting started --------------- There is a [mini tutorial](doc/mini-tutorial.md) and a [more comprehensive one](doc/tutorial.md). The documentation is available on the [web](http://docs.seastar.io/master/index.html). Resources --------- * Seasatar Development Mailing List: Discuss challenges, propose improvements with sending code contributions (patches), and get help from experienced developers. Subscribe or browse archives: [here](https://groups.google.com/forum/#!forum/seastar-dev) (or email seastar-dev@googlegroups.com). * GitHub Discussions: For more casual conversations and quick questions, consider using the Seastar project's [discussions on Github](https://github.com/scylladb/seastar/discussions). * Issue Tracker: File bug reports on the project's [issue tracker](https://github.com/scylladb/seastar/issues). Learn more about Seastar on the main [project website](http://seastar.io). The Native TCP/IP Stack ----------------------- Seastar comes with its own [userspace TCP/IP stack](doc/native-stack.md) for better performance. Recommended hardware configuration for SeaStar ---------------------------------------------- * CPUs - As much as you need. SeaStar is highly friendly for multi-core and NUMA * NICs - As fast as possible, we recommend 10G or 40G cards. It's possible to use 1G too but you may be limited by their capacity. In addition, the more hardware queue per cpu the better for SeaStar. Otherwise we have to emulate that in software. * Disks - Fast SSDs with high number of IOPS. * Client machines - Usually a single client machine can't load our servers. 
Both memaslap (memcached) and WRK (httpd) cannot over load their matching server counter parts. We recommend running the client on different machine than the servers and use several of them. Projects using Seastar ---------------------------------------------- * [cpv-cql-driver](https://github.com/cpv-project/cpv-cql-driver): C++ driver for Cassandra/Scylla based on seastar framework * [cpv-framework](https://github.com/cpv-project/cpv-framework): A web framework written in c++ based on seastar framework * [redpanda](https://vectorized.io/): A Kafka replacement for mission critical systems * [Scylla](https://github.com/scylladb/scylla): A fast and reliable NoSQL data store compatible with Cassandra and DynamoDB * [smf](https://github.com/smfrpc/smf): The fastest RPC in the West * [Ceph - Crimson](https://github.com/ceph/ceph): Next-generation OSD (Object Storage Daemon) implementation based on the Seastar framework seastar-25.05.0/apps/000077500000000000000000000000001501510432000142575ustar00rootroot00000000000000seastar-25.05.0/apps/CMakeLists.txt000066400000000000000000000027061501510432000170240ustar00rootroot00000000000000# This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # # Logical target for all applications. 
add_custom_target (apps) macro (seastar_add_app name) set (args ${ARGN}) cmake_parse_arguments ( parsed_args "" "" "SOURCES" ${args}) set (target app_${name}) add_executable (${target} ${parsed_args_SOURCES}) target_include_directories (${target} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries (${target} PRIVATE seastar_private) set_target_properties (${target} PROPERTIES OUTPUT_NAME ${name}) add_dependencies (apps ${target}) endmacro () add_subdirectory (httpd) add_subdirectory (io_tester) add_subdirectory (rpc_tester) add_subdirectory (iotune) add_subdirectory (memcached) add_subdirectory (seawreck) seastar-25.05.0/apps/httpd/000077500000000000000000000000001501510432000154025ustar00rootroot00000000000000seastar-25.05.0/apps/httpd/CMakeLists.txt000066400000000000000000000021561501510432000201460ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. 
# seastar_generate_swagger ( TARGET app_httpd_swagger VAR app_httpd_swagger_files IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/demo.json OUT_DIR ${CMAKE_CURRENT_BINARY_DIR}) seastar_add_app (httpd SOURCES ${app_httpd_swagger_files} main.cc) target_include_directories (app_httpd PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) add_dependencies (app_httpd app_httpd_swagger) seastar-25.05.0/apps/httpd/demo.json000066400000000000000000000046151501510432000172270ustar00rootroot00000000000000{ "apiVersion": "0.0.1", "swaggerVersion": "1.2", "basePath": "{{Protocol}}://{{Host}}", "resourcePath": "/hello", "produces": [ "application/json" ], "apis": [ { "path": "/hello/world/{var1}/{var2}", "operations": [ { "method": "GET", "summary": "Returns the number of seconds since the system was booted", "type": "long", "nickname": "hello_world", "produces": [ "application/json" ], "parameters": [ { "name":"var2", "description":"Full path of file or directory", "required":true, "allowMultiple":true, "type":"string", "paramType":"path" }, { "name":"var1", "description":"Full path of file or directory", "required":true, "allowMultiple":false, "type":"string", "paramType":"path" }, { "name":"query_enum", "description":"The operation to perform", "required":true, "allowMultiple":false, "type":"string", "paramType":"query", "enum":["VAL1", "VAL2", "VAL3"] } ] } ] } ], "models" : { "my_object": { "id": "my_object", "description": "Demonstrate an object", "properties": { "var1": { "type": "string", "description": "The first parameter in the path" }, "var2": { "type": "string", "description": "The second parameter in the path" }, "enum_var" : { "type": "string", "description": "Demonstrate an enum returned, note this is not the same enum type of the request", "enum":["VAL1", "VAL2", "VAL3"] } } } } } seastar-25.05.0/apps/httpd/main.cc000066400000000000000000000130331501510432000166350ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, 
Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2015 Cloudius Systems */ #include #include #include #include #include #include #include #include #include "demo.json.hh" #include #include #include #include #include #include #include "../lib/stop_signal.hh" namespace bpo = boost::program_options; using namespace seastar; using namespace httpd; class handl : public httpd::handler_base { public: virtual future > handle(const sstring& path, std::unique_ptr req, std::unique_ptr rep) { rep->_content = "hello"; rep->done("html"); return make_ready_future>(std::move(rep)); } }; void set_routes(routes& r) { function_handler* h1 = new function_handler([](const_req req) { return "hello"; }); function_handler* h2 = new function_handler([](std::unique_ptr req) { return make_ready_future("json-future"); }); r.add(operation_type::GET, url("/"), h1); r.add(operation_type::GET, url("/jf"), h2); r.add(operation_type::GET, url("/file").remainder("path"), new directory_handler("/")); demo_json::hello_world.set(r, [] (const_req req) { demo_json::my_object obj; obj.var1 = req.param.at("var1"); obj.var2 = req.param.at("var2"); demo_json::ns_hello_world::query_enum v = demo_json::ns_hello_world::str2query_enum(req.query_parameters.at("query_enum")); // This demonstrate enum conversion obj.enum_var = v; return obj; }); } int main(int ac, char** av) { app_template app; app.add_options()("port", 
bpo::value()->default_value(10000), "HTTP Server port"); app.add_options()("prometheus_port", bpo::value()->default_value(9180), "Prometheus port. Set to zero in order to disable."); app.add_options()("prometheus_address", bpo::value()->default_value("0.0.0.0"), "Prometheus address"); app.add_options()("prometheus_prefix", bpo::value()->default_value("seastar_httpd"), "Prometheus metrics prefix"); return app.run(ac, av, [&] { return seastar::async([&] { seastar_apps_lib::stop_signal stop_signal; auto&& config = app.configuration(); httpd::http_server_control prometheus_server; bool prometheus_started = false; auto stop_prometheus = defer([&] () noexcept { if (prometheus_started) { std::cout << "Stoppping Prometheus server" << std::endl; // This can throw, but won't. prometheus_server.stop().get(); } }); uint16_t pport = config["prometheus_port"].as(); if (pport) { prometheus::config pctx; net::inet_address prom_addr(config["prometheus_address"].as()); pctx.metric_help = "seastar::httpd server statistics"; pctx.prefix = config["prometheus_prefix"].as(); std::cout << "starting prometheus API server" << std::endl; prometheus_server.start("prometheus").get(); prometheus::start(prometheus_server, pctx).get(); prometheus_started = true; prometheus_server.listen(socket_address{prom_addr, pport}).handle_exception([prom_addr, pport] (auto ep) { std::cerr << seastar::format("Could not start Prometheus API server on {}:{}: {}\n", prom_addr, pport, ep); return make_exception_future<>(ep); }).get(); } uint16_t port = config["port"].as(); auto server = std::make_unique(); auto rb = make_shared("apps/httpd/"); server->start().get(); auto stop_server = defer([&] () noexcept { std::cout << "Stoppping HTTP server" << std::endl; // This can throw, but won't. 
server->stop().get(); }); server->set_routes(set_routes).get(); server->set_routes([rb](routes& r){rb->set_api_doc(r);}).get(); server->set_routes([rb](routes& r) {rb->register_function(r, "demo", "hello world application");}).get(); server->listen(port).get(); std::cout << "Seastar HTTP server listening on port " << port << " ...\n"; stop_signal.wait().get(); return 0; }); }); } seastar-25.05.0/apps/io_tester/000077500000000000000000000000001501510432000162545ustar00rootroot00000000000000seastar-25.05.0/apps/io_tester/CMakeLists.txt000066400000000000000000000017351501510432000210220ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. 
# seastar_add_app (io_tester SOURCES io_tester.cc) seastar_add_app (ioinfo SOURCES ioinfo.cc) target_link_libraries (app_io_tester PRIVATE yaml-cpp::yaml-cpp) target_link_libraries (app_ioinfo PRIVATE yaml-cpp::yaml-cpp) seastar-25.05.0/apps/io_tester/conf.yaml000066400000000000000000000010561501510432000200670ustar00rootroot00000000000000- name: big_writes shards: all type: seqwrite shard_info: parallelism: 10 reqsize: 256kB shares: 10 think_time: 0 - name: latency_reads shards: [0] type: randread data_size: 1GB shard_info: parallelism: 1 reqsize: 512 shares: 100 think_time: 1000us - name: cpu_hog shards: [0] type: cpu shard_info: parallelism: 1 execution_time: 90us think_time: 10us - name: unlinking shards: all type: unlink data_size: 2GB files_count: 5000 shard_info: parallelism: 10 think_time: 10us seastar-25.05.0/apps/io_tester/io_tester.cc000066400000000000000000001261021501510432000205620ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ /* * Copyright (C) 2017 ScyllaDB */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #pragma GCC diagnostic push // see https://github.com/boostorg/accumulators/pull/54 #pragma GCC diagnostic ignored "-Wuninitialized" #include #include #include #include #include #include #include #pragma GCC diagnostic pop #include #include #include #include #include #include using namespace seastar; using namespace std::chrono_literals; using namespace boost::accumulators; static constexpr uint64_t extent_size_hint_alignment{1u << 20}; // 1MB static auto random_seed = std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); static thread_local std::default_random_engine random_generator(random_seed); class context; enum class request_type { seqread, seqwrite, randread, randwrite, append, cpu, unlink }; namespace std { template <> struct hash { size_t operator() (const request_type& type) const { return static_cast(type); } }; } auto allocate_and_fill_buffer(size_t buffer_size) { constexpr size_t alignment{4096u}; auto buffer = allocate_aligned_buffer(buffer_size, alignment); std::uniform_int_distribution fill('@', '~'); memset(buffer.get(), fill(random_generator), buffer_size); return buffer; } future> create_and_fill_file(sstring name, uint64_t fsize, open_flags flags, file_open_options options) { return open_file_dma(name, flags, options).then([fsize] (auto f) mutable { return do_with(std::move(f), [fsize] (auto& f) { return f.size().then([f, fsize] (uint64_t pre_truncate_size) mutable { return f.truncate(fsize).then([f, fsize, pre_truncate_size] () mutable { if (pre_truncate_size >= fsize) { return make_ready_future>(std::pair{f, 0u}); } const uint64_t buffer_size{256ul << 10}; const uint64_t additional_iteration = (fsize % buffer_size == 0) ? 
0 : 1; const uint64_t buffers_count{static_cast(fsize / buffer_size) + additional_iteration}; const uint64_t last_buffer_id = (buffers_count - 1u); const uint64_t last_write_position = buffer_size * last_buffer_id; return do_with(std::views::iota(UINT64_C(0), buffers_count), [f, buffer_size] (auto& buffers_range) mutable { return max_concurrent_for_each(buffers_range.begin(), buffers_range.end(), 64, [f, buffer_size] (auto buffer_id) mutable { auto source_buffer = allocate_and_fill_buffer(buffer_size); auto write_position = buffer_id * buffer_size; return do_with(std::move(source_buffer), [f, write_position, buffer_size] (const auto& buffer) mutable { return f.dma_write(write_position, buffer.get(), buffer_size).discard_result(); }); }); }).then([f]() mutable { return f.flush(); }).then([f, last_write_position]() { return make_ready_future>(std::pair{f, last_write_position}); }); }); }); }); }); } future<> busyloop_sleep(std::chrono::steady_clock::time_point until, std::chrono::steady_clock::time_point now) { return do_until([until] { return std::chrono::steady_clock::now() >= until; }, [] { return yield(); }); } template future<> timer_sleep(std::chrono::steady_clock::time_point until, std::chrono::steady_clock::time_point now) { return seastar::sleep(std::chrono::duration_cast(until - now)); } using sleep_fn = std::function(std::chrono::steady_clock::time_point until, std::chrono::steady_clock::time_point now)>; class pause_distribution { public: virtual std::chrono::duration get() = 0; template Dur get_as() { return std::chrono::duration_cast(get()); } virtual ~pause_distribution() {} }; using pause_fn = std::function(std::chrono::duration)>; class uniform_process : public pause_distribution { std::chrono::duration _pause; public: uniform_process(std::chrono::duration period) : _pause(period) { } std::chrono::duration get() override { return _pause; } }; std::unique_ptr make_uniform_pause(std::chrono::duration d) { return std::make_unique(d); } class 
poisson_process : public pause_distribution { std::random_device _rd; std::mt19937 _rng; std::exponential_distribution _exp; public: poisson_process(std::chrono::duration period) : _rng(_rd()) , _exp(1.0 / period.count()) { } std::chrono::duration get() override { return std::chrono::duration(_exp(_rng)); } }; std::unique_ptr make_poisson_pause(std::chrono::duration d) { return std::make_unique(d); } struct byte_size { uint64_t size; }; struct duration_time { std::chrono::duration time; }; class shard_config { std::unordered_set _shards; public: shard_config() : _shards(boost::copy_range>(boost::irange(0u, smp::count))) {} shard_config(std::unordered_set s) : _shards(std::move(s)) {} bool is_set(unsigned cpu) const { return _shards.count(cpu); } }; struct shard_info { unsigned parallelism = 0; unsigned rps = 0; unsigned batch = 1; unsigned limit = std::numeric_limits::max(); unsigned shares = 10; std::string sched_class = ""; uint64_t request_size = 4 << 10; uint64_t bandwidth = 0; std::chrono::duration think_time = 0ms; std::chrono::duration think_after = 0ms; std::chrono::duration execution_time = 1ms; seastar::scheduling_group scheduling_group = seastar::default_scheduling_group(); }; struct options { bool dsync = false; ::sleep_fn sleep_fn = timer_sleep; ::pause_fn pause_fn = make_uniform_pause; }; class class_data; struct job_config { std::string name; request_type type; shard_config shard_placement; ::shard_info shard_info; ::options options; // size of each individual file. Every class and every shard have its file, so in a normal // system with many shards we'll naturally have many files and that will push the data out // of the disk's cache. An exception to that rule is unlink_class_data, that creates files_count // files with file_size/files_count. 
uint64_t file_size; // the value passed as a hint for allocated extent size // if not specified, then file_size is used as a hint std::optional extent_allocation_size_hint; // the number of files to create and unlink by unlink_class_data per shard // remaining operations utilize only one file per shard std::optional files_count; uint64_t offset_in_bdev; std::unique_ptr gen_class_data(); }; std::array quantiles = { 0.5, 0.95, 0.99, 0.999}; static bool keep_files = false; future<> maybe_remove_file(sstring fname) { return keep_files ? make_ready_future<>() : remove_file(fname); } future<> maybe_close_file(file& f) { return f ? f.close() : make_ready_future<>(); } class class_data { protected: using accumulator_type = accumulator_set>; job_config _config; uint64_t _alignment; uint64_t _last_pos = 0; uint64_t _offset = 0; seastar::scheduling_group _sg; size_t _data = 0; std::chrono::duration _total_duration; std::chrono::steady_clock::time_point _start = {}; accumulator_type _latencies; uint64_t _requests = 0; std::uniform_int_distribution _pos_distribution; file _file; bool _think = false; ::sleep_fn _sleep_fn = timer_sleep; timer<> _thinker; virtual future<> do_start(sstring dir, directory_entry_type type) = 0; virtual future issue_request(char *buf, io_intent* intent) = 0; public: class_data(job_config cfg) : _config(std::move(cfg)) , _alignment(_config.shard_info.request_size >= 4096 ? 
4096 : 512) , _sg(cfg.shard_info.scheduling_group) , _latencies(extended_p_square_probabilities = quantiles) , _pos_distribution(0, _config.file_size / _config.shard_info.request_size) , _sleep_fn(_config.options.sleep_fn) , _thinker([this] { think_tick(); }) { if (_config.shard_info.think_after > 0us) { _thinker.arm(std::chrono::duration_cast(_config.shard_info.think_after)); } else if (_config.shard_info.think_time > 0us) { _think = true; } } virtual ~class_data() = default; private: void think_tick() { if (_think) { _think = false; _thinker.arm(std::chrono::duration_cast(_config.shard_info.think_after)); } else { _think = true; _thinker.arm(std::chrono::duration_cast(_config.shard_info.think_time)); } } future<> issue_request(char* buf, io_intent* intent, std::chrono::steady_clock::time_point start, std::chrono::steady_clock::time_point stop) { return issue_request(buf, intent).then([this, start, stop] (auto size) { auto now = std::chrono::steady_clock::now(); if (now < stop) { this->add_result(size, std::chrono::duration_cast(now - start)); } return make_ready_future<>(); }); } future<> issue_requests_in_parallel(std::chrono::steady_clock::time_point stop) { return parallel_for_each(std::views::iota(0u, parallelism()), [this, stop] (auto dummy) mutable { auto bufptr = allocate_aligned_buffer(this->req_size(), _alignment); auto buf = bufptr.get(); return do_until([this, stop] { return std::chrono::steady_clock::now() > stop || requests() > limit(); }, [this, buf, stop] () mutable { auto start = std::chrono::steady_clock::now(); return issue_request(buf, nullptr, start, stop).then([this] { return think(); }); }).finally([bufptr = std::move(bufptr)] {}); }); } future<> issue_requests_at_rate(std::chrono::steady_clock::time_point stop) { return do_with(io_intent{}, 0u, [this, stop] (io_intent& intent, unsigned& in_flight) { return parallel_for_each(std::views::iota(0u, parallelism()), [this, stop, &intent, &in_flight] (auto dummy) mutable { auto bufptr = 
allocate_aligned_buffer(this->req_size(), _alignment); auto buf = bufptr.get(); auto pause = std::chrono::duration_cast(1s) / rps(); auto pause_dist = _config.options.pause_fn(pause); return seastar::sleep((pause / parallelism()) * dummy).then([this, buf, stop, pause = pause_dist.get(), &intent, &in_flight] () mutable { return do_until([this, stop] { return std::chrono::steady_clock::now() > stop || requests() > limit(); }, [this, buf, stop, pause, &intent, &in_flight] () mutable { auto start = std::chrono::steady_clock::now(); in_flight++; return parallel_for_each(std::views::iota(0u, batch()), [this, buf, &intent, start, stop] (auto dummy) { return issue_request(buf, &intent, start, stop); }).then([this, start, pause] { auto now = std::chrono::steady_clock::now(); auto p = pause->template get_as(); auto next = start + p; if (next > now) { return this->_sleep_fn(next, now); } else { // probably the system cannot keep-up with this rate return make_ready_future<>(); } }).handle_exception_type([] (const cancelled_error&) { // expected }).finally([&in_flight] { in_flight--; }); }); }).finally([bufptr = std::move(bufptr), pause = std::move(pause_dist)] {}); }).then([&intent, &in_flight] { intent.cancel(); return do_until([&in_flight] { return in_flight == 0; }, [] { return seastar::sleep(100ms /* ¯\_(ツ)_/¯ */); }); }); }); } public: future<> issue_requests(std::chrono::steady_clock::time_point stop) { _start = std::chrono::steady_clock::now(); return with_scheduling_group(_sg, [this, stop] { if (rps() == 0) { return issue_requests_in_parallel(stop); } else { return issue_requests_at_rate(stop); } }).then([this] { _total_duration = std::chrono::steady_clock::now() - _start; }); } future<> think() { if (_think) { return seastar::sleep(std::chrono::duration_cast(_config.shard_info.think_time)); } else { return make_ready_future<>(); } } // Generate the test file(s) for reads and writes alike. 
It is much simpler to just generate one file per job instead of expecting // job dependencies between creators and consumers. Removal of files is an exception - it creates multiple files during startup to // unlink them. So every job (a class in a shard) will have its own file(s) and will operate differently depending on the type: // // sequential reads : will read the file from pos = 0 onwards, back to 0 on EOF // sequential writes : will write the file from pos = 0 onwards, back to 0 on EOF // random reads : will read the file at random positions, between 0 and EOF // random writes : will overwrite the file at a random position, between 0 and EOF // append : will write to the file from pos = EOF onwards, always appending to the end. // unlink : will unlink files created at the beginning of the execution // cpu : CPU-only load, file is not created. future<> start(sstring dir, directory_entry_type type) { return do_start(dir, type).then([this] { if (this_shard_id() == 0 && _config.shard_info.bandwidth != 0) { return make_ready_future<>(); // FIXME _iop.update_bandwidth(_config.shard_info.bandwidth); } else { return make_ready_future<>(); } }); } future<> stop() { return stop_hook().finally([this] { return maybe_close_file(_file); }); } const sstring name() const { return _config.name; } protected: sstring type_str() const { return std::unordered_map{ { request_type::seqread, "SEQ READ" }, { request_type::seqwrite, "SEQ WRITE" }, { request_type::randread, "RAND READ" }, { request_type::randwrite, "RAND WRITE" }, { request_type::append , "APPEND" }, { request_type::cpu , "CPU" }, { request_type::unlink, "UNLINK" }, }[_config.type];; } request_type req_type() const { return _config.type; } sstring think_time() const { if (_config.shard_info.think_time == std::chrono::duration(0)) { return "NO think time"; } else { return format("{:d} us think time", std::chrono::duration_cast(_config.shard_info.think_time).count()); } } size_t req_size() const { return 
_config.shard_info.request_size; } unsigned parallelism() const { return _config.shard_info.parallelism; } unsigned rps() const { return _config.shard_info.rps; } unsigned batch() const { return _config.shard_info.batch; } unsigned limit() const noexcept { return _config.shard_info.limit; } unsigned shares() const { return _config.shard_info.shares; } std::chrono::duration total_duration() const { return _total_duration; } uint64_t file_size_mb() const { return _config.file_size >> 20; } uint64_t total_data() const { return _data; } uint64_t max_latency() const { return max(_latencies); } uint64_t average_latency() const { return mean(_latencies); } uint64_t quantile_latency(double q) const { return quantile(_latencies, quantile_probability = q); } uint64_t requests() const noexcept { return _requests; } bool is_sequential() const { return (req_type() == request_type::seqread) || (req_type() == request_type::seqwrite); } bool is_random() const { return (req_type() == request_type::randread) || (req_type() == request_type::randwrite); } uint64_t get_pos() { uint64_t pos; if (is_random()) { pos = _pos_distribution(random_generator) * req_size(); } else { pos = _last_pos + req_size(); if (is_sequential() && (pos >= _config.file_size)) { pos = 0; } } _last_pos = pos; return pos + _offset; } void add_result(size_t data, std::chrono::microseconds latency) { _data += data; _latencies(latency.count()); _requests++; } public: virtual void emit_results(YAML::Emitter& out) = 0; virtual future<> stop_hook() { return make_ready_future<>(); } }; class io_class_data : public class_data { protected: bool _is_dev_null = false; future on_io_completed(future f) { if (!_is_dev_null) { return f; } return f.then([this] (auto size_f) { return make_ready_future(this->req_size()); }); } public: io_class_data(job_config cfg) : class_data(std::move(cfg)) {} future<> do_start(sstring path, directory_entry_type type) override { if (type == directory_entry_type::directory) { return 
do_start_on_directory(path); } if (type == directory_entry_type::block_device) { return do_start_on_bdev(path); } if (type == directory_entry_type::char_device && path == "/dev/null") { return do_start_on_dev_null(); } throw std::runtime_error(format("Unsupported storage. {} should be directory or block device", path)); } private: future<> do_start_on_directory(sstring dir) { auto fname = format("{}/test-{}-{:d}", dir, name(), this_shard_id()); auto flags = open_flags::rw | open_flags::create; if (_config.options.dsync) { flags |= open_flags::dsync; } file_open_options options; options.extent_allocation_size_hint = _config.extent_allocation_size_hint.value_or(_config.file_size); options.append_is_unlikely = true; return create_and_fill_file(fname, _config.file_size, flags, options).then([this](std::pair p) { _file = std::move(p.first); _last_pos = (req_type() == request_type::append) ? p.second : 0u; return make_ready_future<>(); }).then([fname] { // If keep_files == false, then the file shall not exist after the execution. // After the following function call the usage of the file is valid until `this->_file` object is closed. 
return maybe_remove_file(fname); }); } future<> do_start_on_bdev(sstring name) { auto flags = open_flags::rw; if (_config.options.dsync) { flags |= open_flags::dsync; } return open_file_dma(name, flags).then([this] (auto f) { _file = std::move(f); return _file.size().then([this] (uint64_t size) { auto shard_area_size = align_down(size / smp::count, 1 << 20); if (_config.offset_in_bdev + _config.file_size > shard_area_size) { throw std::runtime_error("Data doesn't fit the blockdevice"); } _offset = shard_area_size * this_shard_id() + _config.offset_in_bdev; return make_ready_future<>(); }); }); } future<> do_start_on_dev_null() { file_open_options options; options.append_is_unlikely = true; return open_file_dma("/dev/null", open_flags::rw, std::move(options)).then([this] (auto f) { _file = std::move(f); _is_dev_null = true; return make_ready_future<>(); }); } void emit_one_metrics(YAML::Emitter& out, sstring m_name) { const auto& values = seastar::metrics::impl::get_value_map(); const auto& mf = values.find(m_name); SEASTAR_ASSERT(mf != values.end()); for (auto&& mi : mf->second) { auto&& cname = mi.first.labels().find("class"); if (cname != mi.first.labels().end() && cname->second == name()) { out << YAML::Key << m_name << YAML::Value << mi.second->get_function()().d(); } } } void emit_metrics(YAML::Emitter& out) { emit_one_metrics(out, "io_queue_total_exec_sec"); emit_one_metrics(out, "io_queue_total_delay_sec"); emit_one_metrics(out, "io_queue_total_operations"); emit_one_metrics(out, "io_queue_starvation_time_sec"); emit_one_metrics(out, "io_queue_consumption"); emit_one_metrics(out, "io_queue_adjusted_consumption"); emit_one_metrics(out, "io_queue_activations"); } public: virtual void emit_results(YAML::Emitter& out) override { auto throughput_kbs = (total_data() >> 10) / total_duration().count(); auto iops = requests() / total_duration().count(); out << YAML::Key << "throughput" << YAML::Value << throughput_kbs << YAML::Comment("kB/s"); out << YAML::Key << 
"IOPS" << YAML::Value << iops; out << YAML::Key << "latencies" << YAML::Comment("usec"); out << YAML::BeginMap; out << YAML::Key << "average" << YAML::Value << average_latency(); for (auto& q: quantiles) { out << YAML::Key << fmt::format("p{}", q) << YAML::Value << quantile_latency(q); } out << YAML::Key << "max" << YAML::Value << max_latency(); out << YAML::EndMap; out << YAML::Key << "stats" << YAML::BeginMap; out << YAML::Key << "total_requests" << YAML::Value << requests(); emit_metrics(out); out << YAML::EndMap; } }; class read_io_class_data : public io_class_data { public: read_io_class_data(job_config cfg) : io_class_data(std::move(cfg)) {} future issue_request(char *buf, io_intent* intent) override { auto f = _file.dma_read(this->get_pos(), buf, this->req_size(), intent); return on_io_completed(std::move(f)); } }; class write_io_class_data : public io_class_data { public: write_io_class_data(job_config cfg) : io_class_data(std::move(cfg)) {} future issue_request(char *buf, io_intent* intent) override { auto f = _file.dma_write(this->get_pos(), buf, this->req_size(), intent); return on_io_completed(std::move(f)); } }; class unlink_class_data : public class_data { private: sstring _dir_path{}; uint64_t _file_id_to_remove{0u}; public: unlink_class_data(job_config cfg) : class_data(std::move(cfg)) { if (!_config.files_count.has_value()) { throw std::runtime_error("request_type::unlink requires specifying 'files_count'"); } } future<> do_start(sstring path, directory_entry_type type) override { if (type == directory_entry_type::directory) { return do_start_on_directory(path); } throw std::runtime_error(format("Unsupported storage. {} should be directory", path)); } future issue_request(char *buf, io_intent* intent) override { if (all_files_removed()) { fmt::print("[WARNING]: Cannot issue request in unlink_class_data! 
All files have been removed for shard_id={}\n" "[WARNING]: Please create more files or adjust the frequency of unlinks.", this_shard_id()); return make_ready_future(0u); } const auto fname = get_filename(_file_id_to_remove); ++_file_id_to_remove; return remove_file(fname).then([]{ return make_ready_future(0u); }); } void emit_results(YAML::Emitter& out) override { const auto iops = requests() / total_duration().count(); out << YAML::Key << "IOPS" << YAML::Value << iops; out << YAML::Key << "latencies" << YAML::Comment("usec"); out << YAML::BeginMap; out << YAML::Key << "average" << YAML::Value << average_latency(); out << YAML::Key << "max" << YAML::Value << max_latency(); out << YAML::EndMap; out << YAML::Key << "stats" << YAML::BeginMap; out << YAML::Key << "total_requests" << YAML::Value << requests(); out << YAML::EndMap; } private: future<> stop_hook() override { if (all_files_removed() || keep_files) { return make_ready_future<>(); } return max_concurrent_for_each(std::views::iota(_file_id_to_remove, files_count()), max_concurrency(), [this] (uint64_t file_id) { const auto fname = get_filename(file_id); return remove_file(fname); }); } uint64_t files_count() const { return *_config.files_count; } uint64_t max_concurrency() const { // When we have many files it is easy to exceed the limit of open file descriptors. // To avoid that the limit is divided between shards (leaving some room for other jobs). 
return static_cast((1024u / smp::count) * 0.8); } bool all_files_removed() const { return files_count() <= _file_id_to_remove; } sstring get_filename(uint64_t file_id) const { return format("{}/test-{}-shard-{:d}-file-{}", _dir_path, name(), this_shard_id(), file_id); } future<> do_start_on_directory(sstring path) { _dir_path = std::move(path); return max_concurrent_for_each(std::views::iota(UINT64_C(0), files_count()), max_concurrency(), [this] (uint64_t file_id) { const auto fname = get_filename(file_id); const auto fsize = align_up(_config.file_size / files_count(), extent_size_hint_alignment); const auto flags = open_flags::rw | open_flags::create; file_open_options options; options.extent_allocation_size_hint = _config.extent_allocation_size_hint.value_or(fsize); options.append_is_unlikely = true; return create_and_fill_file(fname, fsize, flags, options).then([](std::pair p) { return do_with(std::move(p.first), [] (auto& f) { return f.close(); }); }); }); } }; class cpu_class_data : public class_data { public: cpu_class_data(job_config cfg) : class_data(std::move(cfg)) {} future<> do_start(sstring dir, directory_entry_type type) override { return make_ready_future<>(); } future issue_request(char *buf, io_intent* intent) override { // We do want the execution time to be a busy loop, and not just a bunch of // continuations until our time is up: by doing this we can also simulate the behavior // of I/O continuations in the face of reactor stalls. 
auto start = std::chrono::steady_clock::now(); do { } while ((std::chrono::steady_clock::now() - start) < _config.shard_info.execution_time); return make_ready_future(1); } virtual void emit_results(YAML::Emitter& out) override { auto throughput = total_data() / total_duration().count(); out << YAML::Key << "throughput" << YAML::Value << throughput; } }; std::unique_ptr job_config::gen_class_data() { if (type == request_type::cpu) { return std::make_unique(*this); } else if (type == request_type::unlink) { return std::make_unique(*this); } else if ((type == request_type::seqread) || (type == request_type::randread)) { return std::make_unique(*this); } else { return std::make_unique(*this); } } /// YAML parsing functions namespace YAML { template<> struct convert { static bool decode(const Node& node, byte_size& bs) { auto str = node.as(); unsigned shift = 0; if (str.back() == 'B') { str.pop_back(); shift = std::unordered_map{ { 'k', 10 }, { 'M', 20 }, { 'G', 30 }, }[str.back()]; str.pop_back(); } bs.size = (boost::lexical_cast(str) << shift); return bs.size >= 512; } }; template<> struct convert { static bool decode(const Node& node, duration_time& dt) { auto str = node.as(); if (str == "0") { dt.time = 0ns; return true; } if (str.back() != 's') { return false; } str.pop_back(); std::unordered_map> unit = { { 'n', 1ns }, { 'u', 1us }, { 'm', 1ms }, }; if (unit.count(str.back())) { auto u = str.back(); str.pop_back(); dt.time = (boost::lexical_cast(str) * unit[u]); } else { dt.time = (boost::lexical_cast(str) * 1s); } return true; } }; template<> struct convert { static bool decode(const Node& node, shard_config& shards) { try { auto str = node.as(); return (str == "all"); } catch (YAML::TypedBadConversion& e) { shards = shard_config(boost::copy_range>(node.as>())); return true; } return false; } }; template<> struct convert { static bool decode(const Node& node, request_type& rt) { static std::unordered_map mappings = { { "seqread", request_type::seqread }, { 
"seqwrite", request_type::seqwrite}, { "randread", request_type::randread }, { "randwrite", request_type::randwrite }, { "append", request_type::append}, { "cpu", request_type::cpu}, { "unlink", request_type::unlink }, }; auto reqstr = node.as(); if (!mappings.count(reqstr)) { return false; } rt = mappings[reqstr]; return true; } }; template<> struct convert { static bool decode(const Node& node, shard_info& sl) { if (node["parallelism"]) { sl.parallelism = node["parallelism"].as(); } if (node["rps"]) { sl.rps = node["rps"].as(); } if (node["batch"]) { sl.batch = node["batch"].as(); } if (node["limit"]) { sl.limit = node["limit"].as(); } if (node["shares"]) { sl.shares = node["shares"].as(); } else if (node["class"]) { sl.sched_class = node["class"].as(); } if (node["bandwidth"]) { sl.bandwidth = node["bandwidth"].as().size; } if (node["reqsize"]) { sl.request_size = node["reqsize"].as().size; } if (node["think_time"]) { sl.think_time = node["think_time"].as().time; } if (node["think_after"]) { sl.think_after = node["think_after"].as().time; } if (node["execution_time"]) { sl.execution_time = node["execution_time"].as().time; } return true; } }; template<> struct convert { static bool decode(const Node& node, options& op) { if (node["dsync"]) { op.dsync = node["dsync"].as(); } if (node["sleep_type"]) { auto st = node["sleep_type"].as(); if (st == "busyloop") { op.sleep_fn = busyloop_sleep; } else if (st == "lowres") { op.sleep_fn = timer_sleep; } else if (st == "steady") { op.sleep_fn = timer_sleep; } else { throw std::runtime_error(seastar::format("Unknown sleep_type {}", st)); } } if (node["pause_distribution"]) { auto pd = node["pause_distribution"].as(); if (pd == "uniform") { op.pause_fn = make_uniform_pause; } else if (pd == "poisson") { op.pause_fn = make_poisson_pause; } else { throw std::runtime_error(seastar::format("Unknown pause_distribution {}", pd)); } } return true; } }; template<> struct convert { static bool decode(const Node& node, job_config& cl) 
{ cl.name = node["name"].as(); cl.type = node["type"].as(); cl.shard_placement = node["shards"].as(); // The data_size is used to divide the available (and effectively // constant) disk space between workloads. Each shard inside the // workload thus uses its portion of the assigned space. if (node["data_size"]) { const uint64_t per_shard_bytes = node["data_size"].as().size / smp::count; cl.file_size = align_up(per_shard_bytes, extent_size_hint_alignment); } else { cl.file_size = 1ull << 30; // 1G by default } // By default the file size is used as the allocation hint. // However, certain tests may require using a specific value (e.g. 32MB). if (node["extent_allocation_size_hint"]) { cl.extent_allocation_size_hint = node["extent_allocation_size_hint"].as().size; } // By default a job may create 0 or 1 file. // That is not the case for unlink_class_data - it creates multiple // files that are unlinked during the execution. if (node["files_count"]) { cl.files_count = node["files_count"].as(); } if (node["shard_info"]) { cl.shard_info = node["shard_info"].as(); } if (node["options"]) { cl.options = node["options"].as(); } return true; } }; } /// Each shard has one context, and the context is responsible for creating the classes that should /// run in this shard. 
class context { std::vector> _cl; sstring _dir; directory_entry_type _type; std::chrono::seconds _duration; semaphore _finished; public: context(sstring dir, directory_entry_type dtype, std::vector req_config, unsigned duration) : _cl(boost::copy_range>>(req_config | boost::adaptors::filtered([] (auto& cfg) { return cfg.shard_placement.is_set(this_shard_id()); }) | boost::adaptors::transformed([] (auto& cfg) { return cfg.gen_class_data(); }) )) , _dir(dir) , _type(dtype) , _duration(duration) , _finished(0) {} future<> stop() { return parallel_for_each(_cl, [] (std::unique_ptr& cl) { return cl->stop(); }); } future<> start() { return parallel_for_each(_cl, [this] (std::unique_ptr& cl) { return cl->start(_dir, _type); }); } future<> issue_requests() { return parallel_for_each(_cl.begin(), _cl.end(), [this] (std::unique_ptr& cl) { return cl->issue_requests(std::chrono::steady_clock::now() + _duration).finally([this] { _finished.signal(1); }); }); } future<> emit_results(YAML::Emitter& out) { return _finished.wait(_cl.size()).then([this, &out] { for (auto& cl: _cl) { out << YAML::Key << cl->name(); out << YAML::BeginMap; cl->emit_results(out); out << YAML::EndMap; } return make_ready_future<>(); }); } }; static void show_results(distributed& ctx) { YAML::Emitter out; out << YAML::BeginDoc; out << YAML::BeginSeq; for (unsigned i = 0; i < smp::count; ++i) { out << YAML::BeginMap; out << YAML::Key << "shard" << YAML::Value << i; ctx.invoke_on(i, [&out] (auto& c) { return c.emit_results(out); }).get(); out << YAML::EndMap; } out << YAML::EndSeq; out << YAML::EndDoc; std::cout << out.c_str(); } int main(int ac, char** av) { namespace bpo = boost::program_options; app_template app; auto opt_add = app.add_options(); opt_add ("storage", bpo::value()->default_value("."), "directory or block device where to execute the test") ("duration", bpo::value()->default_value(10), "for how long (in seconds) to run the test") ("conf", bpo::value()->default_value("./conf.yaml"), "YAML file 
containing benchmark specification") ("keep-files", bpo::value()->default_value(false), "keep test files, next run may re-use them") ; distributed ctx; return app.run(ac, av, [&] { return seastar::async([&] { auto& opts = app.configuration(); auto& storage = opts["storage"].as(); auto st_type = engine().file_type(storage).get(); if (!st_type) { throw std::runtime_error(format("Unknown storage {}", storage)); } if (*st_type == directory_entry_type::directory) { auto fs = file_system_at(storage).get(); if (fs != fs_type::xfs) { std::cout << "WARNING!!! This is a performance test. " << storage << " is not on XFS" << std::endl; } } keep_files = opts["keep-files"].as(); auto& duration = opts["duration"].as(); auto& yaml = opts["conf"].as(); YAML::Node doc = YAML::LoadFile(yaml); auto reqs = doc.as>(); struct sched_class { seastar::scheduling_group sg; }; std::unordered_map sched_classes; parallel_for_each(reqs, [&sched_classes] (auto& r) { if (r.shard_info.sched_class != "") { return make_ready_future<>(); } return seastar::create_scheduling_group(r.name, r.shard_info.shares).then([&r, &sched_classes] (seastar::scheduling_group sg) { sched_classes.insert(std::make_pair(r.name, sched_class { .sg = sg, })); }); }).get(); for (job_config& r : reqs) { auto cname = r.shard_info.sched_class != "" ? r.shard_info.sched_class : r.name; fmt::print("Job {} -> sched class {}\n", r.name, cname); auto& sc = sched_classes.at(cname); r.shard_info.scheduling_group = sc.sg; } if (*st_type == directory_entry_type::block_device) { uint64_t off = 0; for (job_config& r : reqs) { r.offset_in_bdev = off; off += r.file_size; } } ctx.start(storage, *st_type, reqs, duration).get(); internal::at_exit([&ctx] { return ctx.stop(); }); std::cout << "Creating initial files..." << std::endl; ctx.invoke_on_all([] (auto& c) { return c.start(); }).get(); std::cout << "Starting evaluation..." 
<< std::endl; ctx.invoke_on_all([] (auto& c) { return c.issue_requests(); }).get(); show_results(ctx); ctx.stop().get(); }).or_terminate(); }); } seastar-25.05.0/apps/io_tester/ioinfo.cc000066400000000000000000000124041501510432000200470ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2021 ScyllaDB */ #include #include #include #include #include #include #include using namespace seastar; int main(int ac, char** av) { namespace bpo = boost::program_options; app_template app; auto opt_add = app.add_options(); opt_add ("directory", bpo::value()->default_value("."), "directory to work on") ("max-reqsize", bpo::value()->default_value(128u * 1024u), "maximum request size in bytes used when calculating capacity (default: 128kB)") ; return app.run(ac, av, [&] { return seastar::async([&] { auto& opts = app.configuration(); auto& storage = opts["directory"].as(); auto max_reqsz = opts["max-reqsize"].as(); YAML::Emitter out; out << YAML::BeginDoc; out << YAML::BeginMap; engine().open_file_dma(storage + "/tempfile", open_flags::rw | open_flags::create | open_flags::exclusive).then([&] (file f) { return with_closeable(std::move(f), [&out, &storage, max_reqsz] (file& f) { return remove_file(storage + "/tempfile").then([&out, &f] { out << YAML::Key << "disk_read_max_length" << 
YAML::Value << f.disk_read_max_length(); out << YAML::Key << "disk_write_max_length" << YAML::Value << f.disk_write_max_length(); }).then([&out, &f, max_reqsz] { return f.stat().then([&out, max_reqsz] (auto st) { auto& ioq = engine().get_io_queue(st.st_dev); auto& cfg = ioq.get_config(); out << YAML::Key << "device" << YAML::Value << st.st_dev; out << YAML::Key << "io_latency_goal_ms" << YAML::Value << std::chrono::duration_cast>(cfg.rate_limit_duration).count(); out << YAML::Key << "io_queue" << YAML::BeginMap; out << YAML::Key << "id" << YAML::Value << ioq.id(); out << YAML::Key << "req_count_rate" << YAML::Value << cfg.req_count_rate; out << YAML::Key << "blocks_count_rate" << YAML::Value << cfg.blocks_count_rate; out << YAML::Key << "disk_req_write_to_read_multiplier" << YAML::Value << cfg.disk_req_write_to_read_multiplier; out << YAML::Key << "disk_blocks_write_to_read_multiplier" << YAML::Value << cfg.disk_blocks_write_to_read_multiplier; out << YAML::EndMap; out << YAML::Key << "fair_queue" << YAML::BeginMap; out << YAML::Key << "capacities" << YAML::BeginMap; for (size_t sz = 512; sz <= max_reqsz; sz <<= 1) { out << YAML::Key << sz << YAML::BeginMap; out << YAML::Key << "read" << YAML::Value << ioq.request_capacity(internal::io_direction_and_length(internal::io_direction_and_length::read_idx, sz)); out << YAML::Key << "write" << YAML::Value << ioq.request_capacity(internal::io_direction_and_length(internal::io_direction_and_length::write_idx, sz)); out << YAML::EndMap; } out << YAML::EndMap; const auto& fg = internal::get_fair_group(ioq, internal::io_direction_and_length::write_idx); out << YAML::Key << "per_tick_grab_threshold" << YAML::Value << fg.per_tick_grab_threshold(); const auto& tb = fg.token_bucket(); out << YAML::Key << "token_bucket" << YAML::BeginMap; out << YAML::Key << "limit" << YAML::Value << tb.limit(); out << YAML::Key << "rate" << YAML::Value << tb.rate(); out << YAML::Key << "threshold" << YAML::Value << tb.threshold(); out << 
YAML::EndMap; out << YAML::EndMap; }); }); }); }).get(); out << YAML::EndMap; out << YAML::EndDoc; std::cout << out.c_str(); }); }); } seastar-25.05.0/apps/iotune/000077500000000000000000000000001501510432000155625ustar00rootroot00000000000000seastar-25.05.0/apps/iotune/CMakeLists.txt000066400000000000000000000015511501510432000203240ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # seastar_add_app (iotune SOURCES iotune.cc) target_link_libraries (app_iotune PRIVATE yaml-cpp::yaml-cpp) seastar-25.05.0/apps/iotune/iotune.cc000066400000000000000000001106561501510432000174050ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2018 ScyllaDB * * The goal of this program is to allow a user to properly configure the Seastar I/O * scheduler. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace seastar; using namespace std::chrono_literals; namespace fs = std::filesystem; logger iotune_logger("iotune"); using iotune_clock = std::chrono::steady_clock; static thread_local std::default_random_engine random_generator(std::chrono::duration_cast(iotune_clock::now().time_since_epoch()).count()); void check_device_properties(fs::path dev_sys_file) { auto sched_file = dev_sys_file / "queue" / "scheduler"; auto sched_string = read_first_line(sched_file); auto beg = sched_string.find('['); size_t len = sched_string.size(); if (beg == sstring::npos) { beg = 0; } else { auto end = sched_string.find(']'); if (end != sstring::npos) { len = end - beg - 1; } beg++; } auto scheduler = sched_string.substr(beg, len); if ((scheduler != "noop") && (scheduler != "none")) { iotune_logger.warn("Scheduler for {} set to {}. It is recommend to set it to noop before evaluation so as not to skew the results.", sched_file.string(), scheduler); } auto nomerges_file = dev_sys_file / "queue" / "nomerges"; auto nomerges = read_first_line_as(nomerges_file); if (nomerges != 2u) { iotune_logger.warn("nomerges for {} set to {}. It is recommend to set it to 2 before evaluation so that merges are disabled. 
Results can be skewed otherwise.", nomerges_file.string(), nomerges); } auto write_cache_file = dev_sys_file / "queue" / "write_cache"; auto write_cache = read_first_line_as(write_cache_file); if (write_cache == "write back") { iotune_logger.warn("write_cache for {} is set to write back. Some disks have poor implementation of this mode, pay attention to the measurements accuracy.", write_cache_file.string()); } } struct evaluation_directory { sstring _name; // We know that if we issue more than this, they will be blocked on linux anyway. unsigned _max_iodepth = 0; unsigned _force_io_depth; uint64_t _available_space; uint64_t _min_data_transfer_size = 512; unsigned _disks_per_array = 0; void scan_device(unsigned dev_maj, unsigned dev_min) { scan_device(fmt::format("{}:{}", dev_maj, dev_min)); } void scan_device(std::string dev_str) { scan_device(fs::path("/sys/dev/block") / dev_str); } void scan_device(fs::path sys_file) { try { sys_file = fs::canonical(sys_file); bool is_leaf = true; if (fs::exists(sys_file / "slaves")) { for (auto& dev : fs::directory_iterator(sys_file / "slaves")) { is_leaf = false; scan_device(read_first_line(dev.path() / "dev")); } } // our work is done if not leaf. We'll tune the leaves if (!is_leaf) { return; } if (fs::exists(sys_file / "partition")) { scan_device(sys_file.remove_filename()); } else { check_device_properties(sys_file); auto queue_dir = sys_file / "queue"; auto disk_min_io_size = read_first_line_as(queue_dir / "minimum_io_size"); _min_data_transfer_size = std::max(_min_data_transfer_size, disk_min_io_size); _max_iodepth += read_first_line_as(queue_dir / "nr_requests"); _disks_per_array++; } } catch (std::system_error& se) { iotune_logger.error("Error while parsing sysfs. 
Will continue with guessed values: {}", se.what()); _max_iodepth = 128; } if (_force_io_depth != 0) { _max_iodepth = _force_io_depth; } _disks_per_array = std::max(_disks_per_array, 1u); } public: evaluation_directory(sstring name, unsigned force_io_depth) : _name(name) , _force_io_depth(force_io_depth) , _available_space(fs::space(fs::path(_name)).available) {} unsigned max_iodepth() const { return _max_iodepth; } fs::path path() const { return fs::path(_name); } const sstring& name() const { return _name; } unsigned disks_per_array() const { return _disks_per_array; } uint64_t minimum_io_size() const { return _min_data_transfer_size; } future<> discover_directory() { return seastar::async([this] { auto f = open_directory(_name).get(); auto st = f.stat().get(); f.close().get(); scan_device(major(st.st_dev), minor(st.st_dev)); }); } uint64_t available_space() const { return _available_space; } }; struct io_rates { float bytes_per_sec = 0; float iops = 0; io_rates operator+(const io_rates& a) const { return io_rates{bytes_per_sec + a.bytes_per_sec, iops + a.iops}; } io_rates& operator+=(const io_rates& a) { bytes_per_sec += a.bytes_per_sec; iops += a.iops; return *this; } }; struct row_stats { size_t points; double average; double stdev; float stdev_percents() const { return points > 0 ? 
stdev / average : 0.0; } }; template static row_stats get_row_stats_for(const std::vector& v) { if (v.size() == 0) { return row_stats{0, 0.0, 0.0}; } double avg = std::accumulate(v.begin(), v.end(), 0.0) / v.size(); double stdev = std::sqrt(std::transform_reduce(v.begin(), v.end(), 0.0, std::plus(), [avg] (auto& v) -> double { return (v - avg) * (v - avg); }) / v.size()); return row_stats{ v.size(), avg, stdev }; } class invalid_position : public std::exception { public: virtual const char* what() const noexcept { return "file access position invalid"; } }; struct position_generator { virtual uint64_t get_pos() = 0; virtual bool is_sequential() const = 0; virtual ~position_generator() {} }; class sequential_issuer : public position_generator { size_t _buffer_size; uint64_t _position = 0; uint64_t _size_limit; public: sequential_issuer(size_t buffer_size, uint64_t size_limit) : _buffer_size(buffer_size) , _size_limit(size_limit) {} virtual bool is_sequential() const { return true; } virtual uint64_t get_pos() { if (_position >= _size_limit) { // Wrap around if reaching EOF. The write bandwidth is lower, // and we also split the write bandwidth among shards, while we // read only from shard 0, so shard 0's file may not be large // enough to read from. 
_position = 0; } auto pos = _position; _position += _buffer_size; return pos; } }; class random_issuer : public position_generator { size_t _buffer_size; uint64_t _last_position; std::uniform_int_distribution _pos_distribution; public: random_issuer(size_t buffer_size, uint64_t last_position) : _buffer_size(buffer_size) , _last_position(last_position) , _pos_distribution(0, (last_position / buffer_size) - 1) {} virtual bool is_sequential() const { return false; } virtual uint64_t get_pos() { uint64_t pos = _pos_distribution(random_generator) * _buffer_size; if (pos >= _last_position) { throw invalid_position(); } return pos; } }; class request_issuer { public: virtual future issue_request(uint64_t pos, char* buf, uint64_t size) = 0; virtual ~request_issuer() {} }; class write_request_issuer : public request_issuer { file _file; public: explicit write_request_issuer(file f) : _file(f) {} future issue_request(uint64_t pos, char* buf, uint64_t size) override { return _file.dma_write(pos, buf, size); } }; class read_request_issuer : public request_issuer { file _file; public: explicit read_request_issuer(file f) : _file(f) {} future issue_request(uint64_t pos, char* buf, uint64_t size) override { return _file.dma_read(pos, buf, size); } }; class io_worker { class requests_rate_meter { std::vector& _rates; const unsigned& _requests; unsigned _prev_requests = 0; timer<> _tick; static constexpr auto period = 1s; public: requests_rate_meter(std::chrono::duration duration, std::vector& rates, const unsigned& requests) : _rates(rates) , _requests(requests) , _tick([this] { _rates.push_back(_requests - _prev_requests); _prev_requests = _requests; }) { _rates.reserve(256); // ~2 minutes if (duration > 4 * period) { _tick.arm_periodic(period); } } ~requests_rate_meter() { if (_tick.armed()) { _tick.cancel(); } else { _rates.push_back(_requests); } } }; uint64_t _bytes = 0; uint64_t _max_offset = 0; unsigned _requests = 0; size_t _buffer_size; std::chrono::time_point> 
_start_measuring; std::chrono::time_point> _end_measuring; std::chrono::time_point> _end_load; // track separately because in the sequential case we may exhaust the file before _duration std::chrono::time_point> _last_time_seen; requests_rate_meter _rr_meter; std::unique_ptr _pos_impl; std::unique_ptr _req_impl; public: bool is_sequential() const { return _pos_impl->is_sequential(); } bool should_stop() const { return iotune_clock::now() >= _end_load; } io_worker(size_t buffer_size, std::chrono::duration duration, std::unique_ptr reqs, std::unique_ptr pos, std::vector& rates) : _buffer_size(buffer_size) , _start_measuring(iotune_clock::now() + std::chrono::duration(10ms)) , _end_measuring(_start_measuring + duration) , _end_load(_end_measuring + 10ms) , _last_time_seen(_start_measuring) , _rr_meter(duration, rates, _requests) , _pos_impl(std::move(pos)) , _req_impl(std::move(reqs)) {} std::unique_ptr get_buffer() { return allocate_aligned_buffer(_buffer_size, _buffer_size); } future<> issue_request(char* buf) { uint64_t pos = _pos_impl->get_pos(); return _req_impl->issue_request(pos, buf, _buffer_size).then([this, pos] (size_t size) { auto now = iotune_clock::now(); _max_offset = std::max(_max_offset, pos + size); if ((now > _start_measuring) && (now < _end_measuring)) { _last_time_seen = now; _bytes += size; _requests++; } }); } uint64_t max_offset() const noexcept { return _max_offset; } io_rates get_io_rates() const { io_rates rates; auto t = _last_time_seen - _start_measuring; if (!t.count()) { throw std::runtime_error("No data collected"); } rates.bytes_per_sec = _bytes / t.count(); rates.iops = _requests / t.count(); return rates; } }; class test_file { public: enum class pattern { sequential, random }; private: fs::path _dirpath; uint64_t _file_size; file _file; uint64_t _forced_random_io_buffer_size; std::unique_ptr get_position_generator(size_t buffer_size, pattern access_pattern) { if (access_pattern == pattern::sequential) { return 
std::make_unique(buffer_size, _file_size); } else { return std::make_unique(buffer_size, _file_size); } } uint64_t calculate_buffer_size(pattern access_pattern, uint64_t buffer_size, uint64_t operation_alignment) const { if (access_pattern == pattern::random && _forced_random_io_buffer_size != 0u) { return _forced_random_io_buffer_size; } return std::max(buffer_size, operation_alignment); } public: test_file(const ::evaluation_directory& dir, uint64_t maximum_size, uint64_t random_io_buffer_size) : _dirpath(dir.path() / fs::path(fmt::format("ioqueue-discovery-{}", this_shard_id()))) , _file_size(maximum_size) , _forced_random_io_buffer_size(random_io_buffer_size) {} future<> create_data_file() { // XFS likes access in many directories better. return make_directory(_dirpath.string()).then([this] { auto testfile = _dirpath / fs::path("testfile"); file_open_options options; options.extent_allocation_size_hint = _file_size; return open_file_dma(testfile.string(), open_flags::rw | open_flags::create, std::move(options)).then([this, testfile] (file file) { _file = file; if (this_shard_id() == 0) { iotune_logger.info("Filesystem parameters: read alignment {}, write alignment {}", _file.disk_read_dma_alignment(), _file.disk_write_dma_alignment()); } return remove_file(testfile.string()).then([this] { return remove_file(_dirpath.string()); }); }).then([this] { return _file.truncate(_file_size); }); }); } future do_workload(std::unique_ptr worker_ptr, unsigned max_os_concurrency, bool update_file_size = false) { if (update_file_size) { _file_size = 0; } auto worker = worker_ptr.get(); auto concurrency = std::views::iota(0u, max_os_concurrency); return parallel_for_each(std::move(concurrency), [worker] (unsigned idx) { auto bufptr = worker->get_buffer(); auto buf = bufptr.get(); return do_until([worker] { return worker->should_stop(); }, [buf, worker] { return worker->issue_request(buf); }).finally([alive = std::move(bufptr)] {}); }).then_wrapped([this, worker = 
std::move(worker_ptr), update_file_size] (future<> f) { try { f.get(); } catch (invalid_position& ip) { // expected if sequential. Example: reading and the file ended. if (!worker->is_sequential()) { throw; } } if (update_file_size) { _file_size = worker->max_offset(); } return make_ready_future(worker->get_io_rates()); }); } future read_workload(size_t buffer_size, pattern access_pattern, unsigned max_os_concurrency, std::chrono::duration duration, std::vector& rates) { buffer_size = calculate_buffer_size(access_pattern, buffer_size, _file.disk_read_dma_alignment()); auto worker = std::make_unique(buffer_size, duration, std::make_unique(_file), get_position_generator(buffer_size, access_pattern), rates); return do_workload(std::move(worker), max_os_concurrency); } future write_workload(size_t buffer_size, pattern access_pattern, unsigned max_os_concurrency, std::chrono::duration duration, std::vector& rates) { buffer_size = calculate_buffer_size(access_pattern, buffer_size, _file.disk_write_dma_alignment()); auto worker = std::make_unique(buffer_size, duration, std::make_unique(_file), get_position_generator(buffer_size, access_pattern), rates); bool update_file_size = worker->is_sequential(); return do_workload(std::move(worker), max_os_concurrency, update_file_size).then([this] (io_rates r) { return _file.flush().then([r = std::move(r)] () mutable { return make_ready_future(std::move(r)); }); }); } future<> stop() { return _file ? 
_file.close() : make_ready_future<>(); } }; class iotune_multi_shard_context { ::evaluation_directory _test_directory; uint64_t _random_io_buffer_size; unsigned per_shard_io_depth() const { auto iodepth = _test_directory.max_iodepth() / smp::count; if (this_shard_id() < _test_directory.max_iodepth() % smp::count) { iodepth++; } return std::min(iodepth, 128u); } seastar::sharded _iotune_test_file; std::vector serial_rates; seastar::sharded> sharded_rates; public: future<> stop() { return _iotune_test_file.stop().then([this] { return sharded_rates.stop(); }); } future<> start() { const auto maximum_size = (_test_directory.available_space() / (2 * smp::count)); return _iotune_test_file.start(_test_directory, maximum_size, _random_io_buffer_size).then([this] { return sharded_rates.start(); }); } future get_serial_rates() { row_stats ret = get_row_stats_for(serial_rates); serial_rates.clear(); return make_ready_future(ret); } future get_sharded_worst_rates() { return sharded_rates.map_reduce0([] (std::vector& rates) { row_stats ret = get_row_stats_for(rates); rates.clear(); return ret; }, row_stats{0, 0.0, 0.0}, [] (const row_stats& res, row_stats lres) { return res.stdev < lres.stdev ? 
lres : res; }); } future<> create_data_file() { return _iotune_test_file.invoke_on_all([] (test_file& tf) { return tf.create_data_file(); }); } future write_sequential_data(unsigned shard, size_t buffer_size, std::chrono::duration duration) { return _iotune_test_file.invoke_on(shard, [this, buffer_size, duration] (test_file& tf) { return tf.write_workload(buffer_size, test_file::pattern::sequential, 4 * _test_directory.disks_per_array(), duration, serial_rates); }); } future read_sequential_data(unsigned shard, size_t buffer_size, std::chrono::duration duration) { return _iotune_test_file.invoke_on(shard, [this, buffer_size, duration] (test_file& tf) { return tf.read_workload(buffer_size, test_file::pattern::sequential, 4 * _test_directory.disks_per_array(), duration, serial_rates); }); } future write_random_data(size_t buffer_size, std::chrono::duration duration) { return _iotune_test_file.map_reduce0([buffer_size, this, duration] (test_file& tf) { const auto shard_io_depth = per_shard_io_depth(); if (shard_io_depth == 0) { return make_ready_future(); } else { return tf.write_workload(buffer_size, test_file::pattern::random, shard_io_depth, duration, sharded_rates.local()); } }, io_rates(), std::plus()); } future read_random_data(size_t buffer_size, std::chrono::duration duration) { return _iotune_test_file.map_reduce0([buffer_size, this, duration] (test_file& tf) { const auto shard_io_depth = per_shard_io_depth(); if (shard_io_depth == 0) { return make_ready_future(); } else { return tf.read_workload(buffer_size, test_file::pattern::random, shard_io_depth, duration, sharded_rates.local()); } }, io_rates(), std::plus()); } private: template future saturate(float rate_threshold, size_t buffer_size, std::chrono::duration duration, Fn&& workload) { return _iotune_test_file.invoke_on(0, [this, rate_threshold, buffer_size, duration, workload] (test_file& tf) { return (tf.*workload)(buffer_size, test_file::pattern::sequential, 1, duration, serial_rates).then([this, 
rate_threshold, buffer_size, duration, workload] (io_rates rates) { serial_rates.clear(); if (rates.bytes_per_sec < rate_threshold) { // The throughput with the given buffer-size is already "small enough", so // return back its previous value return make_ready_future(buffer_size * 2); } else { return saturate(rate_threshold, buffer_size / 2, duration, workload); } }); }); } public: future saturate_write(float rate_threshold, size_t buffer_size, std::chrono::duration duration) { return saturate(rate_threshold, buffer_size, duration, &test_file::write_workload); } future saturate_read(float rate_threshold, size_t buffer_size, std::chrono::duration duration) { return saturate(rate_threshold, buffer_size, duration, &test_file::read_workload); } iotune_multi_shard_context(::evaluation_directory dir, uint64_t random_io_buffer_size) : _test_directory(dir) , _random_io_buffer_size(random_io_buffer_size) {} }; struct disk_descriptor { std::string mountpoint; uint64_t read_iops; uint64_t read_bw; uint64_t write_iops; uint64_t write_bw; std::optional read_sat_len; std::optional write_sat_len; }; void string_to_file(sstring conf_file, sstring buf) { auto f = file_desc::open(conf_file, O_WRONLY | O_CLOEXEC | O_CREAT | O_TRUNC, 0664); auto ret = f.write(buf.data(), buf.size()); if (!ret || (*ret != buf.size())) { throw std::runtime_error(fmt::format("Can't write {}: {}", conf_file, *ret)); } } void write_configuration_file(sstring conf_file, std::string format, sstring properties_file) { sstring buf; if (format == "seastar") { buf = fmt::format("io-properties-file={}\n", properties_file); } else { buf = fmt::format("SEASTAR_IO=\"--io-properties-file={}\"\n", properties_file); } string_to_file(conf_file, buf); } void write_property_file(sstring conf_file, std::vector disk_descriptors) { YAML::Emitter out; out << YAML::BeginMap; out << YAML::Key << "disks"; out << YAML::BeginSeq; for (auto& desc : disk_descriptors) { out << YAML::BeginMap; out << YAML::Key << "mountpoint" << 
YAML::Value << desc.mountpoint; out << YAML::Key << "read_iops" << YAML::Value << desc.read_iops; out << YAML::Key << "read_bandwidth" << YAML::Value << desc.read_bw; out << YAML::Key << "write_iops" << YAML::Value << desc.write_iops; out << YAML::Key << "write_bandwidth" << YAML::Value << desc.write_bw; if (desc.read_sat_len) { out << YAML::Key << "read_saturation_length" << YAML::Value << *desc.read_sat_len; } if (desc.write_sat_len) { out << YAML::Key << "write_saturation_length" << YAML::Value << *desc.write_sat_len; } out << YAML::EndMap; } out << YAML::EndSeq; out << YAML::EndMap; out << YAML::Newline; string_to_file(conf_file, sstring(out.c_str(), out.size())); } // Returns the mountpoint of a path. It works by walking backwards from the canonical path // (absolute, with symlinks resolved), until we find a point that crosses a device ID. fs::path mountpoint_of(sstring filename) { fs::path mnt_candidate = fs::canonical(fs::path(filename)); std::optional candidate_id = {}; auto current = mnt_candidate; do { auto f = open_directory(current.string()).get(); auto st = f.stat().get(); if ((candidate_id) && (*candidate_id != st.st_dev)) { return mnt_candidate; } mnt_candidate = current; candidate_id = st.st_dev; current = current.parent_path(); } while (mnt_candidate != current); return mnt_candidate; } int main(int ac, char** av) { namespace bpo = boost::program_options; bool fs_check = false; app_template::config app_cfg; app_cfg.name = "IOTune"; app_template app(std::move(app_cfg)); auto opt_add = app.add_options(); opt_add ("evaluation-directory", bpo::value>()->required(), "directory where to execute the evaluation") ("properties-file", bpo::value(), "path in which to write the YAML file") ("options-file", bpo::value(), "path in which to write the legacy conf file") ("duration", bpo::value()->default_value(120), "time, in seconds, for which to run the test") ("format", bpo::value()->default_value("seastar"), "Configuration file format (seastar | envfile)") 
("fs-check", bpo::bool_switch(&fs_check), "perform FS check only") ("accuracy", bpo::value()->default_value(3), "acceptable deviation of measurements (percents)") ("saturation", bpo::value()->default_value(""), "measure saturation lengths (read | write | both) (this is very slow!)") ("random-io-buffer-size", bpo::value()->default_value(0), "force buffer size for random write and random read") ("force-io-depth", bpo::value()->default_value(0), "force io depth to a certain size (overriding auto detection logic)") ; return app.run(ac, av, [&] { return seastar::async([&] { auto& configuration = app.configuration(); auto eval_dirs = configuration["evaluation-directory"].as>(); auto format = configuration["format"].as(); auto duration = std::chrono::duration(configuration["duration"].as() * 1s); auto accuracy = configuration["accuracy"].as(); auto saturation = configuration["saturation"].as(); auto random_io_buffer_size = configuration["random-io-buffer-size"].as(); auto force_io_depth = configuration["force-io-depth"].as(); bool read_saturation, write_saturation; if (saturation == "") { read_saturation = false; write_saturation = false; } else if (saturation == "both") { read_saturation = true; write_saturation = true; } else if (saturation == "read") { read_saturation = true; write_saturation = false; } else if (saturation == "write") { read_saturation = false; write_saturation = true; } else { fmt::print("Bad --saturation value\n"); return 1; } std::vector disk_descriptors; std::unordered_map mountpoint_map; // We want to evaluate once per mountpoint, but we still want to write in one of the // directories that we were provided - we may not have permissions to write into the // mountpoint itself. If we are passed more than one directory per mountpoint, we don't // really care to which one we write, so this simple hash will do. 
for (auto& eval_dir : eval_dirs) { mountpoint_map[mountpoint_of(eval_dir).string()] = eval_dir; } for (auto eval: mountpoint_map) { auto mountpoint = eval.first; auto eval_dir = eval.second; if (!filesystem_has_good_aio_support(eval_dir, false)) { iotune_logger.error("Linux AIO is not supported by filesystem at {}", eval_dir); return 1; } auto rec = 10000000000ULL; auto avail = fs_avail(eval_dir).get(); if (avail < rec) { uint64_t val; const char* units; if (avail >= 1000000000) { val = (avail + 500000000) / 1000000000; units = "GB"; } else if (avail >= 1000000) { val = (avail + 500000) / 1000000; units = "MB"; } else { val = avail; units = "bytes"; } iotune_logger.warn("Available space on filesystem at {}: {} {}: is less than recommended: {} GB", eval_dir, val, units, rec / 1000000000ULL); } iotune_logger.info("{} passed sanity checks", eval_dir); if (fs_check) { continue; } // Directory is the same object for all tests. ::evaluation_directory test_directory(eval_dir, force_io_depth); test_directory.discover_directory().get(); iotune_logger.info("Disk parameters: max_iodepth={} disks_per_array={} minimum_io_size={}", test_directory.max_iodepth(), test_directory.disks_per_array(), test_directory.minimum_io_size()); if (test_directory.max_iodepth() < smp::count) { iotune_logger.warn("smp::count={} is greater than max_iodepth={} - shards above max_io_depth " "will be ignored during random read and random write measurements", smp::count, test_directory.max_iodepth()); } if (random_io_buffer_size != 0u) { iotune_logger.info("Forcing buffer_size={} for random IO!", random_io_buffer_size); } ::iotune_multi_shard_context iotune_tests(test_directory, random_io_buffer_size); iotune_tests.start().get(); auto stop = defer([&iotune_tests] () noexcept { try { iotune_tests.stop().get(); } catch (...) 
{ fmt::print("Error occurred during iotune context shutdown: {}", std::current_exception()); abort(); } }); row_stats rates; auto accuracy_msg = [accuracy, &rates] { auto stdev = rates.stdev_percents() * 100.0; return (accuracy == 0 || stdev > accuracy) ? fmt::format(" (deviation {}%)", int(round(stdev))) : std::string(""); }; iotune_tests.create_data_file().get(); fmt::print("Starting Evaluation. This may take a while...\n"); fmt::print("Measuring sequential write bandwidth: "); std::cout.flush(); io_rates write_bw; size_t sequential_buffer_size = 1 << 20; for (unsigned shard = 0; shard < smp::count; ++shard) { write_bw += iotune_tests.write_sequential_data(shard, sequential_buffer_size, duration * 0.70 / smp::count).get(); } write_bw.bytes_per_sec /= smp::count; rates = iotune_tests.get_serial_rates().get(); fmt::print("{} MB/s{}\n", uint64_t(write_bw.bytes_per_sec / (1024 * 1024)), accuracy_msg()); std::optional write_sat; if (write_saturation) { fmt::print("Measuring write saturation length: "); std::cout.flush(); write_sat = iotune_tests.saturate_write(write_bw.bytes_per_sec * (1.0 - rates.stdev_percents()), sequential_buffer_size/2, duration * 0.70).get(); fmt::print("{}\n", *write_sat); } fmt::print("Measuring sequential read bandwidth: "); std::cout.flush(); auto read_bw = iotune_tests.read_sequential_data(0, sequential_buffer_size, duration * 0.1).get(); rates = iotune_tests.get_serial_rates().get(); fmt::print("{} MB/s{}\n", uint64_t(read_bw.bytes_per_sec / (1024 * 1024)), accuracy_msg()); std::optional read_sat; if (read_saturation) { fmt::print("Measuring read saturation length: "); std::cout.flush(); read_sat = iotune_tests.saturate_read(read_bw.bytes_per_sec * (1.0 - rates.stdev_percents()), sequential_buffer_size/2, duration * 0.1).get(); fmt::print("{}\n", *read_sat); } fmt::print("Measuring random write IOPS: "); std::cout.flush(); auto write_iops = iotune_tests.write_random_data(test_directory.minimum_io_size(), duration * 0.1).get(); rates = 
iotune_tests.get_sharded_worst_rates().get(); fmt::print("{} IOPS{}\n", uint64_t(write_iops.iops), accuracy_msg()); fmt::print("Measuring random read IOPS: "); std::cout.flush(); auto read_iops = iotune_tests.read_random_data(test_directory.minimum_io_size(), duration * 0.1).get(); rates = iotune_tests.get_sharded_worst_rates().get(); fmt::print("{} IOPS{}\n", uint64_t(read_iops.iops), accuracy_msg()); struct disk_descriptor desc; desc.mountpoint = mountpoint; desc.read_iops = read_iops.iops; desc.read_bw = read_bw.bytes_per_sec; desc.read_sat_len = read_sat; desc.write_iops = write_iops.iops; desc.write_bw = write_bw.bytes_per_sec; desc.write_sat_len = write_sat; disk_descriptors.push_back(std::move(desc)); } if (fs_check) { return 0; } auto file = "properties file"; try { if (configuration.count("properties-file")) { fmt::print("Writing result to {}\n", configuration["properties-file"].as()); write_property_file(configuration["properties-file"].as(), disk_descriptors); } file = "configuration file"; if (configuration.count("options-file")) { fmt::print("Writing result to {}\n", configuration["options-file"].as()); write_configuration_file(configuration["options-file"].as(), format, configuration["properties-file"].as()); } } catch (...) { iotune_logger.error("Exception when writing {}: {}.\nPlease add the above values manually to your seastar command line.", file, std::current_exception()); return 1; } return 0; }); }); } seastar-25.05.0/apps/lib/000077500000000000000000000000001501510432000150255ustar00rootroot00000000000000seastar-25.05.0/apps/lib/stop_signal.hh000066400000000000000000000043201501510432000176670ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2020 Cloudius Systems, Ltd. */ #pragma once #include #include #include #include /// Seastar apps lib namespace namespace seastar_apps_lib { /// \brief Futurized SIGINT/SIGTERM signals handler class /// /// Seastar-style helper class that allows easy waiting for SIGINT/SIGTERM signals /// from your app. /// /// Example: /// \code /// #include /// ... /// int main() { /// ... /// seastar::thread th([] { /// seastar_apps_lib::stop_signal stop_signal; /// /// stop_signal.wait().get(); // this will wait till we receive SIGINT or SIGTERM signal /// }); /// \endcode class stop_signal { bool _caught = false; seastar::condition_variable _cond; private: void signaled() { if (_caught) { return; } _caught = true; _cond.broadcast(); } public: stop_signal() { seastar::handle_signal(SIGINT, [this] { signaled(); }); seastar::handle_signal(SIGTERM, [this] { signaled(); }); } ~stop_signal() { // There's no way to unregister a handler yet, so register a no-op handler instead. seastar::handle_signal(SIGINT, [] {}); seastar::handle_signal(SIGTERM, [] {}); } seastar::future<> wait() { return _cond.wait([this] { return _caught; }); } bool stopping() const { return _caught; } }; }seastar-25.05.0/apps/memcached/000077500000000000000000000000001501510432000161655ustar00rootroot00000000000000seastar-25.05.0/apps/memcached/CMakeLists.txt000066400000000000000000000025531501510432000207320ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). 
See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # set (Seastar_APP_MEMCACHED_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) set (Seastar_APP_MEMCACHED_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) seastar_generate_ragel ( TARGET app_memcached_ascii VAR app_memcached_ascii_file IN_FILE ${CMAKE_CURRENT_SOURCE_DIR}/ascii.rl OUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/ascii.hh) seastar_add_app (memcached SOURCES ${app_memcached_ascii_file} memcache.cc memcached.hh) target_include_directories (app_memcached PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) add_dependencies (app_memcached app_memcached_ascii) # # Tests. # if (Seastar_TESTING) add_subdirectory (tests) endif () seastar-25.05.0/apps/memcached/ascii.rl000066400000000000000000000113321501510432000176140ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include "memcached.hh" #include #include #include using namespace seastar; %%{ machine memcache_ascii_protocol; access _fsm_; action mark { g.mark_start(p); } action start_blob { g.mark_start(p); _size_left = _size; } action advance_blob { auto len = std::min((uint32_t)(pe - p), _size_left); _size_left -= len; p += len; if (_size_left == 0) { _blob = str(); p--; fret; } p--; } crlf = '\r\n'; sp = ' '; u32 = digit+ >{ _u32 = 0; } ${ _u32 *= 10; _u32 += fc - '0'; }; u64 = digit+ >{ _u64 = 0; } ${ _u64 *= 10; _u64 += fc - '0'; }; key = [^ ]+ >mark %{ _key = memcache::item_key(str()); }; flags = digit+ >mark %{ _flags_str = str(); }; expiration = u32 %{ _expiration = _u32; }; size = u32 >mark %{ _size = _u32; _size_str = str(); }; blob := any+ >start_blob $advance_blob; maybe_noreply = (sp "noreply" @{ _noreply = true; })? >{ _noreply = false; }; maybe_expiration = (sp expiration)? 
>{ _expiration = 0; }; version_field = u64 %{ _version = _u64; }; insertion_params = sp key sp flags sp expiration sp size maybe_noreply (crlf @{ fcall blob; } ) crlf; set = "set" insertion_params @{ _state = state::cmd_set; }; add = "add" insertion_params @{ _state = state::cmd_add; }; replace = "replace" insertion_params @{ _state = state::cmd_replace; }; cas = "cas" sp key sp flags sp expiration sp size sp version_field maybe_noreply (crlf @{ fcall blob; } ) crlf @{ _state = state::cmd_cas; }; get = "get" (sp key %{ _keys.emplace_back(std::move(_key)); })+ crlf @{ _state = state::cmd_get; }; gets = "gets" (sp key %{ _keys.emplace_back(std::move(_key)); })+ crlf @{ _state = state::cmd_gets; }; delete = "delete" sp key maybe_noreply crlf @{ _state = state::cmd_delete; }; flush = "flush_all" maybe_expiration maybe_noreply crlf @{ _state = state::cmd_flush_all; }; version = "version" crlf @{ _state = state::cmd_version; }; stats = "stats" crlf @{ _state = state::cmd_stats; }; stats_hash = "stats hash" crlf @{ _state = state::cmd_stats_hash; }; incr = "incr" sp key sp u64 maybe_noreply crlf @{ _state = state::cmd_incr; }; decr = "decr" sp key sp u64 maybe_noreply crlf @{ _state = state::cmd_decr; }; main := (add | replace | set | get | gets | delete | flush | version | cas | stats | incr | decr | stats_hash) >eof{ _state = state::eof; }; prepush { prepush(); } postpop { postpop(); } }%% class memcache_ascii_parser : public ragel_parser_base { %% write data nofinal noprefix; public: enum class state { error, eof, cmd_set, cmd_cas, cmd_add, cmd_replace, cmd_get, cmd_gets, cmd_delete, cmd_flush_all, cmd_version, cmd_stats, cmd_stats_hash, cmd_incr, cmd_decr, }; state _state; uint32_t _u32; uint64_t _u64; memcache::item_key _key; sstring _flags_str; uint32_t _expiration; uint32_t _size; sstring _size_str; uint32_t _size_left; uint64_t _version; sstring _blob; bool _noreply; std::vector _keys; public: void init() { init_base(); _state = state::error; _keys.clear(); %% 
write init; } char* parse(char* p, char* pe, char* eof) { sstring_builder::guard g(_builder, p, pe); auto str = [this, &g, &p] { g.mark_end(p); return get_str(); }; #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wmisleading-indentation" #endif #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wimplicit-fallthrough" %% write exec; #pragma GCC diagnostic pop #ifdef __clang__ #pragma clang diagnostic pop #endif if (_state != state::error) { return p; } if (p != pe) { p = pe; return p; } return nullptr; } bool eof() const { return _state == state::eof; } }; seastar-25.05.0/apps/memcached/memcache.cc000066400000000000000000001554711501510432000202530ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ /* * Copyright 2014-2015 Cloudius Systems */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ascii.hh" #include "memcached.hh" #include #define PLATFORM "seastar" #define VERSION "v1.0" #define VERSION_STRING PLATFORM " " VERSION using namespace seastar; using namespace net; namespace memcache { namespace bi = boost::intrusive; static constexpr double default_slab_growth_factor = 1.25; static constexpr uint64_t default_slab_page_size = 1UL*MB; static constexpr uint64_t default_per_cpu_slab_size = 0UL; // zero means reclaimer is enabled. static __thread slab_allocator* slab; static thread_local std::unique_ptr> slab_holder; template using optional = std::optional; using clock_type = lowres_clock; // // "Expiration" is a uint32_t value. // The minimal value of _time is when "expiration" is set to (seconds_in_a_month // + 1). // In this case _time will have a value of // // (seconds_in_a_month + 1 - Wall_Clock_Time_Since_Epoch) // // because lowres_clock now() initialized to zero when the application starts. // // We will use a timepoint at LLONG_MIN to represent a "never expire" value // since it will not collide with the minimum _time value mentioned above for // about 290 thousand years to come. 
// static constexpr clock_type::time_point never_expire_timepoint = clock_type::time_point(clock_type::duration::min()); struct expiration { using time_point = clock_type::time_point; using duration = time_point::duration; static constexpr uint32_t seconds_in_a_month = 60U * 60 * 24 * 30; time_point _time = never_expire_timepoint; expiration() {} expiration(clock_type::duration wc_to_clock_type_delta, uint32_t s) { using namespace std::chrono; static_assert(sizeof(clock_type::duration::rep) >= 8, "clock_type::duration::rep must be at least 8 bytes wide"); if (s == 0U) { return; // means never expire. } else if (s <= seconds_in_a_month) { _time = clock_type::now() + seconds(s); // from delta } else { // // seastar::reactor supports only a monotonic clock at the moment // therefore this may make the elements with the absolute expiration // time expire at the wrong time if the wall clock has been updated // during the expiration period. However the original memcached has // the same weakness. // // TODO: Fix this when a support for system_clock-based timers is // added to the seastar::reactor. // _time = time_point(seconds(s) + wc_to_clock_type_delta); // from real time } } bool ever_expires() { return _time != never_expire_timepoint; } time_point to_time_point() { return _time; } }; class item : public slab_item_base { public: using version_type = uint64_t; using time_point = expiration::time_point; using duration = expiration::duration; static constexpr uint8_t field_alignment = alignof(void*); private: using hook_type = bi::unordered_set_member_hook<>; // TODO: align shared data to cache line boundary version_type _version; hook_type _cache_link; bi::list_member_hook<> _timer_link; size_t _key_hash; expiration _expiry; uint32_t _value_size; uint32_t _slab_page_index; uint16_t _ref_count; uint8_t _key_size; uint8_t _ascii_prefix_size; char _data[]; // layout: data=key, (data+key_size)=ascii_prefix, (data+key_size+ascii_prefix_size)=value. 
friend class cache; public: item(uint32_t slab_page_index, item_key&& key, sstring&& ascii_prefix, sstring&& value, expiration expiry, version_type version = 1) : _version(version) , _key_hash(key.hash()) , _expiry(expiry) , _value_size(value.size()) , _slab_page_index(slab_page_index) , _ref_count(0U) , _key_size(key.key().size()) , _ascii_prefix_size(ascii_prefix.size()) { SEASTAR_ASSERT(_key_size <= std::numeric_limits::max()); SEASTAR_ASSERT(_ascii_prefix_size <= std::numeric_limits::max()); // storing key memcpy(_data, key.key().c_str(), _key_size); // storing ascii_prefix memcpy(_data + align_up(_key_size, field_alignment), ascii_prefix.c_str(), _ascii_prefix_size); // storing value memcpy(_data + align_up(_key_size, field_alignment) + align_up(_ascii_prefix_size, field_alignment), value.c_str(), _value_size); } item(const item&) = delete; item(item&&) = delete; clock_type::time_point get_timeout() { return _expiry.to_time_point(); } version_type version() { return _version; } const std::string_view key() const { return std::string_view(_data, _key_size); } const std::string_view ascii_prefix() const { const char *p = _data + align_up(_key_size, field_alignment); return std::string_view(p, _ascii_prefix_size); } const std::string_view value() const { const char *p = _data + align_up(_key_size, field_alignment) + align_up(_ascii_prefix_size, field_alignment); return std::string_view(p, _value_size); } size_t key_size() const { return _key_size; } size_t ascii_prefix_size() const { return _ascii_prefix_size; } size_t value_size() const { return _value_size; } optional data_as_integral() { auto str = value().data(); if (str[0] == '-') { return {}; } auto len = _value_size; // Strip trailing space while (len && str[len - 1] == ' ') { len--; } try { return {boost::lexical_cast(str, len)}; } catch (const boost::bad_lexical_cast& e) { return {}; } } // needed by timer_set bool cancel() { return false; } // Methods required by slab allocator. 
uint32_t get_slab_page_index() const { return _slab_page_index; } bool is_unlocked() const { return _ref_count == 1; } friend bool operator==(const item &a, const item &b) { return (a._key_hash == b._key_hash) && (a._key_size == b._key_size) && (memcmp(a._data, b._data, a._key_size) == 0); } friend std::size_t hash_value(const item &i) { return i._key_hash; } friend inline void intrusive_ptr_add_ref(item* it) { SEASTAR_ASSERT(it->_ref_count >= 0); ++it->_ref_count; if (it->_ref_count == 2) { slab->lock_item(it); } } friend inline void intrusive_ptr_release(item* it) { --it->_ref_count; if (it->_ref_count == 1) { slab->unlock_item(it); } else if (it->_ref_count == 0) { slab->free(it); } SEASTAR_ASSERT(it->_ref_count >= 0); } friend struct item_key_cmp; }; struct item_key_cmp { private: bool compare(const item_key& key, const item& it) const { return (it._key_hash == key.hash()) && (it._key_size == key.key().size()) && (memcmp(it._data, key.key().c_str(), it._key_size) == 0); } public: bool operator()(const item_key& key, const item& it) const { return compare(key, it); } bool operator()(const item& it, const item_key& key) const { return compare(key, it); } }; using item_ptr = foreign_ptr>; struct cache_stats { size_t _get_hits {}; size_t _get_misses {}; size_t _set_adds {}; size_t _set_replaces {}; size_t _cas_hits {}; size_t _cas_misses {}; size_t _cas_badval {}; size_t _delete_misses {}; size_t _delete_hits {}; size_t _incr_misses {}; size_t _incr_hits {}; size_t _decr_misses {}; size_t _decr_hits {}; size_t _expired {}; size_t _evicted {}; size_t _bytes {}; size_t _resize_failure {}; size_t _size {}; size_t _reclaims{}; void operator+=(const cache_stats& o) { _get_hits += o._get_hits; _get_misses += o._get_misses; _set_adds += o._set_adds; _set_replaces += o._set_replaces; _cas_hits += o._cas_hits; _cas_misses += o._cas_misses; _cas_badval += o._cas_badval; _delete_misses += o._delete_misses; _delete_hits += o._delete_hits; _incr_misses += o._incr_misses; 
_incr_hits += o._incr_hits; _decr_misses += o._decr_misses; _decr_hits += o._decr_hits; _expired += o._expired; _evicted += o._evicted; _bytes += o._bytes; _resize_failure += o._resize_failure; _size += o._size; _reclaims += o._reclaims; } }; enum class cas_result { not_found, stored, bad_version }; struct remote_origin_tag { template static inline T move_if_local(T& ref) { return ref; } }; struct local_origin_tag { template static inline T move_if_local(T& ref) { return std::move(ref); } }; struct item_insertion_data { item_key key; sstring ascii_prefix; sstring data; expiration expiry; }; class cache { private: using cache_type = bi::unordered_set, bi::power_2_buckets, bi::constant_time_size>; using cache_iterator = typename cache_type::iterator; static constexpr size_t initial_bucket_count = 1 << 10; static constexpr float load_factor = 0.75f; size_t _resize_up_threshold = load_factor * initial_bucket_count; std::vector _buckets; cache_type _cache; seastar::timer_set _alive; timer _timer; // delta in seconds between the current values of a wall clock and a clock_type clock clock_type::duration _wc_to_clock_type_delta; cache_stats _stats; timer _flush_timer; private: size_t item_size(item& item_ref) { constexpr size_t field_alignment = alignof(void*); return sizeof(item) + align_up(item_ref.key_size(), field_alignment) + align_up(item_ref.ascii_prefix_size(), field_alignment) + item_ref.value_size(); } size_t item_size(item_insertion_data& insertion) { constexpr size_t field_alignment = alignof(void*); auto size = sizeof(item) + align_up(insertion.key.key().size(), field_alignment) + align_up(insertion.ascii_prefix.size(), field_alignment) + insertion.data.size(); #ifdef __DEBUG__ static bool print_item_footprint = true; if (print_item_footprint) { print_item_footprint = false; std::cout << __FUNCTION__ << ": " << size << "\n"; std::cout << "sizeof(item) " << sizeof(item) << "\n"; std::cout << "key.size " << insertion.key.key().size() << "\n"; std::cout << 
"value.size " << insertion.data.size() << "\n"; std::cout << "ascii_prefix.size " << insertion.ascii_prefix.size() << "\n"; } #endif return size; } template void erase(item& item_ref) { if (IsInCache) { _cache.erase(_cache.iterator_to(item_ref)); } if (IsInTimerList) { if (item_ref._expiry.ever_expires()) { _alive.remove(item_ref); } } _stats._bytes -= item_size(item_ref); if (Release) { // memory used by item shouldn't be freed when slab is replacing it with another item. intrusive_ptr_release(&item_ref); } } void expire() { using namespace std::chrono; // // Adjust the delta on every timer event to minimize an error caused // by a wall clock adjustment. // _wc_to_clock_type_delta = duration_cast(clock_type::now().time_since_epoch() - system_clock::now().time_since_epoch()); auto exp = _alive.expire(clock_type::now()); while (!exp.empty()) { auto item = &*exp.begin(); exp.pop_front(); erase(*item); _stats._expired++; } _timer.arm(_alive.get_next_timeout()); } inline cache_iterator find(const item_key& key) { return _cache.find(key, std::hash(), item_key_cmp()); } template inline cache_iterator add_overriding(cache_iterator i, item_insertion_data& insertion) { auto& old_item = *i; uint64_t old_item_version = old_item._version; erase(old_item); size_t size = item_size(insertion); auto new_item = slab->create(size, Origin::move_if_local(insertion.key), Origin::move_if_local(insertion.ascii_prefix), Origin::move_if_local(insertion.data), insertion.expiry, old_item_version + 1); intrusive_ptr_add_ref(new_item); auto insert_result = _cache.insert(*new_item); SEASTAR_ASSERT(insert_result.second); if (insertion.expiry.ever_expires() && _alive.insert(*new_item)) { _timer.rearm(new_item->get_timeout()); } _stats._bytes += size; return insert_result.first; } template inline void add_new(item_insertion_data& insertion) { size_t size = item_size(insertion); auto new_item = slab->create(size, Origin::move_if_local(insertion.key), Origin::move_if_local(insertion.ascii_prefix), 
Origin::move_if_local(insertion.data), insertion.expiry); intrusive_ptr_add_ref(new_item); auto& item_ref = *new_item; _cache.insert(item_ref); if (insertion.expiry.ever_expires() && _alive.insert(item_ref)) { _timer.rearm(item_ref.get_timeout()); } _stats._bytes += size; maybe_rehash(); } void maybe_rehash() { if (_cache.size() >= _resize_up_threshold) { auto new_size = _cache.bucket_count() * 2; std::vector old_buckets; try { old_buckets = std::exchange(_buckets, std::vector(new_size)); } catch (const std::bad_alloc& e) { _stats._resize_failure++; return; } _cache.rehash(typename cache_type::bucket_traits(_buckets.data(), new_size)); _resize_up_threshold = _cache.bucket_count() * load_factor; } } public: cache(uint64_t per_cpu_slab_size, uint64_t slab_page_size) : _buckets(initial_bucket_count) , _cache(cache_type::bucket_traits(_buckets.data(), initial_bucket_count)) { using namespace std::chrono; _wc_to_clock_type_delta = duration_cast(clock_type::now().time_since_epoch() - system_clock::now().time_since_epoch()); _timer.set_callback([this] { expire(); }); _flush_timer.set_callback([this] { flush_all(); }); // initialize per-thread slab allocator. 
slab_holder = std::make_unique>(default_slab_growth_factor, per_cpu_slab_size, slab_page_size, [this](item& item_ref) { erase(item_ref); _stats._evicted++; }); slab = slab_holder.get(); #ifdef __DEBUG__ static bool print_slab_classes = true; if (print_slab_classes) { print_slab_classes = false; slab->print_slab_classes(); } #endif } ~cache() { flush_all(); } void flush_all() { _flush_timer.cancel(); _cache.erase_and_dispose(_cache.begin(), _cache.end(), [this] (item* it) { erase(*it); }); } void flush_at(uint32_t time) { auto expiry = expiration(get_wc_to_clock_type_delta(), time); _flush_timer.rearm(expiry.to_time_point()); } template bool set(item_insertion_data& insertion) { auto i = find(insertion.key); if (i != _cache.end()) { add_overriding(i, insertion); _stats._set_replaces++; return true; } else { add_new(insertion); _stats._set_adds++; return false; } } template bool add(item_insertion_data& insertion) { auto i = find(insertion.key); if (i != _cache.end()) { return false; } _stats._set_adds++; add_new(insertion); return true; } template bool replace(item_insertion_data& insertion) { auto i = find(insertion.key); if (i == _cache.end()) { return false; } _stats._set_replaces++; add_overriding(i, insertion); return true; } bool remove(const item_key& key) { auto i = find(key); if (i == _cache.end()) { _stats._delete_misses++; return false; } _stats._delete_hits++; auto& item_ref = *i; erase(item_ref); return true; } item_ptr get(const item_key& key) { auto i = find(key); if (i == _cache.end()) { _stats._get_misses++; return nullptr; } _stats._get_hits++; auto& item_ref = *i; return item_ptr(&item_ref); } template cas_result cas(item_insertion_data& insertion, item::version_type version) { auto i = find(insertion.key); if (i == _cache.end()) { _stats._cas_misses++; return cas_result::not_found; } auto& item_ref = *i; if (item_ref._version != version) { _stats._cas_badval++; return cas_result::bad_version; } _stats._cas_hits++; add_overriding(i, insertion); 
return cas_result::stored; } size_t size() { return _cache.size(); } size_t bucket_count() { return _cache.bucket_count(); } cache_stats stats() { _stats._size = size(); return _stats; } template std::pair incr(item_key& key, uint64_t delta) { auto i = find(key); if (i == _cache.end()) { _stats._incr_misses++; return {item_ptr{}, false}; } auto& item_ref = *i; _stats._incr_hits++; auto value = item_ref.data_as_integral(); if (!value) { return {boost::intrusive_ptr(&item_ref), false}; } item_insertion_data insertion { .key = Origin::move_if_local(key), .ascii_prefix = sstring(item_ref.ascii_prefix().data(), item_ref.ascii_prefix_size()), .data = to_sstring(*value + delta), .expiry = item_ref._expiry }; i = add_overriding(i, insertion); return {boost::intrusive_ptr(&*i), true}; } template std::pair decr(item_key& key, uint64_t delta) { auto i = find(key); if (i == _cache.end()) { _stats._decr_misses++; return {item_ptr{}, false}; } auto& item_ref = *i; _stats._decr_hits++; auto value = item_ref.data_as_integral(); if (!value) { return {boost::intrusive_ptr(&item_ref), false}; } item_insertion_data insertion { .key = Origin::move_if_local(key), .ascii_prefix = sstring(item_ref.ascii_prefix().data(), item_ref.ascii_prefix_size()), .data = to_sstring(*value - std::min(*value, delta)), .expiry = item_ref._expiry }; i = add_overriding(i, insertion); return {boost::intrusive_ptr(&*i), true}; } std::pair>> print_hash_stats() { static constexpr unsigned bits = sizeof(size_t) * 8; size_t histo[bits + 1] {}; size_t max_size = 0; unsigned max_bucket = 0; for (size_t i = 0; i < _cache.bucket_count(); i++) { size_t size = _cache.bucket_size(i); unsigned bucket; if (size == 0) { bucket = 0; } else { bucket = bits - count_leading_zeros(size); } max_bucket = std::max(max_bucket, bucket); max_size = std::max(max_size, size); histo[bucket]++; } std::stringstream ss; ss << "size: " << _cache.size() << "\n"; ss << "buckets: " << _cache.bucket_count() << "\n"; ss << "load: " << 
format("{:.2f}", (double)_cache.size() / _cache.bucket_count()) << "\n"; ss << "max bucket occupancy: " << max_size << "\n"; ss << "bucket occupancy histogram:\n"; for (unsigned i = 0; i < (max_bucket + 2); i++) { ss << " "; if (i == 0) { ss << "0: "; } else if (i == 1) { ss << "1: "; } else { ss << (1 << (i - 1)) << "+: "; } ss << histo[i] << "\n"; } return {this_shard_id(), make_foreign(make_lw_shared(ss.str()))}; } future<> stop() { return make_ready_future<>(); } clock_type::duration get_wc_to_clock_type_delta() { return _wc_to_clock_type_delta; } }; class sharded_cache { private: distributed& _peers; inline unsigned get_cpu(const item_key& key) { return std::hash()(key) % smp::count; } public: sharded_cache(distributed& peers) : _peers(peers) {} future<> flush_all() { return _peers.invoke_on_all(&cache::flush_all); } future<> flush_at(uint32_t time) { return _peers.invoke_on_all(&cache::flush_at, time); } auto get_wc_to_clock_type_delta() { return _peers.local().get_wc_to_clock_type_delta(); } // The caller must keep @insertion live until the resulting future resolves. future set(item_insertion_data& insertion) { auto cpu = get_cpu(insertion.key); if (this_shard_id() == cpu) { return make_ready_future(_peers.local().set(insertion)); } return _peers.invoke_on(cpu, &cache::set, std::ref(insertion)); } // The caller must keep @insertion live until the resulting future resolves. future add(item_insertion_data& insertion) { auto cpu = get_cpu(insertion.key); if (this_shard_id() == cpu) { return make_ready_future(_peers.local().add(insertion)); } return _peers.invoke_on(cpu, &cache::add, std::ref(insertion)); } // The caller must keep @insertion live until the resulting future resolves. 
future replace(item_insertion_data& insertion) { auto cpu = get_cpu(insertion.key); if (this_shard_id() == cpu) { return make_ready_future(_peers.local().replace(insertion)); } return _peers.invoke_on(cpu, &cache::replace, std::ref(insertion)); } // The caller must keep @key live until the resulting future resolves. future remove(const item_key& key) { auto cpu = get_cpu(key); return _peers.invoke_on(cpu, &cache::remove, std::ref(key)); } // The caller must keep @key live until the resulting future resolves. future get(const item_key& key) { auto cpu = get_cpu(key); return _peers.invoke_on(cpu, &cache::get, std::ref(key)); } // The caller must keep @insertion live until the resulting future resolves. future cas(item_insertion_data& insertion, item::version_type version) { auto cpu = get_cpu(insertion.key); if (this_shard_id() == cpu) { return make_ready_future(_peers.local().cas(insertion, version)); } return _peers.invoke_on(cpu, &cache::cas, std::ref(insertion), std::move(version)); } future stats() { return _peers.map_reduce(adder(), &cache::stats); } // The caller must keep @key live until the resulting future resolves. future> incr(item_key& key, uint64_t delta) { auto cpu = get_cpu(key); if (this_shard_id() == cpu) { return make_ready_future>( _peers.local().incr(key, delta)); } return _peers.invoke_on(cpu, &cache::incr, std::ref(key), std::move(delta)); } // The caller must keep @key live until the resulting future resolves. 
future> decr(item_key& key, uint64_t delta) { auto cpu = get_cpu(key); if (this_shard_id() == cpu) { return make_ready_future>( _peers.local().decr(key, delta)); } return _peers.invoke_on(cpu, &cache::decr, std::ref(key), std::move(delta)); } future<> print_hash_stats(output_stream& out) { return _peers.map_reduce([&out] (std::pair>> data) mutable { return out.write("=== CPU " + std::to_string(data.first) + " ===\r\n") .then([&out, str = std::move(data.second)] { return out.write(*str); }); }, &cache::print_hash_stats); } }; struct system_stats { uint32_t _curr_connections {}; uint32_t _total_connections {}; uint64_t _cmd_get {}; uint64_t _cmd_set {}; uint64_t _cmd_flush {}; clock_type::time_point _start_time; public: system_stats() { _start_time = clock_type::time_point::max(); } system_stats(clock_type::time_point start_time) : _start_time(start_time) { } system_stats self() { return *this; } void operator+=(const system_stats& other) { _curr_connections += other._curr_connections; _total_connections += other._total_connections; _cmd_get += other._cmd_get; _cmd_set += other._cmd_set; _cmd_flush += other._cmd_flush; _start_time = std::min(_start_time, other._start_time); } future<> stop() { return make_ready_future<>(); } }; class ascii_protocol { private: using this_type = ascii_protocol; sharded_cache& _cache; distributed& _system_stats; memcache_ascii_parser _parser; item_key _item_key; item_insertion_data _insertion; std::vector _items; private: static constexpr const char *msg_crlf = "\r\n"; static constexpr const char *msg_error = "ERROR\r\n"; static constexpr const char *msg_stored = "STORED\r\n"; static constexpr const char *msg_not_stored = "NOT_STORED\r\n"; static constexpr const char *msg_end = "END\r\n"; static constexpr const char *msg_value = "VALUE "; static constexpr const char *msg_deleted = "DELETED\r\n"; static constexpr const char *msg_not_found = "NOT_FOUND\r\n"; static constexpr const char *msg_ok = "OK\r\n"; static constexpr const char 
*msg_version = "VERSION " VERSION_STRING "\r\n"; static constexpr const char *msg_exists = "EXISTS\r\n"; static constexpr const char *msg_stat = "STAT "; static constexpr const char *msg_out_of_memory = "SERVER_ERROR Out of memory allocating new item\r\n"; static constexpr const char *msg_error_non_numeric_value = "CLIENT_ERROR cannot increment or decrement non-numeric value\r\n"; private: template static void append_item(scattered_message& msg, item_ptr item) { if (!item) { return; } msg.append_static("VALUE "); msg.append_static(item->key()); msg.append_static(item->ascii_prefix()); if (WithVersion) { msg.append_static(" "); msg.append(to_sstring(item->version())); } msg.append_static(msg_crlf); msg.append_static(item->value()); msg.append_static(msg_crlf); msg.on_delete([item = std::move(item)] {}); } template future<> handle_get(output_stream& out) { _system_stats.local()._cmd_get++; if (_parser._keys.size() == 1) { return _cache.get(_parser._keys[0]).then([&out] (auto item) -> future<> { scattered_message msg; this_type::append_item(msg, std::move(item)); msg.append_static(msg_end); return out.write(std::move(msg)); }); } else { _items.clear(); return parallel_for_each(_parser._keys.begin(), _parser._keys.end(), [this] (const auto& key) { return _cache.get(key).then([this] (auto item) { _items.emplace_back(std::move(item)); }); }).then([this, &out] () { scattered_message msg; for (auto& item : _items) { append_item(msg, std::move(item)); } msg.append_static(msg_end); return out.write(std::move(msg)); }); } } template static future<> print_stat(output_stream& out, const char* key, Value value) { return out.write(msg_stat) .then([&out, key] { return out.write(key); }) .then([&out] { return out.write(" "); }) .then([&out, value] { return out.write(to_sstring(value)); }) .then([&out] { return out.write(msg_crlf); }); } future<> print_stats(output_stream& out) { return _cache.stats().then([this, &out] (auto stats) { return _system_stats.map_reduce(adder(), 
&system_stats::self) .then([&out, all_cache_stats = std::move(stats)] (auto all_system_stats) -> future<> { auto now = clock_type::now(); auto total_items = all_cache_stats._set_replaces + all_cache_stats._set_adds + all_cache_stats._cas_hits; return print_stat(out, "pid", getpid()) .then([&out, uptime = now - all_system_stats._start_time] { return print_stat(out, "uptime", std::chrono::duration_cast(uptime).count()); }).then([now, &out] { return print_stat(out, "time", std::chrono::duration_cast(now.time_since_epoch()).count()); }).then([&out] { return print_stat(out, "version", VERSION_STRING); }).then([&out] { return print_stat(out, "pointer_size", sizeof(void*)*8); }).then([&out, v = all_system_stats._curr_connections] { return print_stat(out, "curr_connections", v); }).then([&out, v = all_system_stats._total_connections] { return print_stat(out, "total_connections", v); }).then([&out, v = all_system_stats._curr_connections] { return print_stat(out, "connection_structures", v); }).then([&out, v = all_system_stats._cmd_get] { return print_stat(out, "cmd_get", v); }).then([&out, v = all_system_stats._cmd_set] { return print_stat(out, "cmd_set", v); }).then([&out, v = all_system_stats._cmd_flush] { return print_stat(out, "cmd_flush", v); }).then([&out] { return print_stat(out, "cmd_touch", 0); }).then([&out, v = all_cache_stats._get_hits] { return print_stat(out, "get_hits", v); }).then([&out, v = all_cache_stats._get_misses] { return print_stat(out, "get_misses", v); }).then([&out, v = all_cache_stats._delete_misses] { return print_stat(out, "delete_misses", v); }).then([&out, v = all_cache_stats._delete_hits] { return print_stat(out, "delete_hits", v); }).then([&out, v = all_cache_stats._incr_misses] { return print_stat(out, "incr_misses", v); }).then([&out, v = all_cache_stats._incr_hits] { return print_stat(out, "incr_hits", v); }).then([&out, v = all_cache_stats._decr_misses] { return print_stat(out, "decr_misses", v); }).then([&out, v = 
all_cache_stats._decr_hits] { return print_stat(out, "decr_hits", v); }).then([&out, v = all_cache_stats._cas_misses] { return print_stat(out, "cas_misses", v); }).then([&out, v = all_cache_stats._cas_hits] { return print_stat(out, "cas_hits", v); }).then([&out, v = all_cache_stats._cas_badval] { return print_stat(out, "cas_badval", v); }).then([&out] { return print_stat(out, "touch_hits", 0); }).then([&out] { return print_stat(out, "touch_misses", 0); }).then([&out] { return print_stat(out, "auth_cmds", 0); }).then([&out] { return print_stat(out, "auth_errors", 0); }).then([&out] { return print_stat(out, "threads", smp::count); }).then([&out, v = all_cache_stats._size] { return print_stat(out, "curr_items", v); }).then([&out, v = total_items] { return print_stat(out, "total_items", v); }).then([&out, v = all_cache_stats._expired] { return print_stat(out, "seastar.expired", v); }).then([&out, v = all_cache_stats._resize_failure] { return print_stat(out, "seastar.resize_failure", v); }).then([&out, v = all_cache_stats._evicted] { return print_stat(out, "evictions", v); }).then([&out, v = all_cache_stats._bytes] { return print_stat(out, "bytes", v); }).then([&out] { return out.write(msg_end); }); }); }); } public: ascii_protocol(sharded_cache& cache, distributed& system_stats) : _cache(cache) , _system_stats(system_stats) {} void prepare_insertion() { _insertion = item_insertion_data{ .key = std::move(_parser._key), .ascii_prefix = make_sstring(" ", _parser._flags_str, " ", _parser._size_str), .data = std::move(_parser._blob), .expiry = expiration(_cache.get_wc_to_clock_type_delta(), _parser._expiration) }; } future<> handle(input_stream& in, output_stream& out) { _parser.init(); return in.consume(_parser).then([this, &out] () -> future<> { switch (_parser._state) { case memcache_ascii_parser::state::eof: return make_ready_future<>(); case memcache_ascii_parser::state::error: return out.write(msg_error); case memcache_ascii_parser::state::cmd_set: { 
_system_stats.local()._cmd_set++; prepare_insertion(); auto f = _cache.set(_insertion); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (...) { return out.write(msg_stored); }); } case memcache_ascii_parser::state::cmd_cas: { _system_stats.local()._cmd_set++; prepare_insertion(); auto f = _cache.cas(_insertion, _parser._version); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (auto result) { switch (result) { case cas_result::stored: return out.write(msg_stored); case cas_result::not_found: return out.write(msg_not_found); case cas_result::bad_version: return out.write(msg_exists); default: std::abort(); } }); } case memcache_ascii_parser::state::cmd_add: { _system_stats.local()._cmd_set++; prepare_insertion(); auto f = _cache.add(_insertion); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (bool added) { return out.write(added ? msg_stored : msg_not_stored); }); } case memcache_ascii_parser::state::cmd_replace: { _system_stats.local()._cmd_set++; prepare_insertion(); auto f = _cache.replace(_insertion); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (auto replaced) { return out.write(replaced ? msg_stored : msg_not_stored); }); } case memcache_ascii_parser::state::cmd_get: return handle_get(out); case memcache_ascii_parser::state::cmd_gets: return handle_get(out); case memcache_ascii_parser::state::cmd_delete: { auto f = _cache.remove(_parser._key); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (bool removed) { return out.write(removed ? 
msg_deleted : msg_not_found); }); } case memcache_ascii_parser::state::cmd_flush_all: { _system_stats.local()._cmd_flush++; if (_parser._expiration) { auto f = _cache.flush_at(_parser._expiration); if (_parser._noreply) { return f; } return std::move(f).then([&out] { return out.write(msg_ok); }); } else { auto f = _cache.flush_all(); if (_parser._noreply) { return f; } return std::move(f).then([&out] { return out.write(msg_ok); }); } } case memcache_ascii_parser::state::cmd_version: return out.write(msg_version); case memcache_ascii_parser::state::cmd_stats: return print_stats(out); case memcache_ascii_parser::state::cmd_stats_hash: return _cache.print_hash_stats(out); case memcache_ascii_parser::state::cmd_incr: { auto f = _cache.incr(_parser._key, _parser._u64); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (auto result) { auto item = std::move(result.first); if (!item) { return out.write(msg_not_found); } auto incremented = result.second; if (!incremented) { return out.write(msg_error_non_numeric_value); } return out.write(item->value().data(), item->value_size()).then([&out] { return out.write(msg_crlf); }); }); } case memcache_ascii_parser::state::cmd_decr: { auto f = _cache.decr(_parser._key, _parser._u64); if (_parser._noreply) { return std::move(f).discard_result(); } return std::move(f).then([&out] (auto result) { auto item = std::move(result.first); if (!item) { return out.write(msg_not_found); } auto decremented = result.second; if (!decremented) { return out.write(msg_error_non_numeric_value); } return out.write(item->value().data(), item->value_size()).then([&out] { return out.write(msg_crlf); }); }); } }; std::abort(); }).then_wrapped([this, &out] (auto&& f) -> future<> { // FIXME: then_wrapped() being scheduled even though no exception was triggered has a // performance cost of about 2.6%. Not using it means maintainability penalty. 
try { f.get(); } catch (std::bad_alloc& e) { if (_parser._noreply) { return make_ready_future<>(); } return out.write(msg_out_of_memory); } return make_ready_future<>(); }); }; }; class udp_server { public: static const size_t default_max_datagram_size = 1400; private: std::optional> _task; sharded_cache& _cache; distributed& _system_stats; udp_channel _chan; uint16_t _port; size_t _max_datagram_size = default_max_datagram_size; struct header { packed _request_id; packed _sequence_number; packed _n; packed _reserved; template auto adjust_endianness(Adjuster a) { return a(_request_id, _sequence_number, _n); } } __attribute__((packed)); struct connection { ipv4_addr _src; uint16_t _request_id; input_stream _in; output_stream _out; std::vector _out_bufs; ascii_protocol _proto; static output_stream_options make_opts() noexcept { output_stream_options opts; opts.trim_to_size = true; return opts; } connection(ipv4_addr src, uint16_t request_id, input_stream&& in, size_t out_size, sharded_cache& c, distributed& system_stats) : _src(src) , _request_id(request_id) , _in(std::move(in)) , _out(output_stream(data_sink(std::make_unique(_out_bufs)), out_size, make_opts())) , _proto(c, system_stats) {} future<> respond(udp_channel& chan) { int i = 0; return do_for_each(_out_bufs.begin(), _out_bufs.end(), [this, i, &chan] (packet& p) mutable { header* out_hdr = p.prepend_header
(0); out_hdr->_request_id = _request_id; out_hdr->_sequence_number = i++; out_hdr->_n = _out_bufs.size(); *out_hdr = hton(*out_hdr); return chan.send(_src, std::move(p)); }); } }; public: udp_server(sharded_cache& c, distributed& system_stats, uint16_t port = 11211) : _cache(c) , _system_stats(system_stats) , _port(port) {} void set_max_datagram_size(size_t max_datagram_size) { _max_datagram_size = max_datagram_size; } void start() { _chan = make_bound_datagram_channel({_port}); // Run in the background. _task = keep_doing([this] { return _chan.receive().then([this](datagram dgram) { packet& p = dgram.get_data(); if (p.len() < sizeof(header)) { // dropping invalid packet return make_ready_future<>(); } header hdr = ntoh(*p.get_header
()); p.trim_front(sizeof(hdr)); auto request_id = hdr._request_id; auto in = as_input_stream(std::move(p)); auto conn = make_lw_shared(dgram.get_src(), request_id, std::move(in), _max_datagram_size - sizeof(header), _cache, _system_stats); if (hdr._n != 1 || hdr._sequence_number != 0) { return conn->_out.write("CLIENT_ERROR only single-datagram requests supported\r\n").then([this, conn] { return conn->_out.flush().then([this, conn] { return conn->respond(_chan).then([conn] {}); }); }); } return conn->_proto.handle(conn->_in, conn->_out).then([this, conn]() mutable { return conn->_out.flush().then([this, conn] { return conn->respond(_chan).then([conn] {}); }); }); }); }); }; future<> stop() { _chan.shutdown_input(); _chan.shutdown_output(); return _task->handle_exception([](std::exception_ptr e) { std::cerr << "exception in udp_server " << e << '\n'; }); } }; class tcp_server { private: std::optional> _task; lw_shared_ptr _listener; sharded_cache& _cache; distributed& _system_stats; uint16_t _port; struct connection { connected_socket _socket; socket_address _addr; input_stream _in; output_stream _out; ascii_protocol _proto; distributed& _system_stats; connection(connected_socket&& socket, socket_address addr, sharded_cache& c, distributed& system_stats) : _socket(std::move(socket)) , _addr(addr) , _in(_socket.input()) , _out(_socket.output()) , _proto(c, system_stats) , _system_stats(system_stats) { _system_stats.local()._curr_connections++; _system_stats.local()._total_connections++; } ~connection() { _system_stats.local()._curr_connections--; } }; public: tcp_server(sharded_cache& cache, distributed& system_stats, uint16_t port = 11211) : _cache(cache) , _system_stats(system_stats) , _port(port) {} void start() { listen_options lo; lo.reuse_address = true; _listener = make_lw_shared(seastar::listen(make_ipv4_address({_port}), lo)); // Run in the background until eof has reached on the input connection. 
_task = keep_doing([this] { return _listener->accept().then([this] (accept_result ar) mutable { connected_socket fd = std::move(ar.connection); socket_address addr = std::move(ar.remote_address); auto conn = make_lw_shared(std::move(fd), addr, _cache, _system_stats); (void)do_until([conn] { return conn->_in.eof(); }, [conn] { return conn->_proto.handle(conn->_in, conn->_out).then([conn] { return conn->_out.flush(); }); }).finally([conn] { return conn->_out.close().finally([conn]{}); }); }); }); } future<> stop() { _listener->abort_accept(); return _task->handle_exception([](std::exception_ptr e) { std::cerr << "exception in tcp_server " << e << '\n'; }); } }; class stats_printer { private: timer<> _timer; sharded_cache& _cache; public: stats_printer(sharded_cache& cache) : _cache(cache) {} void start() { _timer.set_callback([this] { (void)_cache.stats().then([] (auto stats) { auto gets_total = stats._get_hits + stats._get_misses; auto get_hit_rate = gets_total ? ((double)stats._get_hits * 100 / gets_total) : 0; auto sets_total = stats._set_adds + stats._set_replaces; auto set_replace_rate = sets_total ? 
((double)stats._set_replaces * 100/ sets_total) : 0; std::cout << "items: " << stats._size << " " << std::setprecision(2) << std::fixed << "get: " << stats._get_hits << "/" << gets_total << " (" << get_hit_rate << "%) " << "set: " << stats._set_replaces << "/" << sets_total << " (" << set_replace_rate << "%)"; std::cout << std::endl; }); }); _timer.arm_periodic(std::chrono::seconds(1)); } future<> stop() { return make_ready_future<>(); } }; } /* namespace memcache */ int main(int ac, char** av) { distributed cache_peers; memcache::sharded_cache cache(cache_peers); distributed system_stats; distributed udp_server; distributed tcp_server; memcache::stats_printer stats(cache); namespace bpo = boost::program_options; app_template app; app.add_options() ("max-datagram-size", bpo::value()->default_value(memcache::udp_server::default_max_datagram_size), "Maximum size of UDP datagram") ("max-slab-size", bpo::value()->default_value(memcache::default_per_cpu_slab_size/MB), "Maximum memory to be used for items (value in megabytes) (reclaimer is disabled if set)") ("slab-page-size", bpo::value()->default_value(memcache::default_slab_page_size/MB), "Size of slab page (value in megabytes)") ("stats", "Print basic statistics periodically (every second)") ("port", bpo::value()->default_value(11211), "Specify UDP and TCP ports for memcached server to listen on") ; return app.run_deprecated(ac, av, [&] { internal::at_exit([&] { return tcp_server.stop(); }); internal::at_exit([&] { return udp_server.stop(); }); internal::at_exit([&] { return cache_peers.stop(); }); internal::at_exit([&] { return system_stats.stop(); }); auto&& config = app.configuration(); uint16_t port = config["port"].as(); uint64_t per_cpu_slab_size = config["max-slab-size"].as() * MB; uint64_t slab_page_size = config["slab-page-size"].as() * MB; return cache_peers.start(std::move(per_cpu_slab_size), std::move(slab_page_size)).then([&system_stats] { return system_stats.start(memcache::clock_type::now()); 
}).then([&] { std::cout << PLATFORM << " memcached " << VERSION << "\n"; return make_ready_future<>(); }).then([&, port] { return tcp_server.start(std::ref(cache), std::ref(system_stats), port); }).then([&tcp_server] { return tcp_server.invoke_on_all(&memcache::tcp_server::start); }).then([&, port] { if (engine().net().has_per_core_namespace()) { return udp_server.start(std::ref(cache), std::ref(system_stats), port); } else { return udp_server.start_single(std::ref(cache), std::ref(system_stats), port); } }).then([&] { return udp_server.invoke_on_all(&memcache::udp_server::set_max_datagram_size, (size_t)config["max-datagram-size"].as()); }).then([&] { return udp_server.invoke_on_all(&memcache::udp_server::start); }).then([&stats, start_stats = config.count("stats")] { if (start_stats) { stats.start(); } }); }); } seastar-25.05.0/apps/memcached/memcached.hh000066400000000000000000000033551501510432000204220ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ #pragma once #include namespace memcache { using namespace seastar; class item; class cache; class item_key { private: sstring _key; size_t _hash; public: item_key() = default; item_key(item_key&) = default; item_key(sstring key) : _key(key) , _hash(std::hash()(key)) {} item_key(item_key&& other) : _key(std::move(other._key)) , _hash(other._hash) { other._hash = 0; } size_t hash() const { return _hash; } const sstring& key() const { return _key; } bool operator==(const item_key& other) const { return other._hash == _hash && other._key == _key; } void operator=(item_key&& other) { _key = std::move(other._key); _hash = other._hash; other._hash = 0; } }; } namespace std { template <> struct hash { size_t operator()(const memcache::item_key& key) { return key.hash(); } }; } /* namespace std */ seastar-25.05.0/apps/memcached/tests/000077500000000000000000000000001501510432000173275ustar00rootroot00000000000000seastar-25.05.0/apps/memcached/tests/CMakeLists.txt000066400000000000000000000045401501510432000220720ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. 
# if (Seastar_EXECUTE_ONLY_FAST_TESTS) set (memcached_test_args --fast) else () set (memcached_test_args "") endif () add_custom_target (app_memcached_test_memcached_run DEPENDS ${memcached_app} ${CMAKE_CURRENT_SOURCE_DIR}/test.py ${CMAKE_CURRENT_SOURCE_DIR}/test_memcached.py COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/test.py --memcached $ ${memcached_test_args} USES_TERMINAL) add_test ( NAME Seastar.app.memcached.memcached COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target app_memcached_test_memcached_run) set_tests_properties (Seastar.app.memcached.memcached PROPERTIES TIMEOUT ${Seastar_TEST_TIMEOUT} ENVIRONMENT "${Seastar_TEST_ENVIRONMENT}") add_executable (app_memcached_test_ascii test_ascii_parser.cc) add_dependencies (app_memcached_test_ascii app_memcached) target_include_directories (app_memcached_test_ascii PRIVATE ${CMAKE_CURRENT_SOURCE_DIR} ${Seastar_APP_MEMCACHED_BINARY_DIR} ${Seastar_APP_MEMCACHED_SOURCE_DIR}) target_compile_definitions (app_memcached_test_ascii PRIVATE SEASTAR_TESTING_MAIN) target_link_libraries (app_memcached_test_ascii PRIVATE seastar_private seastar_testing) add_custom_target (app_memcached_test_ascii_run DEPENDS app_memcached_test_ascii COMMAND app_memcached_test_ascii -- -c 2 USES_TERMINAL) add_test ( NAME Seastar.app.memcached.ascii COMMAND ${CMAKE_COMMAND} --build ${CMAKE_BINARY_DIR} --target app_memcached_test_ascii_run) set_tests_properties (Seastar.app.memcached.ascii PROPERTIES TIMEOUT ${Seastar_TEST_TIMEOUT} ENVIRONMENT "${Seastar_TEST_ENVIRONMENT}") seastar-25.05.0/apps/memcached/tests/test.py000077500000000000000000000031761501510432000206720ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. 
# # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import time import sys import os import argparse import subprocess DIR_PATH = os.path.dirname(os.path.realpath(__file__)) def run(args, cmd): mc = subprocess.Popen([args.memcached, '--smp=2']) print('Memcached started.') try: cmdline = [DIR_PATH + '/test_memcached.py'] + cmd if args.fast: cmdline.append('--fast') print('Running: ' + ' '.join(cmdline)) subprocess.check_call(cmdline) finally: print('Killing memcached...') mc.terminate(); mc.wait() print('Memcached killed.') if __name__ == "__main__": parser = argparse.ArgumentParser(description="Seastar test runner") parser.add_argument('--fast', action="store_true", help="Run only fast tests") parser.add_argument('--memcached', required=True, help='Path of the memcached executable') args = parser.parse_args() run(args, []) run(args, ['-U']) seastar-25.05.0/apps/memcached/tests/test_ascii_parser.cc000066400000000000000000000326471501510432000233550ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include "ascii.hh" #include using namespace seastar; using namespace net; using namespace memcache; using parser_type = memcache_ascii_parser; static packet make_packet(std::vector chunks, size_t buffer_size) { packet p; for (auto&& chunk : chunks) { size_t size = chunk.size(); for (size_t pos = 0; pos < size; pos += buffer_size) { auto now = std::min(pos + buffer_size, chunk.size()) - pos; p.append(packet(chunk.data() + pos, now)); } } return p; } static auto make_input_stream(packet&& p) { return input_stream(data_source( std::make_unique(std::move(p)))); } static auto parse(packet&& p) { auto is = make_lw_shared>(make_input_stream(std::move(p))); auto parser = make_lw_shared(); parser->init(); return is->consume(*parser).then([is, parser] { return make_ready_future>(parser); }); } auto for_each_fragment_size = [] (auto&& func) { static std::vector buffer_sizes = { 100000, 1000, 100, 10, 5, 2, 1 }; return do_for_each(buffer_sizes.begin(), buffer_sizes.end(), [func] (size_t buffer_size) { return func([buffer_size] (std::vector chunks) { return make_packet(chunks, buffer_size); }); }); }; SEASTAR_TEST_CASE(test_set_command_is_parsed) { return for_each_fragment_size([] (auto make_packet) { return parse(make_packet({"set key 1 2 3\r\nabc\r\n"})).then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_flags_str == "1"); BOOST_REQUIRE(p->_expiration == 2); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_blob == "abc"); }); }); } SEASTAR_TEST_CASE(test_empty_data_is_parsed) { return for_each_fragment_size([] (auto make_packet) { return parse(make_packet({"set key 1 2 0\r\n\r\n"})).then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); 
BOOST_REQUIRE(p->_flags_str == "1"); BOOST_REQUIRE(p->_expiration == 2); BOOST_REQUIRE(p->_size == 0); BOOST_REQUIRE(p->_size_str == "0"); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_blob == ""); }); }); } SEASTAR_TEST_CASE(test_superflous_data_is_an_error) { return for_each_fragment_size([] (auto make_packet) { return parse(make_packet({"set key 0 0 0\r\nasd\r\n"})).then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::error); }); }); } SEASTAR_TEST_CASE(test_not_enough_data_is_an_error) { return for_each_fragment_size([] (auto make_packet) { return parse(make_packet({"set key 0 0 3\r\n"})).then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::error); }); }); } SEASTAR_TEST_CASE(test_u32_parsing) { return for_each_fragment_size([] (auto make_packet) { return make_ready_future<>().then([make_packet] { return parse(make_packet({"set key 0 0 0\r\n\r\n"})).then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_flags_str == "0"); }); }).then([make_packet] { return parse(make_packet({"set key 12345 0 0\r\n\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_flags_str == "12345"); }); }).then([make_packet] { return parse(make_packet({"set key -1 0 0\r\n\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::error); }); }).then([make_packet] { return parse(make_packet({"set key 1-1 0 0\r\n\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::error); }); }).then([make_packet] { return parse(make_packet({"set key " + std::to_string(std::numeric_limits::max()) + " 0 0\r\n\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_flags_str == to_sstring(std::numeric_limits::max())); }); }); }); } SEASTAR_TEST_CASE(test_parsing_of_split_data) { return for_each_fragment_size([] (auto make_packet) { return make_ready_future<>() .then([make_packet] { return 
parse(make_packet({"set key 11", "1 222 3\r\nasd\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set key 11", "1 22", "2 3", "\r\nasd\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set k", "ey 11", "1 2", "2", "2 3", "\r\nasd\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set key 111 222 3\r\n", "asd\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set key 111 222 3\r\na", "sd\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set key 111 222 3\r\nasd", 
"\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }).then([make_packet] { return parse(make_packet({"set key 111 222 3\r\nasd\r", "\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key"); BOOST_REQUIRE(p->_flags_str == "111"); BOOST_REQUIRE(p->_expiration == 222); BOOST_REQUIRE(p->_size == 3); BOOST_REQUIRE(p->_size_str == "3"); BOOST_REQUIRE(p->_blob == "asd"); }); }); }); } static std::vector as_strings(std::vector& keys) { std::vector v; for (auto&& key : keys) { v.push_back(key.key()); } return v; } SEASTAR_TEST_CASE(test_get_parsing) { return for_each_fragment_size([] (auto make_packet) { return make_ready_future<>() .then([make_packet] { return parse(make_packet({"get key1\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_get); BOOST_REQUIRE_EQUAL(as_strings(p->_keys), std::vector({"key1"})); }); }).then([make_packet] { return parse(make_packet({"get key1 key2\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_get); BOOST_REQUIRE_EQUAL(as_strings(p->_keys), std::vector({"key1", "key2"})); }); }).then([make_packet] { return parse(make_packet({"get key1 key2 key3\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::cmd_get); BOOST_REQUIRE_EQUAL(as_strings(p->_keys), std::vector({"key1", "key2", "key3"})); }); }); }); } SEASTAR_TEST_CASE(test_catches_errors_in_get) { return for_each_fragment_size([] (auto make_packet) { return make_ready_future<>() .then([make_packet] { return parse(make_packet({"get\r\n"})) .then([] (auto p) { BOOST_REQUIRE(p->_state == parser_type::state::error); }); }); }); } SEASTAR_TEST_CASE(test_parser_returns_eof_state_when_no_command_follows) 
{ return for_each_fragment_size([] (auto make_packet) { auto p = make_shared(); auto is = make_shared>(make_input_stream(make_packet({"get key\r\n"}))); p->init(); return is->consume(*p).then([p] { BOOST_REQUIRE(p->_state == parser_type::state::cmd_get); }).then([is, p] { p->init(); return is->consume(*p).then([p, is] { BOOST_REQUIRE(p->_state == parser_type::state::eof); }); }); }); } SEASTAR_TEST_CASE(test_incomplete_command_is_an_error) { return for_each_fragment_size([] (auto make_packet) { auto p = make_shared(); auto is = make_shared>(make_input_stream(make_packet({"get"}))); p->init(); return is->consume(*p).then([p] { BOOST_REQUIRE(p->_state == parser_type::state::error); }).then([is, p] { p->init(); return is->consume(*p).then([p, is] { BOOST_REQUIRE(p->_state == parser_type::state::eof); }); }); }); } SEASTAR_TEST_CASE(test_multiple_requests_in_one_stream) { return for_each_fragment_size([] (auto make_packet) { auto p = make_shared(); auto is = make_shared>(make_input_stream(make_packet({"set key1 1 1 5\r\ndata1\r\nset key2 2 2 6\r\ndata2+\r\n"}))); p->init(); return is->consume(*p).then([p] { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key1"); BOOST_REQUIRE(p->_flags_str == "1"); BOOST_REQUIRE(p->_expiration == 1); BOOST_REQUIRE(p->_size == 5); BOOST_REQUIRE(p->_size_str == "5"); BOOST_REQUIRE(p->_blob == "data1"); }).then([is, p] { p->init(); return is->consume(*p).then([p, is] { BOOST_REQUIRE(p->_state == parser_type::state::cmd_set); BOOST_REQUIRE(p->_key.key() == "key2"); BOOST_REQUIRE(p->_flags_str == "2"); BOOST_REQUIRE(p->_expiration == 2); BOOST_REQUIRE(p->_size == 6); BOOST_REQUIRE(p->_size_str == "6"); BOOST_REQUIRE(p->_blob == "data2+"); }); }); }); } seastar-25.05.0/apps/memcached/tests/test_memcached.py000077500000000000000000000534451501510432000226640ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is open source software, licensed to you under the terms # of the Apache License, 
Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # from contextlib import contextmanager import socket import struct import sys import random import argparse import time import re import unittest server_addr = None call = None args = None class TimeoutError(Exception): pass @contextmanager def tcp_connection(timeout=1): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(timeout) s.connect(server_addr) def call(msg): s.send(msg.encode()) return s.recv(16*1024) yield call s.close() def slow(f): def wrapper(self): if args.fast: raise unittest.SkipTest('Slow') return f(self) return wrapper def recv_all(s): m = b'' while True: data = s.recv(1024) if not data: break m += data return m def tcp_call(msg, timeout=1): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.settimeout(timeout) s.connect(server_addr) s.send(msg.encode()) s.shutdown(socket.SHUT_WR) data = recv_all(s) s.close() return data def udp_call_for_fragments(msg, timeout=1): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.settimeout(timeout) this_req_id = random.randint(-32768, 32767) datagram = struct.pack(">hhhh", this_req_id, 0, 1, 0) + msg.encode() sock.sendto(datagram, server_addr) messages = {} n_determined = None while True: data, addr = sock.recvfrom(1500) req_id, seq, n, res = struct.unpack_from(">hhhh", data) content = data[8:] if n_determined and n_determined != n: raise Exception('Inconsitent number of total 
messages, %d and %d' % (n_determined, n)) n_determined = n if req_id != this_req_id: raise Exception('Invalid request id: ' + req_id + ', expected ' + this_req_id) if seq in messages: raise Exception('Duplicate message for seq=' + seq) messages[seq] = content if len(messages) == n: break for k, v in sorted(messages.items(), key=lambda e: e[0]): yield v sock.close() def udp_call(msg, **kwargs): return b''.join(udp_call_for_fragments(msg, **kwargs)) class MemcacheTest(unittest.TestCase): def set(self, key, value, flags=0, expiry=0): self.assertEqual(call('set %s %d %d %d\r\n%s\r\n' % (key, flags, expiry, len(value), value)), b'STORED\r\n') def delete(self, key): self.assertEqual(call('delete %s\r\n' % key), b'DELETED\r\n') def assertHasKey(self, key): resp = call('get %s\r\n' % key) if not resp.startswith(('VALUE %s' % key).encode()): self.fail('Key \'%s\' should be present, but got: %s' % (key, resp.decode())) def assertNoKey(self, key): resp = call('get %s\r\n' % key) if resp != b'END\r\n': self.fail('Key \'%s\' should not be present, but got: %s' % (key, resp.decode())) def setKey(self, key): self.set(key, 'some value') def getItemVersion(self, key): m = re.match(r'VALUE %s \d+ \d+ (?P\d+)' % key, call('gets %s\r\n' % key).decode()) return int(m.group('version')) def getStat(self, name, call_fn=None): if not call_fn: call_fn = call resp = call_fn('stats\r\n').decode() m = re.search(r'STAT %s (?P.+)' % re.escape(name), resp, re.MULTILINE) return m.group('value') def flush(self): self.assertEqual(call('flush_all\r\n'), b'OK\r\n') def tearDown(self): self.flush() class TcpSpecificTests(MemcacheTest): def test_recovers_from_errors_in_the_stream(self): with tcp_connection() as conn: self.assertEqual(conn('get\r\n'), b'ERROR\r\n') self.assertEqual(conn('get key\r\n'), b'END\r\n') def test_incomplete_command_results_in_error(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(server_addr) s.send(b'get') s.shutdown(socket.SHUT_WR) 
self.assertEqual(recv_all(s), b'ERROR\r\n') s.close() def test_stream_closed_results_in_error(self): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(server_addr) s.shutdown(socket.SHUT_WR) self.assertEqual(recv_all(s), b'') s.close() def test_unsuccesful_parsing_does_not_leave_data_behind(self): with tcp_connection() as conn: self.assertEqual(conn('set key 0 0 5\r\nhello\r\n'), b'STORED\r\n') self.assertRegex(conn('delete a b c\r\n'), b'^(CLIENT_)?ERROR.*\r\n$') self.assertEqual(conn('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') self.assertEqual(conn('delete key\r\n'), b'DELETED\r\n') def test_flush_all_no_reply(self): self.assertEqual(call('flush_all noreply\r\n'), b'') def test_set_no_reply(self): self.assertEqual(call('set key 0 0 5 noreply\r\nhello\r\nget key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') self.delete('key') def test_delete_no_reply(self): self.setKey('key') self.assertEqual(call('delete key noreply\r\nget key\r\n'), b'END\r\n') def test_add_no_reply(self): self.assertEqual(call('add key 0 0 1 noreply\r\na\r\nget key\r\n'), b'VALUE key 0 1\r\na\r\nEND\r\n') self.delete('key') def test_replace_no_reply(self): self.assertEqual(call('set key 0 0 1\r\na\r\n'), b'STORED\r\n') self.assertEqual(call('replace key 0 0 1 noreply\r\nb\r\nget key\r\n'), b'VALUE key 0 1\r\nb\r\nEND\r\n') self.delete('key') def test_cas_noreply(self): self.assertNoKey('key') self.assertEqual(call('cas key 0 0 1 1 noreply\r\na\r\n'), b'') self.assertNoKey('key') self.assertEqual(call('add key 0 0 5\r\nhello\r\n'), b'STORED\r\n') version = self.getItemVersion('key') self.assertEqual(call('cas key 1 0 5 %d noreply\r\naloha\r\n' % (version + 1)), b'') self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') self.assertEqual(call('cas key 1 0 5 %d noreply\r\naloha\r\n' % (version)), b'') self.assertEqual(call('get key\r\n'), b'VALUE key 1 5\r\naloha\r\nEND\r\n') self.delete('key') @slow def test_connection_statistics(self): with 
tcp_connection() as conn: curr_connections = int(self.getStat('curr_connections', call_fn=conn)) total_connections = int(self.getStat('total_connections', call_fn=conn)) with tcp_connection() as conn2: self.assertEqual(curr_connections + 1, int(self.getStat('curr_connections', call_fn=conn))) self.assertEqual(total_connections + 1, int(self.getStat('total_connections', call_fn=conn))) self.assertEqual(total_connections + 1, int(self.getStat('total_connections', call_fn=conn))) time.sleep(0.1) self.assertEqual(curr_connections, int(self.getStat('curr_connections', call_fn=conn))) class UdpSpecificTests(MemcacheTest): def test_large_response_is_split_into_mtu_chunks(self): max_datagram_size = 1400 data = '1' * (max_datagram_size*3) self.set('key', data) chunks = list(udp_call_for_fragments('get key\r\n')) for chunk in chunks: self.assertLessEqual(len(chunk), max_datagram_size) self.assertEqual(b''.join(chunks).decode(), 'VALUE key 0 %d\r\n%s\r\n' \ 'END\r\n' % (len(data), data)) self.delete('key') class TestCommands(MemcacheTest): def test_basic_commands(self): self.assertEqual(call('get key\r\n'), b'END\r\n') self.assertEqual(call('set key 0 0 5\r\nhello\r\n'), b'STORED\r\n') self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') self.assertEqual(call('delete key\r\n'), b'DELETED\r\n') self.assertEqual(call('delete key\r\n'), b'NOT_FOUND\r\n') self.assertEqual(call('get key\r\n'), b'END\r\n') def test_error_handling(self): self.assertEqual(call('get\r\n'), b'ERROR\r\n') @slow def test_expiry(self): self.assertEqual(call('set key 0 1 5\r\nhello\r\n'), b'STORED\r\n') self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') time.sleep(2) self.assertEqual(call('get key\r\n'), b'END\r\n') @slow def test_expiry_at_epoch_time(self): expiry = int(time.time()) + 2 self.assertEqual(call('set key 0 %d 5\r\nhello\r\n' % expiry), b'STORED\r\n') self.assertEqual(call('get key\r\n'), b'VALUE key 0 5\r\nhello\r\nEND\r\n') time.sleep(3) 
self.assertEqual(call('get key\r\n'), b'END\r\n') def test_multiple_keys_in_get(self): self.assertEqual(call('set key1 0 0 2\r\nv1\r\n'), b'STORED\r\n') self.assertEqual(call('set key 0 0 2\r\nv2\r\n'), b'STORED\r\n') resp = call('get key1 key\r\n') self.assertRegex(resp, b'^(VALUE key1 0 2\r\nv1\r\nVALUE key 0 2\r\nv2\r\nEND\r\n)|(VALUE key 0 2\r\nv2\r\nVALUE key1 0 2\r\nv1\r\nEND\r\n)$') self.delete("key") self.delete("key1") def test_flush_all(self): self.set('key', 'value') self.assertEqual(call('flush_all\r\n'), b'OK\r\n') self.assertNoKey('key') def test_keys_set_after_flush_remain(self): self.assertEqual(call('flush_all\r\n'), b'OK\r\n') self.setKey('key') self.assertHasKey('key') self.delete('key') @slow def test_flush_all_with_timeout_flushes_all_keys_even_those_set_after_flush(self): self.setKey('key') self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n') self.assertHasKey('key') self.setKey('key2') time.sleep(3) self.assertNoKey('key') self.assertNoKey('key2') @slow def test_subsequent_flush_is_merged(self): self.setKey('key') self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n') # Can flush in anything between 1-2 self.assertEqual(call('flush_all 4\r\n'), b'OK\r\n') # Can flush in anything between 3-4 time.sleep(3) self.assertHasKey('key') self.setKey('key2') time.sleep(4) self.assertNoKey('key') self.assertNoKey('key2') @slow def test_immediate_flush_cancels_delayed_flush(self): self.assertEqual(call('flush_all 2\r\n'), b'OK\r\n') self.assertEqual(call('flush_all\r\n'), b'OK\r\n') self.setKey('key') time.sleep(1) self.assertHasKey('key') self.delete('key') @slow def test_flushing_in_the_past(self): self.setKey('key1') time.sleep(1) self.setKey('key2') key2_time = int(time.time()) self.assertEqual(call('flush_all %d\r\n' % (key2_time - 1)), b'OK\r\n') time.sleep(1) self.assertNoKey("key1") self.assertNoKey("key2") @slow def test_memcache_does_not_crash_when_flushing_with_already_expred_items(self): self.assertEqual(call('set key1 0 2 5\r\nhello\r\n'), 
b'STORED\r\n') time.sleep(1) self.assertEqual(call('flush_all\r\n'), b'OK\r\n') def test_response_spanning_many_datagrams(self): key1_data = '1' * 1000 key2_data = '2' * 1000 key3_data = '3' * 1000 self.set('key1', key1_data) self.set('key2', key2_data) self.set('key3', key3_data) resp = call('get key1 key2 key3\r\n').decode() pattern = '^VALUE (?P.*?\r\n.*?)\r\nVALUE (?P.*?\r\n.*?)\r\nVALUE (?P.*?\r\n.*?)\r\nEND\r\n$' self.assertRegex(resp, pattern) m = re.match(pattern, resp) self.assertEqual(set([m.group('v1'), m.group('v2'), m.group('v3')]), set(['key1 0 %d\r\n%s' % (len(key1_data), key1_data), 'key2 0 %d\r\n%s' % (len(key2_data), key2_data), 'key3 0 %d\r\n%s' % (len(key3_data), key3_data)])) self.delete('key1') self.delete('key2') self.delete('key3') def test_version(self): self.assertRegex(call('version\r\n'), b'^VERSION .*\r\n$') def test_add(self): self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'STORED\r\n') self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'NOT_STORED\r\n') self.delete('key') def test_replace(self): self.assertEqual(call('add key 0 0 1\r\na\r\n'), b'STORED\r\n') self.assertEqual(call('replace key 0 0 1\r\na\r\n'), b'STORED\r\n') self.delete('key') self.assertEqual(call('replace key 0 0 1\r\na\r\n'), b'NOT_STORED\r\n') def test_cas_and_gets(self): self.assertEqual(call('cas key 0 0 1 1\r\na\r\n'), b'NOT_FOUND\r\n') self.assertEqual(call('add key 0 0 5\r\nhello\r\n'), b'STORED\r\n') version = self.getItemVersion('key') self.assertEqual(call('set key 1 0 5\r\nhello\r\n'), b'STORED\r\n') self.assertEqual(call('gets key\r\n').decode(), 'VALUE key 1 5 %d\r\nhello\r\nEND\r\n' % (version + 1)) self.assertEqual(call('cas key 0 0 5 %d\r\nhello\r\n' % (version)), b'EXISTS\r\n') self.assertEqual(call('cas key 0 0 5 %d\r\naloha\r\n' % (version + 1)), b'STORED\r\n') self.assertEqual(call('gets key\r\n').decode(), 'VALUE key 0 5 %d\r\naloha\r\nEND\r\n' % (version + 2)) self.delete('key') def test_curr_items_stat(self): self.assertEqual(0, 
int(self.getStat('curr_items'))) self.setKey('key') self.assertEqual(1, int(self.getStat('curr_items'))) self.delete('key') self.assertEqual(0, int(self.getStat('curr_items'))) def test_how_stats_change_with_different_commands(self): get_count = int(self.getStat('cmd_get')) set_count = int(self.getStat('cmd_set')) flush_count = int(self.getStat('cmd_flush')) total_items = int(self.getStat('total_items')) get_misses = int(self.getStat('get_misses')) get_hits = int(self.getStat('get_hits')) cas_hits = int(self.getStat('cas_hits')) cas_badval = int(self.getStat('cas_badval')) cas_misses = int(self.getStat('cas_misses')) delete_misses = int(self.getStat('delete_misses')) delete_hits = int(self.getStat('delete_hits')) curr_connections = int(self.getStat('curr_connections')) incr_hits = int(self.getStat('incr_hits')) incr_misses = int(self.getStat('incr_misses')) decr_hits = int(self.getStat('decr_hits')) decr_misses = int(self.getStat('decr_misses')) call('get key\r\n') get_count += 1 get_misses += 1 call('gets key\r\n') get_count += 1 get_misses += 1 call('set key1 0 0 1\r\na\r\n') set_count += 1 total_items += 1 call('get key1\r\n') get_count += 1 get_hits += 1 call('add key1 0 0 1\r\na\r\n') set_count += 1 call('add key2 0 0 1\r\na\r\n') set_count += 1 total_items += 1 call('replace key1 0 0 1\r\na\r\n') set_count += 1 total_items += 1 call('replace key3 0 0 1\r\na\r\n') set_count += 1 call('cas key4 0 0 1 1\r\na\r\n') set_count += 1 cas_misses += 1 call('cas key1 0 0 1 %d\r\na\r\n' % self.getItemVersion('key1')) set_count += 1 get_count += 1 get_hits += 1 cas_hits += 1 total_items += 1 call('cas key1 0 0 1 %d\r\na\r\n' % (self.getItemVersion('key1') + 1)) set_count += 1 get_count += 1 get_hits += 1 cas_badval += 1 call('delete key1\r\n') delete_hits += 1 call('delete key1\r\n') delete_misses += 1 call('incr num 1\r\n') incr_misses += 1 call('decr num 1\r\n') decr_misses += 1 call('set num 0 0 1\r\n0\r\n') set_count += 1 total_items += 1 call('incr num 1\r\n') 
incr_hits += 1 call('decr num 1\r\n') decr_hits += 1 self.flush() flush_count += 1 self.assertEqual(get_count, int(self.getStat('cmd_get'))) self.assertEqual(set_count, int(self.getStat('cmd_set'))) self.assertEqual(flush_count, int(self.getStat('cmd_flush'))) self.assertEqual(total_items, int(self.getStat('total_items'))) self.assertEqual(get_hits, int(self.getStat('get_hits'))) self.assertEqual(get_misses, int(self.getStat('get_misses'))) self.assertEqual(cas_misses, int(self.getStat('cas_misses'))) self.assertEqual(cas_hits, int(self.getStat('cas_hits'))) self.assertEqual(cas_badval, int(self.getStat('cas_badval'))) self.assertEqual(delete_misses, int(self.getStat('delete_misses'))) self.assertEqual(delete_hits, int(self.getStat('delete_hits'))) self.assertEqual(0, int(self.getStat('curr_items'))) self.assertEqual(curr_connections, int(self.getStat('curr_connections'))) self.assertEqual(incr_misses, int(self.getStat('incr_misses'))) self.assertEqual(incr_hits, int(self.getStat('incr_hits'))) self.assertEqual(decr_misses, int(self.getStat('decr_misses'))) self.assertEqual(decr_hits, int(self.getStat('decr_hits'))) def test_incr(self): self.assertEqual(call('incr key 0\r\n'), b'NOT_FOUND\r\n') self.assertEqual(call('set key 0 0 1\r\n0\r\n'), b'STORED\r\n') self.assertEqual(call('incr key 0\r\n'), b'0\r\n') self.assertEqual(call('get key\r\n'), b'VALUE key 0 1\r\n0\r\nEND\r\n') self.assertEqual(call('incr key 1\r\n'), b'1\r\n') self.assertEqual(call('incr key 2\r\n'), b'3\r\n') self.assertEqual(call('incr key %d\r\n' % (pow(2, 64) - 1)), b'2\r\n') self.assertEqual(call('incr key %d\r\n' % (pow(2, 64) - 3)), b'18446744073709551615\r\n') self.assertRegex(call('incr key 1\r\n').decode(), r'0(\w+)?\r\n') self.assertEqual(call('set key 0 0 2\r\n1 \r\n'), b'STORED\r\n') self.assertEqual(call('incr key 1\r\n'), b'2\r\n') self.assertEqual(call('set key 0 0 2\r\n09\r\n'), b'STORED\r\n') self.assertEqual(call('incr key 1\r\n'), b'10\r\n') def test_decr(self): 
self.assertEqual(call('decr key 0\r\n'), b'NOT_FOUND\r\n') self.assertEqual(call('set key 0 0 1\r\n7\r\n'), b'STORED\r\n') self.assertEqual(call('decr key 1\r\n'), b'6\r\n') self.assertEqual(call('get key\r\n'), b'VALUE key 0 1\r\n6\r\nEND\r\n') self.assertEqual(call('decr key 6\r\n'), b'0\r\n') self.assertEqual(call('decr key 2\r\n'), b'0\r\n') self.assertEqual(call('set key 0 0 2\r\n20\r\n'), b'STORED\r\n') self.assertRegex(call('decr key 11\r\n').decode(), r'^9( )?\r\n$') self.assertEqual(call('set key 0 0 3\r\n100\r\n'), b'STORED\r\n') self.assertRegex(call('decr key 91\r\n').decode(), r'^9( )?\r\n$') self.assertEqual(call('set key 0 0 2\r\n1 \r\n'), b'STORED\r\n') self.assertEqual(call('decr key 1\r\n'), b'0\r\n') self.assertEqual(call('set key 0 0 2\r\n09\r\n'), b'STORED\r\n') self.assertEqual(call('decr key 1\r\n'), b'8\r\n') def test_incr_and_decr_on_invalid_input(self): error_msg = b'CLIENT_ERROR cannot increment or decrement non-numeric value\r\n' for cmd in ['incr', 'decr']: for value in ['', '-1', 'a', '0x1', '18446744073709551616']: self.assertEqual(call('set key 0 0 %d\r\n%s\r\n' % (len(value), value)), b'STORED\r\n') prev = call('get key\r\n') self.assertEqual(call(cmd + ' key 1\r\n'), error_msg, "cmd=%s, value=%s" % (cmd, value)) self.assertEqual(call('get key\r\n'), prev) self.delete('key') def wait_for_memcache_tcp(timeout=4): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) timeout_at = time.time() + timeout while True: if time.time() >= timeout_at: raise TimeoutError() try: s.connect(server_addr) s.close() break except ConnectionRefusedError: time.sleep(0.1) def wait_for_memcache_udp(timeout=4): timeout_at = time.time() + timeout while True: if time.time() >= timeout_at: raise TimeoutError() try: udp_call('version\r\n', timeout=0.2) break except socket.timeout: pass if __name__ == '__main__': parser = argparse.ArgumentParser(description="memcache protocol tests") parser.add_argument('--server', '-s', action="store", help="server adddress in 
: format", default="localhost:11211") parser.add_argument('--udp', '-U', action="store_true", help="Use UDP protocol") parser.add_argument('--fast', action="store_true", help="Run only fast tests") args = parser.parse_args() host, port = args.server.split(':') server_addr = (host, int(port)) if args.udp: call = udp_call wait_for_memcache_udp() else: call = tcp_call wait_for_memcache_tcp() runner = unittest.TextTestRunner() loader = unittest.TestLoader() suite = unittest.TestSuite() suite.addTest(loader.loadTestsFromTestCase(TestCommands)) if args.udp: suite.addTest(loader.loadTestsFromTestCase(UdpSpecificTests)) else: suite.addTest(loader.loadTestsFromTestCase(TcpSpecificTests)) result = runner.run(suite) if not result.wasSuccessful(): sys.exit(1) seastar-25.05.0/apps/rpc_tester/000077500000000000000000000000001501510432000164315ustar00rootroot00000000000000seastar-25.05.0/apps/rpc_tester/CMakeLists.txt000066400000000000000000000015611501510432000211740ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2022 Scylladb, Ltd. 
# seastar_add_app (rpc_tester SOURCES rpc_tester.cc) target_link_libraries (app_rpc_tester PRIVATE yaml-cpp::yaml-cpp) seastar-25.05.0/apps/rpc_tester/rpc_tester.cc000066400000000000000000000615141501510432000211210ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2022 ScyllaDB */ #include #include #include #include #include #include #include #pragma GCC diagnostic push // see https://github.com/boostorg/accumulators/pull/54 #pragma GCC diagnostic ignored "-Wuninitialized" #include #include #include #include #include #include #include #pragma GCC diagnostic pop #include #include #include #include #include #include #include using namespace seastar; using namespace boost::accumulators; using namespace std::chrono_literals; struct serializer {}; template inline void write_arithmetic_type(Output& out, T v) { static_assert(std::is_arithmetic_v, "must be arithmetic type"); return out.write(reinterpret_cast(&v), sizeof(T)); } template inline T read_arithmetic_type(Input& in) { static_assert(std::is_arithmetic_v, "must be arithmetic type"); T v; in.read(reinterpret_cast(&v), sizeof(T)); return v; } template inline void write(serializer, Output& output, int32_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, uint32_t v) { 
return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, int64_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, uint64_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, double v) { return write_arithmetic_type(output, v); } template inline int32_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint32_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint64_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint64_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline double read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline void write(serializer, Output& out, const sstring& v) { write_arithmetic_type(out, uint32_t(v.size())); out.write(v.c_str(), v.size()); } template inline sstring read(serializer, Input& in, rpc::type) { auto size = read_arithmetic_type(in); sstring ret = uninitialized_string(size); in.read(ret.data(), size); return ret; } using payload_t = std::vector; template inline void write(serializer, Output& out, const payload_t& v) { write_arithmetic_type(out, uint32_t(v.size())); out.write((const char*)v.data(), v.size() * sizeof(payload_t::value_type)); } template inline payload_t read(serializer, Input& in, rpc::type) { auto size = read_arithmetic_type(in); payload_t ret; ret.resize(size); in.read((char*)ret.data(), size * sizeof(payload_t::value_type)); return ret; } class pause_distribution { public: virtual std::chrono::duration get() = 0; template Dur get_as() { return std::chrono::duration_cast(get()); } virtual ~pause_distribution() {} }; class steady_process : public pause_distribution { std::chrono::duration _pause; public: 
steady_process(std::chrono::duration period) : _pause(period) { } std::chrono::duration get() override { return _pause; } }; std::unique_ptr make_steady_pause(std::chrono::duration d) { return std::make_unique(d); } class uniform_process : public pause_distribution { std::random_device _rd; std::mt19937 _rng; std::uniform_real_distribution _range; public: uniform_process(std::chrono::duration min, std::chrono::duration max) : _rng(_rd()) , _range(min.count(), max.count()) { } std::chrono::duration get() override { return std::chrono::duration(_range(_rng)); } }; struct duration_range { std::chrono::duration min; std::chrono::duration max; }; std::unique_ptr make_uniform_pause(duration_range range) { return std::make_unique(range.min, range.max); } struct client_config { bool nodelay = true; }; struct server_config { bool nodelay = true; }; struct job_config { std::string name; std::string type; std::string verb; unsigned parallelism; unsigned shares = 100; std::chrono::duration exec_time; std::optional exec_time_range; std::optional> sleep_time; std::optional sleep_time_range; std::optional> timeout; size_t payload; bool client = false; bool server = false; std::chrono::seconds duration; std::string sg_name; scheduling_group sg = default_scheduling_group(); }; struct config { client_config client; server_config server; std::vector jobs; }; struct duration_time { std::chrono::duration time; }; struct byte_size { uint64_t size; }; namespace YAML { template<> struct convert { static bool decode(const Node& node, client_config& cfg) { if (node["nodelay"]) { cfg.nodelay = node["nodelay"].as(); } return true; } }; template<> struct convert { static bool decode(const Node& node, server_config& cfg) { if (node["nodelay"]) { cfg.nodelay = node["nodelay"].as(); } return true; } }; template <> struct convert { static bool decode(const Node& node, job_config& cfg) { cfg.name = node["name"].as(); cfg.type = node["type"].as(); cfg.parallelism = node["parallelism"].as(); if 
(cfg.type == "rpc") { cfg.verb = node["verb"].as(); cfg.payload = node["payload"].as().size; cfg.client = true; if (node["sleep_time"]) { cfg.sleep_time = node["sleep_time"].as().time; } if (node["timeout"]) { cfg.timeout = node["timeout"].as().time; } } else if (cfg.type == "cpu") { if (node["execution_time"]) { cfg.exec_time = node["execution_time"].as().time; } else { duration_range r; r.min = node["execution_time_min"].as().time; r.max = node["execution_time_max"].as().time; cfg.exec_time_range = r; } if (node["sleep_time"]) { cfg.sleep_time = node["sleep_time"].as().time; } else if (node["sleep_time_min"] && node["sleep_time_max"]) { duration_range r; r.min = node["sleep_time_min"].as().time; r.max = node["sleep_time_max"].as().time; cfg.sleep_time_range = r; } cfg.client = !node["side"] || (node["side"].as() == "client"); cfg.server = !node["side"] || (node["side"].as() == "server"); } if (node["shares"]) { cfg.shares = node["shares"].as(); } if (node["sched_group"]) { cfg.sg_name = node["sched_group"].as(); } else { cfg.sg_name = cfg.name; } return true; } }; template<> struct convert { static bool decode(const Node& node, config& cfg) { if (node["client"]) { cfg.client = node["client"].as(); } if (node["server"]) { cfg.server = node["server"].as(); } if (node["jobs"]) { cfg.jobs = node["jobs"].as>(); } return true; } }; template<> struct convert { static bool decode(const Node& node, duration_time& dt) { auto str = node.as(); if (str == "0") { dt.time = 0ns; return true; } if (str.back() != 's') { return false; } str.pop_back(); std::chrono::duration unit; if (str.back() == 'm') { unit = 1ms; str.pop_back(); } else if (str.back() == 'u') { unit = 1us; str.pop_back(); } else if (str.back() == 'n') { unit = 1ns; str.pop_back(); } else { unit = 1s; } dt.time = boost::lexical_cast(str) * unit; return true; } }; template<> struct convert { static bool decode(const Node& node, byte_size& bs) { auto str = node.as(); unsigned shift = 0; if (str.back() == 'B') { 
str.pop_back(); if (str.back() != 'k') { return false; } str.pop_back(); shift = 10; } bs.size = (boost::lexical_cast(str) << shift); return bs.size; } }; } // YAML namespace enum class rpc_verb : int32_t { HELLO = 0, BYE = 1, ECHO = 2, WRITE = 3, }; using rpc_protocol = rpc::protocol; static std::array quantiles = { 0.5, 0.95, 0.99, 0.999}; class job { public: virtual std::string name() const = 0; virtual future<> run() = 0; virtual void emit_result(YAML::Emitter& out) const = 0; virtual ~job() {} }; class job_rpc : public job { using accumulator_type = accumulator_set>; job_config _cfg; socket_address _caddr; client_config _ccfg; rpc_protocol& _rpc; std::unique_ptr _client; std::function(unsigned)> _call; std::chrono::steady_clock::time_point _stop; uint64_t _total_messages = 0; accumulator_type _latencies; future<> call_echo(unsigned dummy) { auto cln = _rpc.make_client(rpc_verb::ECHO); if (_cfg.timeout) { return cln(*_client, std::chrono::duration_cast(*_cfg.timeout), dummy).discard_result(); } else { return cln(*_client, dummy).discard_result(); } } future<> call_write(unsigned dummy, const payload_t& pl) { return _rpc.make_client(rpc_verb::WRITE)(*_client, pl).then([exp = pl.size()] (auto res) { SEASTAR_ASSERT(res == exp); return make_ready_future<>(); }); } public: job_rpc(job_config cfg, rpc_protocol& rpc, client_config ccfg, socket_address caddr) : _cfg(cfg) , _caddr(std::move(caddr)) , _ccfg(ccfg) , _rpc(rpc) , _stop(std::chrono::steady_clock::now() + _cfg.duration) , _latencies(extended_p_square_probabilities = quantiles) { if (_cfg.verb == "echo") { _call = [this] (unsigned x) { return call_echo(x); }; } else if (_cfg.verb == "write") { payload_t payload; payload.resize(_cfg.payload / sizeof(payload_t::value_type), 0); _call = [this, payload = std::move(payload)] (unsigned x) { return call_write(x, payload); }; } else if (_cfg.verb == "vecho") { _call = [this] (unsigned x) { fmt::print("{}.{} send echo\n", this_shard_id(), x); return 
call_echo(x).then([x] { fmt::print("{}.{} got response\n", this_shard_id(), x); }).handle_exception([x] (auto ex) { fmt::print("{}.{} got error {}\n", this_shard_id(), x, ex); }); }; } else { throw std::runtime_error("unknown verb"); } } virtual std::string name() const override { return _cfg.name; } virtual future<> run() override { return with_scheduling_group(_cfg.sg, [this] { rpc::client_options co; co.tcp_nodelay = _ccfg.nodelay; co.isolation_cookie = _cfg.sg_name; _client = std::make_unique(_rpc, co, _caddr); return parallel_for_each(std::views::iota(0u, _cfg.parallelism), [this] (auto dummy) { auto f = make_ready_future<>(); if (_cfg.sleep_time) { // Do initial small delay to de-synchronize fibers f = seastar::sleep(std::chrono::duration_cast(*_cfg.sleep_time / _cfg.parallelism * dummy)); } return std::move(f).then([this, dummy] { return do_until([this] { return std::chrono::steady_clock::now() > _stop; }, [this, dummy] { _total_messages++; auto now = std::chrono::steady_clock::now(); return _call(dummy).then([this, start = now] { std::chrono::microseconds lat = std::chrono::duration_cast(std::chrono::steady_clock::now() - start); _latencies(lat.count()); }).then([this] { if (_cfg.sleep_time) { return seastar::sleep(std::chrono::duration_cast(*_cfg.sleep_time)); } else { return make_ready_future<>(); } }); }); }); }).finally([this] { return _client->stop(); }); }); } virtual void emit_result(YAML::Emitter& out) const override { out << YAML::Key << "messages" << YAML::Value << _total_messages; out << YAML::Key << "latencies" << YAML::Comment("usec"); out << YAML::BeginMap; out << YAML::Key << "average" << YAML::Value << (uint64_t)mean(_latencies); for (auto& q: quantiles) { out << YAML::Key << fmt::format("p{}", q) << YAML::Value << (uint64_t)quantile(_latencies, quantile_probability = q); } out << YAML::Key << "max" << YAML::Value << (uint64_t)max(_latencies); out << YAML::EndMap; } }; class job_cpu : public job { job_config _cfg; 
std::chrono::steady_clock::time_point _stop; uint64_t _total_invocations = 0; std::unique_ptr _pause; std::unique_ptr _sleep; std::unique_ptr make_pause() { if (_cfg.exec_time_range) { return make_uniform_pause(*_cfg.exec_time_range); } else { return make_steady_pause(_cfg.exec_time); } } std::unique_ptr make_sleep() { if (_cfg.sleep_time) { return make_steady_pause(*_cfg.sleep_time); } if (_cfg.sleep_time_range) { return make_uniform_pause(*_cfg.sleep_time_range); } return nullptr; } public: job_cpu(job_config cfg) : _cfg(cfg) , _pause(make_pause()) , _sleep(make_sleep()) { } virtual std::string name() const override { return _cfg.name; } virtual void emit_result(YAML::Emitter& out) const override { out << YAML::Key << "total" << YAML::Value << _total_invocations; } virtual future<> run() override { _stop = std::chrono::steady_clock::now() + _cfg.duration; return with_scheduling_group(_cfg.sg, [this] { return parallel_for_each(std::views::iota(0u, _cfg.parallelism), [this] (auto dummy) { return do_until([this] { return std::chrono::steady_clock::now() > _stop; }, [this] { _total_invocations++; auto start = std::chrono::steady_clock::now(); auto pause = _pause->get(); while ((std::chrono::steady_clock::now() - start) < pause); if (!_sleep) { return make_ready_future<>(); } else { auto sleep = std::chrono::duration_cast(_sleep->get()); return seastar::sleep(sleep); } }); }); }); } }; class context { std::unique_ptr _rpc; std::unique_ptr _server; std::unique_ptr _client; promise<> _bye; promise<> _server_jobs; config _cfg; std::vector> _jobs; std::unordered_map _sched_groups; std::unique_ptr make_job(job_config cfg, std::optional caddr) { if (cfg.type == "rpc") { return std::make_unique(cfg, *_rpc, _cfg.client, *caddr); } if (cfg.type == "cpu") { return std::make_unique(cfg); } throw std::runtime_error("unknown job type"); } future<> run_jobs() { return parallel_for_each(_jobs, [] (auto& job) { return job->run(); }); } rpc::isolation_config 
isolate_connection(std::string group_name) { rpc::isolation_config cfg; if (group_name != "") { cfg.sched_group = _sched_groups[group_name]; } return cfg; } public: context(std::optional laddr, std::optional caddr, uint16_t port, config cfg, std::unordered_map groups) : _rpc(std::make_unique(serializer{})) , _cfg(cfg) , _sched_groups(std::move(groups)) { _rpc->register_handler(rpc_verb::HELLO, [this] { fmt::print("Got HELLO message from client\n"); run_jobs().discard_result().forward_to(std::move(_server_jobs)); }); _rpc->register_handler(rpc_verb::BYE, [this] { fmt::print("Got BYE message from client, exiting\n"); _bye.set_value(); }); _rpc->register_handler(rpc_verb::ECHO, [] (uint64_t val) { return make_ready_future(val); }); _rpc->register_handler(rpc_verb::WRITE, [] (payload_t val) { return make_ready_future(val.size()); }); if (laddr) { rpc::server_options so; so.tcp_nodelay = _cfg.server.nodelay; rpc::resource_limits limits; limits.isolate_connection = [this] (sstring cookie) { return isolate_connection(cookie); }; _server = std::make_unique(*_rpc, so, *laddr, limits); for (auto&& jc : _cfg.jobs) { if (jc.server) { _jobs.push_back(make_job(jc, {})); } } } if (caddr) { rpc::client_options co; co.tcp_nodelay = _cfg.client.nodelay; _client = std::make_unique(*_rpc, co, *caddr); for (auto&& jc : _cfg.jobs) { if (jc.client) { _jobs.push_back(make_job(jc, *caddr)); } } } } future<> start() { if (_client) { return _rpc->make_client(rpc_verb::HELLO)(*_client); } return make_ready_future<>(); } future<> stop() { if (_client) { return _rpc->make_client(rpc_verb::BYE)(*_client).finally([this] { return _client->stop(); }); } if (_server) { return _server->stop(); } return make_ready_future<>(); } future<> run() { if (_client) { return run_jobs(); } if (_server) { return when_all(_bye.get_future(), _server_jobs.get_future()).discard_result(); } return make_ready_future<>(); } future<> emit_result(YAML::Emitter& out) const { for (const auto& job : _jobs) { out << 
YAML::Key << job->name(); out << YAML::BeginMap; job->emit_result(out); out << YAML::EndMap; } return make_ready_future<>(); } }; int main(int ac, char** av) { namespace bpo = boost::program_options; app_template app; auto opt_add = app.add_options(); opt_add ("listen", bpo::value()->default_value(""), "address to start server on") ("connect", bpo::value()->default_value(""), "address to connect client to") ("port", bpo::value()->default_value(9123), "port to listen on or connect to") ("conf", bpo::value()->default_value("./conf.yaml"), "config with jobs and options") ("duration", bpo::value()->default_value(30), "duration in seconds") ; sharded ctx; return app.run(ac, av, [&] { return seastar::async([&] { auto& opts = app.configuration(); auto& listen = opts["listen"].as(); auto& connect = opts["connect"].as(); auto& port = opts["port"].as(); auto& conf = opts["conf"].as(); auto duration = std::chrono::seconds(opts["duration"].as()); std::optional laddr; if (listen != "") { if (listen[0] == '.' || listen[0] == '/') { unix_domain_addr addr(listen); laddr.emplace(std::move(addr)); } else { ipv4_addr addr(listen, port); laddr.emplace(std::move(addr)); } } std::optional caddr; if (connect != "") { if (connect[0] == '.' 
|| connect[0] == '/') { unix_domain_addr addr(connect); caddr.emplace(std::move(addr)); } else { ipv4_addr addr(connect, port); caddr.emplace(std::move(addr)); } } YAML::Node doc = YAML::LoadFile(conf); auto cfg = doc.as(); std::unordered_map groups; for (auto&& jc : cfg.jobs) { jc.duration = duration; if (groups.count(jc.sg_name) == 0) { fmt::print("Make sched group {}, {} shares\n", jc.sg_name, jc.shares); groups[jc.sg_name] = create_scheduling_group(jc.sg_name, jc.shares).get(); } jc.sg = groups[jc.sg_name]; } ctx.start(laddr, caddr, port, cfg, groups).get(); ctx.invoke_on_all(&context::start).get(); ctx.invoke_on_all(&context::run).get(); YAML::Emitter out; out << YAML::BeginDoc; out << YAML::BeginSeq; for (unsigned i = 0; i < smp::count; i++) { out << YAML::BeginMap; out << YAML::Key << "shard" << YAML::Value << i; ctx.invoke_on(i, [&out] (auto& c) { return c.emit_result(out); }).get(); out << YAML::EndMap; } out << YAML::EndSeq; out << YAML::EndDoc; std::cout << out.c_str(); ctx.stop().get(); }); }); } seastar-25.05.0/apps/rpc_tester/sample-conf.yaml000066400000000000000000000013651501510432000215260ustar00rootroot00000000000000client: nodelay: # bool, whether or not to set tcp_nodelay option server: nodelay: # bool, whether or not to set tcp_nodelay option jobs: - name: # any parseable string type: rpc verb: # string, one of: echo, vecho, write parallelism: # number of verbs to send simultaneously shares: # sched group shares (100 by default) payload: # number of bytes in the payload for write verb, accepts kB suffix sleep_time: # optional inactivity pause between sending messages timeout: # optional rpc send timeout duration - name: type: cpu execution_time: # time in [0-9]+[mun]?s format sleep_time: # optional inactivity pause between burning cpu side: # optional, 'client' or 'server' to specify which side to run on 
seastar-25.05.0/apps/seawreck/000077500000000000000000000000001501510432000160635ustar00rootroot00000000000000seastar-25.05.0/apps/seawreck/CMakeLists.txt000066400000000000000000000014501501510432000206230ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # seastar_add_app (seawreck SOURCES seawreck.cc) seastar-25.05.0/apps/seawreck/seawreck.cc000066400000000000000000000220011501510432000201710ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2015 Cloudius Systems, Ltd. 
*/ #include #include #include #include #include #include #include #include using namespace seastar; template void http_debug(const char* fmt, Args&&... args) { #if HTTP_DEBUG print(fmt, std::forward(args)...); #endif } class http_client { private: unsigned _duration; unsigned _conn_per_core; unsigned _reqs_per_conn; std::vector _sockets; semaphore _conn_connected{0}; semaphore _conn_finished{0}; timer<> _run_timer; bool _timer_based; bool _timer_done{false}; uint64_t _total_reqs{0}; public: http_client(unsigned duration, unsigned total_conn, unsigned reqs_per_conn) : _duration(duration) , _conn_per_core(total_conn / smp::count) , _reqs_per_conn(reqs_per_conn) , _run_timer([this] { _timer_done = true; }) , _timer_based(reqs_per_conn == 0) { } class connection { private: connected_socket _fd; input_stream _read_buf; output_stream _write_buf; http_response_parser _parser; http_client* _http_client; uint64_t _nr_done{0}; public: connection(connected_socket&& fd, http_client* client) : _fd(std::move(fd)) , _read_buf(_fd.input()) , _write_buf(_fd.output()) , _http_client(client){ } uint64_t nr_done() { return _nr_done; } future<> do_req() { return _write_buf.write("GET / HTTP/1.1\r\nHost: 127.0.0.1:10000\r\n\r\n").then([this] { return _write_buf.flush(); }).then([this] { _parser.init(); return _read_buf.consume(_parser).then([this] { // Read HTTP response header first if (_parser.eof()) { return make_ready_future<>(); } auto _rsp = _parser.get_parsed_response(); auto it = _rsp->_headers.find("Content-Length"); if (it == _rsp->_headers.end()) { fmt::print("Error: HTTP response does not contain: Content-Length\n"); return make_ready_future<>(); } auto content_len = std::stoi(it->second); http_debug("Content-Length = %d\n", content_len); // Read HTTP response body return _read_buf.read_exactly(content_len).then([this] (temporary_buffer buf) { _nr_done++; http_debug("%s\n", buf.get()); if (_http_client->done(_nr_done)) { return make_ready_future(); } else { return do_req(); 
} }); }); }); } }; future total_reqs() { fmt::print("Requests on cpu {:2d}: {:d}\n", this_shard_id(), _total_reqs); return make_ready_future(_total_reqs); } bool done(uint64_t nr_done) { if (_timer_based) { return _timer_done; } else { return nr_done >= _reqs_per_conn; } } future<> connect(ipv4_addr server_addr) { // Establish all the TCP connections first for (unsigned i = 0; i < _conn_per_core; i++) { // Connect in the background, signal _conn_connected when done. (void)seastar::connect(make_ipv4_address(server_addr)).then([this] (connected_socket fd) { _sockets.push_back(std::move(fd)); http_debug("Established connection %6d on cpu %3d\n", _conn_connected.current(), this_shard_id()); _conn_connected.signal(); }).or_terminate(); } return _conn_connected.wait(_conn_per_core); } future<> run() { // All connected, start HTTP request http_debug("Established all %6d tcp connections on cpu %3d\n", _conn_per_core, this_shard_id()); if (_timer_based) { _run_timer.arm(std::chrono::seconds(_duration)); } for (auto&& fd : _sockets) { auto conn = new connection(std::move(fd), this); // Run in the background, signal _conn_finished when done. (void)conn->do_req().then_wrapped([this, conn] (auto&& f) { http_debug("Finished connection %6d on cpu %3d\n", _conn_finished.current(), this_shard_id()); _total_reqs += conn->nr_done(); _conn_finished.signal(); delete conn; // FIXME: should _conn_finished.signal be called only after this? // nothing seems to synchronize with this background work. 
try { f.get(); } catch (std::exception& ex) { fmt::print("http request error: {}\n", ex.what()); } }); } // All finished return _conn_finished.wait(_conn_per_core); } future<> stop() { return make_ready_future(); } }; namespace bpo = boost::program_options; int main(int ac, char** av) { app_template::config app_cfg; app_cfg.auto_handle_sigint_sigterm = false; app_template app(std::move(app_cfg)); app.add_options() ("server,s", bpo::value()->default_value("192.168.66.100:10000"), "Server address") ("conn,c", bpo::value()->default_value(100), "total connections") ("reqs,r", bpo::value()->default_value(0), "reqs per connection") ("duration,d", bpo::value()->default_value(10), "duration of the test in seconds)"); return app.run(ac, av, [&app] () -> future { auto& config = app.configuration(); auto server = config["server"].as(); auto reqs_per_conn = config["reqs"].as(); auto total_conn= config["conn"].as(); auto duration = config["duration"].as(); if (total_conn % smp::count != 0) { fmt::print("Error: conn needs to be n * cpu_nr\n"); return make_ready_future(-1); } auto http_clients = new distributed; // Start http requests on all the cores auto started = steady_clock_type::now(); fmt::print("========== http_client ============\n"); fmt::print("Server: {}\n", server); fmt::print("Connections: {:d}\n", total_conn); fmt::print("Requests/connection: {}\n", reqs_per_conn == 0 ? 
"dynamic (timer based)" : std::to_string(reqs_per_conn)); return http_clients->start(std::move(duration), std::move(total_conn), std::move(reqs_per_conn)).then([http_clients, server] { return http_clients->invoke_on_all(&http_client::connect, ipv4_addr{server}); }).then([http_clients] { return http_clients->invoke_on_all(&http_client::run); }).then([http_clients] { return http_clients->map_reduce(adder(), &http_client::total_reqs); }).then([http_clients, started] (auto total_reqs) { // All the http requests are finished auto finished = steady_clock_type::now(); auto elapsed = finished - started; auto secs = static_cast(elapsed.count() / 1000000000.0); fmt::print("Total cpus: {:d}\n", smp::count); fmt::print("Total requests: {:d}\n", total_reqs); fmt::print("Total time: {:f}\n", secs); fmt::print("Requests/sec: {:f}\n", static_cast(total_reqs) / secs); fmt::print("========== done ============\n"); return http_clients->stop().then([http_clients] { // FIXME: If we call engine().exit(0) here to exit when // requests are done. The tcp connection will not be closed // properly, becasue we exit too earily and the FIN packets are // not exchanged. delete http_clients; return make_ready_future(0); }); }); }); } seastar-25.05.0/build.ninja000066400000000000000000000002641501510432000154360ustar00rootroot00000000000000rule display_help command = echo 'Error: Execute Ninja in a build directory: `ninja -C build/release`' description = Help the poor user build help: display_help default help seastar-25.05.0/cmake/000077500000000000000000000000001501510432000143745ustar00rootroot00000000000000seastar-25.05.0/cmake/CheckGcc107852.cmake000066400000000000000000000012321501510432000175150ustar00rootroot00000000000000include (CheckCXXSourceCompiles) include (CMakePushCheckState) cmake_push_check_state (RESET) # these options are included by -Wall, which is in turn included by # Seastar_PRIVATE_CXX_FLAGS, which is not applied to CMAKE_CXX_FLAGS, so # let's apply them explicitly. 
set (CMAKE_REQUIRED_FLAGS "-Werror=stringop-overflow -Werror=array-bound") set (CMAKE_REQUIRED_LIBRARIES fmt::fmt) # see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107852 check_cxx_source_compiles (" #include int main() { float grades[] = {3.14}; fmt::print(\"{}\", grades); }" Cxx_Compiler_BZ107852_Free FAIL_REGEX "is out of the bounds") cmake_pop_check_state () seastar-25.05.0/cmake/CheckHeaders.cmake000066400000000000000000000111371501510432000177120ustar00rootroot00000000000000# seastar_check_self_contained() checks if the headers listed as the source of # a target are self-contained. # # Header files should be self-contained. In general, the source files should # not have to adhere to special conditions to include them. For instance, # they don't need to include other header files for using a header file, or # to define certain macro(s) for using it. But the macros are allowed to be # used to its behavior though. # # seastar_check_self_contained() is created to perform a minimal check on the # specified set of header files by compiling each of them. # # Please note, there are chances that a symbol declaration could be included # indirectly by an (indirectly) included header file even if that symbol is not # a part of the public interface of that header file, so this dependency is # a little bit fragile. seastar_check_self_contained()" does not warn at seeing # the indirect dependency, it just check if the preprocessed header file # *contains* the declarations of the symbols so that any source file including # it can be compiled as well. The check performed by the CMake function allows # the indirect inclusion of the used symbols. For instance, if "juno.h" # references a symbol named "Jupiter", which is declared in "jupiter.h". So, # strictly speaking, "juno.h" should include "jupiter.h" for accessing this # symbol. But "juno.h" happens to include "solar.h", which in turn includes # "jupiter.h". 
So "seastar_check_self_contained()" accepts "juno.h", while a # tool like iwyu would complain at seeing it. # # You can also use CMAKE_CXX_INCLUDE_WHAT_YOU_USE for using an external tool # for performing a similar check. see # https://cmake.org/cmake/help/latest/prop_tgt/LANG_INCLUDE_WHAT_YOU_USE.html function (seastar_check_self_contained target library) cmake_parse_arguments ( parsed_args "" "" "EXCLUDE;INCLUDE" ${ARGN}) get_target_property (sources ${library} SOURCES) list (FILTER sources INCLUDE REGEX "${parsed_args_INCLUDE}") list (FILTER sources EXCLUDE REGEX "${parsed_args_EXCLUDE}") foreach (fn ${sources}) get_filename_component (file_ext ${fn} EXT) if (NOT file_ext STREQUAL ".hh") message (SEND_ERROR "Only headers are checked if they are self-contained, while ${fn} is not a header.") elseif (IS_ABSOLUTE ${fn}) # the header specified with absolute path is likely to be generated, this # is not our focus at this moment. continue () endif () get_filename_component (file_dir ${fn} DIRECTORY) list (APPEND includes "${file_dir}") set (src_dir "${CMAKE_BINARY_DIR}/${target}/${file_dir}") file (MAKE_DIRECTORY "${src_dir}") get_filename_component (file_name ${fn} NAME) set (src "${src_dir}/${file_name}.cc") # CMake refuses to compile .hh files, so we need to rename them first. 
add_custom_command ( OUTPUT ${src} DEPENDS ${fn} # silence "-Wpragma-once-outside-header" COMMAND sed -e "s/^#pragma once//" "${fn}" > "${src}" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" VERBATIM) list (APPEND srcs "${src}") endforeach () if (NOT srcs) # library's SOURCES does not contain any header return () endif () set (check_lib "${target}-${library}") add_library (${check_lib} EXCLUDE_FROM_ALL) target_sources (${check_lib} PRIVATE ${srcs}) # use ${library} as an interface library by consuming all of its # compile time options get_target_property (libraries ${library} LINK_LIBRARIES) if (libraries) target_link_libraries (${check_lib} PRIVATE ${libraries}) endif () # if header includes other header files with relative path, # we should satisfy it. list (REMOVE_DUPLICATES includes) target_include_directories (${check_lib} PRIVATE ${includes}) get_target_property (includes ${library} INCLUDE_DIRECTORIES) if (includes) target_include_directories (${check_lib} PRIVATE ${includes}) endif () get_target_property (compile_options ${library} COMPILE_OPTIONS) if (compile_options) target_compile_options (${check_lib} PRIVATE ${compile_options}) endif () # symbols in header file should always be referenced, but these # are just pure headers, so unused variables should be tolerated. target_compile_options (${check_lib} PRIVATE -Wno-unused-const-variable -Wno-unused-function -Wno-unused-variable) get_target_property (compile_definitions ${library} COMPILE_DEFINITIONS) if (compile_definitions) target_compile_definitions (${check_lib} PRIVATE ${compile_definitions}) endif () add_dependencies (${target} ${check_lib}) endfunction () seastar-25.05.0/cmake/CheckIncludeStyle.cmake000066400000000000000000000013551501510432000207440ustar00rootroot00000000000000# seastar_check_include_style() enforces that all source and header files under # specified directories include the headers with predefined list of prefixes # with angle brackets instead of quotes. 
find_package (Python3 COMPONENTS Interpreter) function (seastar_check_include_style target library) get_target_property (sources ${library} SOURCES) set (check-target "${target}-${library}") add_custom_target("${check-target}" COMMAND Python3::Interpreter ${CMAKE_CURRENT_LIST_DIR}/cmake/check-seastar-include-style.py ${sources} WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}" COMMENT "Checking include directive styles for ${library} source files" USES_TERMINAL) add_dependencies (${target} ${check-target}) endfunction () seastar-25.05.0/cmake/CheckLibc.cmake000066400000000000000000000024151501510432000172070ustar00rootroot00000000000000# check for the bits in different standard C library implementations we # care about include (CheckCXXSourceCompiles) file (READ ${CMAKE_CURRENT_LIST_DIR}/code_tests/stdout_test.cc _stdout_test_code) check_cxx_source_compiles ("${_stdout_test_code}" Stdout_Can_Be_Used_As_Identifier) if (Stdout_Can_Be_Used_As_Identifier) # "stdout" is defined as a macro by the C++ standard, so we cannot assume # that the macro is always expanded into an identifier which can be re-used # to name a enumerator in the declaration of an enumeration. target_compile_definitions (seastar PUBLIC SEASTAR_LOGGER_TYPE_STDOUT) endif () check_cxx_source_compiles (" #include int main() { char buf; char* a = strerror_r(1, &buf, 0); static_cast(a); }" Strerror_R_Returns_Char_P) if (Strerror_R_Returns_Char_P) # define SEASTAR_STRERROR_R_CHAR_P if strerror_r() is GNU-specific version, # which returns a "char*" not "int". 
target_compile_definitions (seastar PRIVATE SEASTAR_STRERROR_R_CHAR_P) endif () include (CheckFunctionExists) check_function_exists (pthread_attr_setaffinity_np Pthread_Attr_Setaffinity_Np) if (Pthread_Attr_Setaffinity_Np) target_compile_definitions (seastar PRIVATE SEASTAR_PTHREAD_ATTR_SETAFFINITY_NP) endif () seastar-25.05.0/cmake/CheckP2582R1.cmake000066400000000000000000000010621501510432000172560ustar00rootroot00000000000000include (CheckCXXSourceCompiles) include (CMakePushCheckState) cmake_push_check_state (RESET) set (CMAKE_REQUIRED_FLAGS "-std=c++23") # check if the compiler implements the inherited vs non-inherited guide # tiebreaker specified by P2582R1, see https://wg21.link/P2582R1 check_cxx_source_compiles (" template struct B { B(T...) {} }; template struct C : public B { using B::B; C(B) {} }; B(int) -> B; C c2(42); int main() {} " Cxx_Compiler_IMPLEMENTS_P2581R1) cmake_pop_check_state () seastar-25.05.0/cmake/CxxModulesRules.cmake000066400000000000000000000020071501510432000205030ustar00rootroot00000000000000if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 16) message (FATAL_ERROR "C++20 module needs Clang++-16 or up") endif () elseif (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 14) message (FATAL_ERROR "C++20 module needs g++-14 or up") endif () else () message (FATAL_ERROR "Unsupported compiler: ${CMAKE_CXX_COMPILER_ID}") endif () if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.28) # CMake 3.28 has official support of C++20 modules elseif (CMAKE_VERSION VERSION_GREATER_EQUAL 3.27) set (CMAKE_EXPERIMENTAL_CXX_MODULE_CMAKE_API "aa1f7df0-828a-4fcd-9afc-2dc80491aca7") elseif (CMAKE_VERSION VERSION_GREATER_EQUAL 3.26) set (CMAKE_EXPERIMENTAL_CXX_SCANDEP_SOURCE "") set (CMAKE_EXPERIMENTAL_CXX_MODULE_DYNDEP 1) set (CMAKE_EXPERIMENTAL_CXX_MODULE_CMAKE_API "2182bf5c-ef0d-489a-91da-49dbc3090d2a") endif () set (CMAKE_CXX_STANDARD_REQUIRED ON) # C++ extension does work with C++ module 
support so far set (CMAKE_CXX_EXTENSIONS OFF) seastar-25.05.0/cmake/FindGnuTLS.cmake000066400000000000000000000032241501510432000173140ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_package (PkgConfig REQUIRED) pkg_check_modules (PC_GnuTLS QUIET gnutls) find_library (GnuTLS_LIBRARY NAMES gnutls HINTS ${PC_GnuTLS_LIBDIR} ${PC_GnuTLS_LIBRARY_DIRS}) find_path (GnuTLS_INCLUDE_DIR NAMES gnutls/gnutls.h HINTS ${PC_GnuTLS_INCLUDEDIR} ${PC_GnuTLS_INCLUDE_DIRS}) mark_as_advanced ( GnuTLS_LIBRARY GnuTLS_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (GnuTLS REQUIRED_VARS GnuTLS_LIBRARY GnuTLS_INCLUDE_DIR VERSION_VAR PC_GnuTLS_VERSION) if (GnuTLS_FOUND) set (GnuTLS_LIBRARIES ${GnuTLS_LIBRARY}) set (GnuTLS_INCLUDE_DIRS ${GnuTLS_INCLUDE_DIR}) if (NOT (TARGET GnuTLS::gnutls)) add_library (GnuTLS::gnutls UNKNOWN IMPORTED) set_target_properties (GnuTLS::gnutls PROPERTIES IMPORTED_LOCATION ${GnuTLS_LIBRARY} INTERFACE_INCLUDE_DIRECTORIES ${GnuTLS_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/FindLibUring.cmake000066400000000000000000000037531501510432000177220ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). 
See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2022 ScyllaDB # find_package (PkgConfig REQUIRED) pkg_check_modules (PC_URING QUIET liburing) find_library (URING_LIBRARY NAMES uring HINTS ${PC_URING_LIBDIR} ${PC_URING_LIBRARY_DIRS}) find_path (URING_INCLUDE_DIR NAMES liburing.h HINTS ${PC_URING_INCLUDEDIR} ${PC_URING_INCLUDE_DIRS}) if (URING_INCLUDE_DIR) include (CheckStructHasMember) include (CMakePushCheckState) cmake_push_check_state (RESET) list(APPEND CMAKE_REQUIRED_INCLUDES ${URING_INCLUDE_DIR}) CHECK_STRUCT_HAS_MEMBER ("struct io_uring" features liburing.h HAVE_IOURING_FEATURES LANGUAGE CXX) cmake_pop_check_state () endif () mark_as_advanced ( URING_LIBRARY URING_INCLUDE_DIR HAVE_IOURING_FEATURES) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (LibUring REQUIRED_VARS URING_LIBRARY URING_INCLUDE_DIR HAVE_IOURING_FEATURES VERSION_VAR PC_URING_VERSION) if (LibUring_FOUND) set (URING_LIBRARIES ${URING_LIBRARY}) set (URING_INCLUDE_DIRS ${URING_INCLUDE_DIR}) if (NOT (TARGET URING::uring)) add_library (URING::uring UNKNOWN IMPORTED) set_target_properties (URING::uring PROPERTIES IMPORTED_LOCATION ${URING_LIBRARY} INTERFACE_INCLUDE_DIRECTORIES ${URING_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/FindLinuxMembarrier.cmake000066400000000000000000000026261501510432000213120ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache 
License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_path (LinuxMembarrier_INCLUDE_DIR NAMES linux/membarrier.h) include (CheckCXXSourceCompiles) file (READ ${CMAKE_CURRENT_LIST_DIR}/code_tests/LinuxMembarrier_test.cc _linuxmembarrier_test_code) check_cxx_source_compiles ("${_linuxmembarrier_test_code}" LinuxMembarrier_FOUND) if (LinuxMembarrier_FOUND) set (LinuxMembarrier_INCLUDE_DIRS ${LinuxMembarrier_INCLUDE_DIR}) endif () if (LinuxMembarrier_FOUND AND NOT (TARGET LinuxMembarrier::membarrier)) add_library (LinuxMembarrier::membarrier INTERFACE IMPORTED) set_target_properties (LinuxMembarrier::membarrier PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${LinuxMembarrier_INCLUDE_DIRS}) endif () seastar-25.05.0/cmake/FindPthreadSetName.cmake000066400000000000000000000006261501510432000210470ustar00rootroot00000000000000include (CheckSymbolExists) include (CMakePushCheckState) cmake_push_check_state (RESET) set (CMAKE_REQUIRED_FLAGS "-pthread") set (CMAKE_REQUIRED_DEFINITIONS "-D_GNU_SOURCE") check_symbol_exists (pthread_setname_np pthread.h HAVE_PTHREAD_SETNAME_NP) cmake_pop_check_state () find_package_handle_standard_args (PthreadSetName FOUND_VAR PthreadSetName_FOUND REQUIRED_VARS HAVE_PTHREAD_SETNAME_NP) seastar-25.05.0/cmake/FindSanitizers.cmake000066400000000000000000000054221501510432000203350ustar00rootroot00000000000000# # This file is open source software, 
licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # cmake_policy (PUSH) cmake_policy (SET CMP0057 NEW) if(NOT Sanitizers_FIND_COMPONENTS) set(Sanitizers_FIND_COMPONENTS address undefined_behavior) endif() foreach (component ${Sanitizers_FIND_COMPONENTS}) string (TOUPPER ${component} COMPONENT) set (compile_options "Sanitizers_${COMPONENT}_COMPILE_OPTIONS") if (component STREQUAL "address") list (APPEND ${compile_options} -fsanitize=address) elseif (component STREQUAL "undefined_behavior") list (APPEND ${compile_options} -fsanitize=undefined) else () message (FATAL_ERROR "Unsupported sanitizer: ${component}") endif () list(APPEND Sanitizers_COMPILE_OPTIONS "${${compile_options}}") unset (compile_options) endforeach () include(CheckCXXSourceCompiles) include(CMakePushCheckState) # -fsanitize=address cannot be combined with -fsanitize=thread, so let's test # the combination of the compiler options. 
cmake_push_check_state() string (REPLACE ";" " " CMAKE_REQUIRED_FLAGS "${Sanitizers_COMPILE_OPTIONS}") check_cxx_source_compiles("int main() {}" Sanitizers_SUPPORTED) if (Sanitizers_SUPPORTED) if ("address" IN_LIST Sanitizers_FIND_COMPONENTS) file (READ ${CMAKE_CURRENT_LIST_DIR}/code_tests/Sanitizers_fiber_test.cc _sanitizers_fiber_test_code) check_cxx_source_compiles ("${_sanitizers_fiber_test_code}" Sanitizers_FIBER_SUPPORT) endif () endif () cmake_pop_check_state() include (FindPackageHandleStandardArgs) find_package_handle_standard_args (Sanitizers REQUIRED_VARS Sanitizers_COMPILE_OPTIONS Sanitizers_SUPPORTED) if (Sanitizers_FOUND) foreach (component ${Sanitizers_FIND_COMPONENTS}) string (TOUPPER ${component} COMPONENT) set (library Sanitizers::${component}) if (NOT TARGET ${library}) add_library (${library} INTERFACE IMPORTED) set_target_properties (${library} PROPERTIES INTERFACE_COMPILE_OPTIONS "${Sanitizers_${COMPONENT}_COMPILE_OPTIONS}" INTERFACE_LINK_LIBRARIES "${Sanitizers_${COMPONENT}_COMPILE_OPTIONS}") endif () endforeach () endif () cmake_policy (POP) seastar-25.05.0/cmake/FindSourceLocation.cmake000066400000000000000000000036061501510432000211350ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# # # Copyright (C) 2022 Kefu Chai ( tchaikov@gmail.com ) # include (CheckCXXSourceCompiles) include (CheckCXXSourceRuns) include (CMakePushCheckState) cmake_push_check_state () file (READ ${CMAKE_CURRENT_LIST_DIR}/code_tests/Source_location_test.cc _source_location_test_code) set(CMAKE_REQUIRED_FLAGS "${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}") check_cxx_source_compiles ("${_source_location_test_code}" CxxSourceLocation_SUPPORTED) file (READ ${CMAKE_CURRENT_LIST_DIR}/code_tests/Source_location_default_argument.cc _source_location_test_code) set(CMAKE_REQUIRED_FLAGS "${CMAKE_CXX${CMAKE_CXX_STANDARD}_STANDARD_COMPILE_OPTION}") # see also https://cplusplus.github.io/CWG/issues/2631.html check_cxx_source_runs ("${_source_location_test_code}" CxxSourceLocation_IMPLEMENTS_CWG2631) cmake_pop_check_state () if (NOT (TARGET SourceLocation::source_location)) add_library (SourceLocation::source_location INTERFACE IMPORTED) if ((NOT CxxSourceLocation_SUPPORTED) OR (NOT CxxSourceLocation_IMPLEMENTS_CWG2631)) set_target_properties (SourceLocation::source_location PROPERTIES INTERFACE_COMPILE_DEFINITIONS SEASTAR_BROKEN_SOURCE_LOCATION) endif () endif () seastar-25.05.0/cmake/FindStdAtomic.cmake000066400000000000000000000030501501510432000200640ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2019 Scylladb, Ltd. # function (_stdatomic_can_link var) include (CheckCXXSourceCompiles) set (test_code "int main() {}") set (CMAKE_REQUIRED_LIBRARIES -latomic) check_cxx_source_compiles ("${test_code}" ${var}) endfunction () _stdatomic_can_link (StdAtomic_EXPLICIT_LINK) # # If linking against `-latomic` is successful, then do it unconditionally. # if (StdAtomic_EXPLICIT_LINK) set (StdAtomic_LIBRARY_NAME atomic) set (StdAtomic_LIBRARIES -l${StdAtomic_LIBRARY_NAME}) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (StdAtomic REQUIRED_VARS StdAtomic_LIBRARIES) endif () if (NOT (TARGET StdAtomic::atomic)) add_library (StdAtomic::atomic INTERFACE IMPORTED) set_target_properties (StdAtomic::atomic PROPERTIES INTERFACE_LINK_LIBRARIES "${StdAtomic_LIBRARIES}") endif () seastar-25.05.0/cmake/FindSystemTap-SDT.cmake000066400000000000000000000022721501510432000205630ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2023 Scylladb, Ltd. 
# find_path (SystemTap-SDT_INCLUDE_DIR NAMES sys/sdt.h) mark_as_advanced ( SystemTap-SDT_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (SystemTap-SDT REQUIRED_VARS SystemTap-SDT_INCLUDE_DIR) if (NOT TARGET SystemTap::SDT) add_library (SystemTap::SDT INTERFACE IMPORTED) set_target_properties (SystemTap::SDT PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${SystemTap-SDT_INCLUDE_DIR}) endif () seastar-25.05.0/cmake/FindValgrind.cmake000066400000000000000000000027131501510432000177500ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# # # Copyright (C) 2022 Kefu Chai ( tchaikov@gmail.com ) # find_package (PkgConfig REQUIRED) pkg_check_modules (PC_valgrind QUIET valgrind) find_path (Valgrind_INCLUDE_DIR NAMES valgrind/valgrind.h HINTS ${PC_valgrind_INCLUDEDIR} ${PC_valgrind_INCLUDE_DIRS}) mark_as_advanced ( Valgrind_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (Valgrind REQUIRED_VARS Valgrind_INCLUDE_DIR) if (Valgrind_FOUND) set (Valgrind_INCLUDE_DIRS ${Valgrind_INCLUDE_DIR}) if (NOT (TARGET Valgrind::valgrind)) add_library (Valgrind::valgrind INTERFACE IMPORTED) set_target_properties (Valgrind::valgrind PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${Valgrind_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/Findc-ares.cmake000066400000000000000000000041371501510432000173560ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. 
# find_package (PkgConfig REQUIRED) pkg_check_modules (PC_c-ares QUIET libcares) find_library (c-ares_LIBRARY NAMES cares HINTS ${PC_c-ares_LIBDIR} ${PC_c-ares_LIBRARY_DIRS}) find_path (c-ares_INCLUDE_DIR NAMES ares_dns.h HINTS ${PC_c-ares_INCLUDEDIR} ${PC_c-ares_INCLUDE_DIRS}) if (c-ares_INCLUDE_DIR) foreach (v MAJOR MINOR PATCH) file(STRINGS "${c-ares_INCLUDE_DIR}/ares_version.h" ares_VERSION_LINE REGEX "^#define[ \t]+ARES_VERSION_${v}[ \t]+[0-9]+$") if (ares_VERSION_LINE MATCHES "ARES_VERSION_${v} ([0-9]+)") set (c-ares_VERSION_${v} "${CMAKE_MATCH_1}") endif () unset (ares_VERSION_LINE) endforeach () set (c-ares_VERSION ${c-ares_VERSION_MAJOR}.${c-ares_VERSION_MINOR}.${c-ares_VERSION_PATCH}) endif () mark_as_advanced ( c-ares_LIBRARY c-ares_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (c-ares REQUIRED_VARS c-ares_LIBRARY c-ares_INCLUDE_DIR VERSION_VAR c-ares_VERSION) if (c-ares_FOUND) set (c-ares_LIBRARIES ${c-ares_LIBRARY}) set (c-ares_INCLUDE_DIRS ${c-ares_INCLUDE_DIR}) if (NOT (TARGET c-ares::cares)) add_library (c-ares::cares UNKNOWN IMPORTED) set_target_properties (c-ares::cares PROPERTIES IMPORTED_LOCATION ${c-ares_LIBRARY} INTERFACE_INCLUDE_DIRECTORIES ${c-ares_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/Finddpdk.cmake000066400000000000000000000145641501510432000171330ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_package (PkgConfig REQUIRED) pkg_check_modules (dpdk_PC libdpdk) # we cannot use ${dpdk_PC_STATIC_LDFLAGS} directly, because we want to # export DPDK as a bundle of static libraries, so need to find the # individual paths to all .a files find_path (dpdk_INCLUDE_DIR NAMES rte_atomic.h HINTS ${dpdk_PC_INCLUDE_DIRS} PATH_SUFFIXES dpdk) if (dpdk_INCLUDE_DIR AND EXISTS "${dpdk_INCLUDE_DIR}/rte_build_config.h") file (STRINGS "${dpdk_INCLUDE_DIR}/rte_build_config.h" rte_mbuf_refcnt_atomic REGEX "^#define[ \t ]+RTE_MBUF_REFCNT_ATOMIC") if (rte_mbuf_refcnt_atomic) message (WARNING "DPDK is configured with RTE_MBUF_REFCNT_ATOMIC enabled, " "please disable this option and recompile DPDK for better performance.") endif () endif () set(rte_libs bus_pci bus_vdev cfgfile cmdline cryptodev eal ethdev hash kvargs mbuf mempool mempool_ring net net_bnxt net_cxgbe net_e1000 net_ena net_enic net_i40e net_ixgbe net_nfp net_qede net_ring net_sfc net_vmxnet3 pci rcu ring security telemetry timer) # sfc_efx driver can only build on x86 and aarch64 if (CMAKE_SYSTEM_PROCESSOR MATCHES "amd64|x86_64|aarch64") list (APPEND rte_libs common_sfc_efx) endif () list (APPEND dpdk_REQUIRED dpdk_INCLUDE_DIR) # we prefer static library over the shared library, so just find the # static libraries first. 
set (_cmake_find_library_suffixes_saved ${CMAKE_FIND_LIBRARY_SUFFIXES}) set (CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_STATIC_LIBRARY_SUFFIX} ${CMAKE_SHARED_LIBRARY_SUFFIX}) foreach (lib ${rte_libs}) string(TOUPPER ${lib} upper_lib) set(library_name "dpdk_${upper_lib}_LIBRARY") find_library (${library_name} NAME rte_${lib} HINTS ${dpdk_PC_STATIC_LIBRARY_DIRS}) list (APPEND dpdk_REQUIRED ${library_name}) list (APPEND dpdk_LIBRARIES ${library_name}) if (NOT ${library_name}) continue() endif () set (library_path ${${library_name}}) list (APPEND _dpdk_linker_files ${library_path}) set (dpdk_lib dpdk::${lib}) list (APPEND _dpdk_libraries ${dpdk_lib}) if (dpdk_INCLUDE_DIR AND NOT (TARGET ${dpdk_lib})) add_library (${dpdk_lib} UNKNOWN IMPORTED) set_target_properties (${dpdk_lib} PROPERTIES IMPORTED_LOCATION ${library_path} INTERFACE_INCLUDE_DIRECTORIES ${dpdk_INCLUDE_DIR}) endif () endforeach () # restore the previous saved suffixes set (CMAKE_FIND_LIBRARY_SUFFIXES ${_cmake_find_library_suffixes_saved}) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (dpdk REQUIRED_VARS ${dpdk_REQUIRED}) # DPDK's build system adds certain dependencies conditionally based on what's available # at build time. While most libraries from dpdk_PC_LIBRARIES are handled through the # rte_libs logic elsewhere, external dependencies ('bsd' and 'numa' in this case) are # explicitly handled below. This foreach loop checks if these specific libraries are # present in dpdk_PC_LIBRARIES and adds them to the dpdk_dependencies list if found. foreach (lib "bsd" "numa") if (lib IN_LIST dpdk_PC_STATIC_LIBRARIES) list (APPEND dpdk_dependencies ${lib}) endif() endforeach () # As of DPDK 23.07, if libarchive-dev is present, it will make DPDK depend on the library. # Unfortunately DPDK also has a bug in its .pc file generation and will not include libarchive # dependency under any circumstance. Accordingly, the dependency is added explicitly if libarchive # exists. 
pkg_check_modules (libarchive_PC QUIET libarchive) list(APPEND dpdk_dependencies ${libarchive_PC_LIBRARIES}) if (dpdk_FOUND AND NOT (TARGET dpdk)) get_filename_component (library_suffix "${dpdk_EAL_LIBRARY}" LAST_EXT) # strictly speaking, we should have being using check_c_compiler_flag() # here, but we claim Seastar as a project written in CXX language, and # C is not enabled, so CXX is used here instead. include(CheckCXXCompilerFlag) check_cxx_compiler_flag("-Wno-volatile" _warning_supported_volatile) if(_warning_supported_volatile) # include/generic/rte_spinlock.h increments volatiled-qualified type with # "++". but this is deprecated by GCC, so silence it. set(compile_options INTERFACE_COMPILE_OPTIONS "-Wno-volatile") endif() if (library_suffix STREQUAL CMAKE_STATIC_LIBRARY_SUFFIX) # No pmd driver code will be pulled in without "--whole-archive". To # avoid exposing that to seastar users, combine dpdk into a single # .o file. set (dpdk_object_path "${CMAKE_BINARY_DIR}/dpdk.o") add_custom_command ( OUTPUT ${dpdk_object_path} COMMAND ${CMAKE_CXX_COMPILER} -r # create a relocatable object -o ${dpdk_object_path} -Wl,--whole-archive ${_dpdk_linker_files} DEPENDS ${_dpdk_linker_files}) add_custom_target (dpdk_object DEPENDS ${dpdk_object_path}) add_library (dpdk OBJECT IMPORTED) add_dependencies (dpdk dpdk_object) set_target_properties (dpdk PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${dpdk_INCLUDE_DIR} INTERFACE_LINK_LIBRARIES "${dpdk_dependencies}" IMPORTED_OBJECTS ${dpdk_object_path} ${compile_options}) # we include dpdk in seastar already, but we need to pull in the # dependency libraries linked by dpdk list(TRANSFORM dpdk_dependencies PREPEND "-l" OUTPUT_VARIABLE dpdk_LIBRARIES) add_library (DPDK::dpdk ALIAS dpdk) else () set (dpdk_LIBRARIES ${dpdk_PC_LDFLAGS}) add_library (DPDK::dpdk INTERFACE IMPORTED) set_target_properties (DPDK::dpdk PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${dpdk_PC_INCLUDE_DIRS}" INTERFACE_LINK_LIBRARIES 
"${_dpdk_libraries};${dpdk_dependencies}" ${compile_options}) endif() endif () seastar-25.05.0/cmake/Findhwloc.cmake000066400000000000000000000031551501510432000173170ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_package (PkgConfig REQUIRED) pkg_search_module (PC_hwloc QUIET hwloc) find_library (hwloc_LIBRARY NAMES hwloc HINTS ${PC_hwloc_LIBDIR} ${PC_hwloc_LIBRARY_DIRS}) find_path (hwloc_INCLUDE_DIR NAMES hwloc.h HINTS ${PC_hwloc_INCLUDEDIR} ${PC_hwloc_INCLUDE_DIRS}) mark_as_advanced ( hwloc_LIBRARY hwloc_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (hwloc REQUIRED_VARS hwloc_LIBRARY hwloc_INCLUDE_DIR VERSION_VAR hwloc_VERSION) if (hwloc_FOUND) set (hwloc_LIBRARIES ${hwloc_LIBRARY}) set (hwloc_INCLUDE_DIRS ${hwloc_INCLUDE_DIR}) if (NOT (TARGET hwloc::hwloc)) add_library (hwloc::hwloc UNKNOWN IMPORTED) set_target_properties (hwloc::hwloc PROPERTIES IMPORTED_LOCATION ${hwloc_LIBRARY} INTERFACE_INCLUDE_DIRECTORIES ${hwloc_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/Findlksctp-tools.cmake000066400000000000000000000031101501510432000206300ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). 
See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_library (lksctp-tools_LIBRARY NAMES sctp) find_path (lksctp-tools_INCLUDE_DIR NAMES netinet/sctp.h) mark_as_advanced ( lksctp-tools_LIBRARY lksctp-tools_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (lksctp-tools REQUIRED_VARS lksctp-tools_LIBRARY lksctp-tools_INCLUDE_DIR) set (lksctp-tools_LIBRARIES ${lksctp-tools_LIBRARY}) set (lksctp-tools_INCLUDE_DIRS ${lksctp-tools_INCLUDE_DIR}) if (lksctp-tools_FOUND AND NOT (TARGET lksctp-tools::lksctp-tools)) add_library (lksctp-tools::lksctp-tools UNKNOWN IMPORTED) set_target_properties (lksctp-tools::lksctp-tools PROPERTIES IMPORTED_LOCATION ${lksctp-tools_LIBRARIES} INTERFACE_INCLUDE_DIRECTORIES ${lksctp-tools_INCLUDE_DIRS}) endif () mark_as_advanced ( lksctp-tools_INCLUDE_DIR lksctp-tools_LIBRARY) seastar-25.05.0/cmake/Findlz4.cmake000066400000000000000000000031531501510432000167120ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. 
# # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_package (PkgConfig REQUIRED) pkg_search_module (PC_lz4 QUIET liblz4) find_library (lz4_LIBRARY NAMES lz4 HINTS ${PC_lz4_LIBDIR} ${PC_lz4_LIBRARY_DIRS}) find_path (lz4_INCLUDE_DIR NAMES lz4.h HINTS ${PC_lz4_INCLUDEDIR} ${PC_lz4_INCLUDE_DIRS}) mark_as_advanced ( lz4_LIBRARY lz4_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (lz4 REQUIRED_VARS lz4_LIBRARY lz4_INCLUDE_DIR VERSION_VAR PC_lz4_VERSION) if (lz4_FOUND) set (CMAKE_REQUIRED_LIBRARIES ${lz4_LIBRARY}) set (lz4_LIBRARIES ${lz4_LIBRARY}) set (lz4_INCLUDE_DIRS ${lz4_INCLUDE_DIR}) if (NOT (TARGET lz4::lz4)) add_library (lz4::lz4 UNKNOWN IMPORTED) set_target_properties (lz4::lz4 PROPERTIES IMPORTED_LOCATION ${lz4_LIBRARY} INTERFACE_INCLUDE_DIRECTORIES ${lz4_INCLUDE_DIRS}) endif () endif () seastar-25.05.0/cmake/Findragel.cmake000066400000000000000000000026401501510432000172730ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # find_program ( ragel_RAGEL_EXECUTABLE ragel) if (NOT ragel_RAGEL_EXECUTABLE) message (FATAL_ERROR "ragel is required for processing .rl source files!") endif () mark_as_advanced (ragel_RAGEL_EXECUTABLE) set (_ragel_version_pattern "[0-9]+\\.[0-9]+\\.[0-9]+(\\.[0-9]+)?") if (ragel_RAGEL_EXECUTABLE) set (ragel_FOUND ON) execute_process (COMMAND ${ragel_RAGEL_EXECUTABLE} -v OUTPUT_VARIABLE _ragel_version_output) if (${_ragel_version_output} MATCHES "version (${_ragel_version_pattern})") set (ragel_VERSION ${CMAKE_MATCH_1}) endif () endif () find_package_handle_standard_args (ragel REQUIRED_VARS ragel_RAGEL_EXECUTABLE VERSION_VAR ragel_VERSION) seastar-25.05.0/cmake/Findrt.cmake000066400000000000000000000030551501510432000166270ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # set (_rt_test_source ${CMAKE_CURRENT_LIST_DIR}/code_tests/rt_test.cc) # Try to compile without the library first. try_compile (rt_NO_EXPLICIT_LINK ${CMAKE_CURRENT_BINARY_DIR} SOURCES ${_rt_test_source}) if (rt_NO_EXPLICIT_LINK) set (rt_FOUND yes) else () # The `rt` library is required. 
try_compile (_rt_test ${CMAKE_CURRENT_BINARY_DIR} SOURCES ${_rt_test_source} LINK_LIBRARIES rt) if (_rt_test) set (rt_LIBRARY_NAME rt) set (rt_LIBRARIES -l${rt_LIBRARY_NAME}) endif () include (FindPackageHandleStandardArgs) find_package_handle_standard_args (rt REQUIRED_VARS rt_LIBRARIES) endif () if (rt_FOUND AND NOT (TARGET rt::rt)) add_library (rt::rt INTERFACE IMPORTED) set_target_properties (rt::rt PROPERTIES INTERFACE_LINK_LIBRARIES "${rt_LIBRARIES}") endif () seastar-25.05.0/cmake/Finducontext.cmake000066400000000000000000000031331501510432000200500ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2023 Scylladb, Ltd. # # Try to compile without the library first. include (CheckFunctionExists) check_function_exists (getcontext ucontext_NO_EXPLICIT_LINK) if (ucontext_NO_EXPLICIT_LINK) set (ucontext_FOUND yes) else () # The `libucontext` library is required. 
find_package (PkgConfig QUIET REQUIRED) pkg_check_modules (PC_ucontext QUIET ucontext) find_library (ucontext_LIBRARY NAMES ucontext HINTS ${PC_ucontext_LIBDIR} ${PC_ucontext_LIBRARY_DIRS}) mark_as_advanced (ucontext_LIBRARY) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (ucontext REQUIRED_VARS ucontext_LIBRARY) endif () if (ucontext_FOUND AND NOT (TARGET ucontext::ucontext)) add_library (ucontext::ucontext INTERFACE IMPORTED) set_target_properties (ucontext::ucontext PROPERTIES INTERFACE_LINK_LIBRARIES "${ucontext_LIBRARY}") endif () seastar-25.05.0/cmake/Findyaml-cpp.cmake000066400000000000000000000046451501510432000177320ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. 
# find_package (PkgConfig REQUIRED) pkg_search_module (PC_yaml-cpp QUIET yaml-cpp) find_library (yaml-cpp_LIBRARY_RELEASE NAMES yaml-cpp HINTS ${PC_yaml-cpp_LIBDIR} ${PC_yaml-cpp_LIBRARY_DIRS}) find_library (yaml-cpp_LIBRARY_DEBUG NAMES yaml-cppd HINTS ${PC_yaml-cpp_LIBDIR} ${PC_yaml-cpp_LIBRARY_DIRS}) include (SelectLibraryConfigurations) select_library_configurations (yaml-cpp) find_path (yaml-cpp_INCLUDE_DIR NAMES yaml-cpp/yaml.h PATH_SUFFIXES yaml-cpp HINTS ${PC_yaml-cpp_INCLUDEDIR} ${PC_yaml-cpp_INCLUDE_DIRS}) mark_as_advanced ( yaml-cpp_LIBRARY_RELEASE yaml-cpp_LIBRARY_DEBUG yaml-cpp_INCLUDE_DIR) include (FindPackageHandleStandardArgs) find_package_handle_standard_args (yaml-cpp REQUIRED_VARS yaml-cpp_LIBRARY yaml-cpp_INCLUDE_DIR VERSION_VAR yaml-cpp_VERSION) if (yaml-cpp_FOUND) set (yaml-cpp_LIBRARIES ${yaml-cpp_LIBRARY}) set (yaml-cpp_INCLUDE_DIRS ${yaml-cpp_INCLUDE_DIR}) if (NOT (TARGET yaml-cpp::yaml-cpp)) add_library (yaml-cpp::yaml-cpp UNKNOWN IMPORTED) set_target_properties (yaml-cpp::yaml-cpp PROPERTIES INTERFACE_INCLUDE_DIRECTORIES ${yaml-cpp_INCLUDE_DIRS}) if (EXISTS "${yaml-cpp_LIBRARY}") set_target_properties (yaml-cpp::yaml-cpp PROPERTIES IMPORTED_LOCATION "${yaml-cpp_LIBRARY}") endif () foreach (build "RELEASE" "DEBUG") if (yaml-cpp_LIBRARY_${build}) set_property (TARGET yaml-cpp::yaml-cpp APPEND PROPERTY IMPORTED_CONFIGURATIONS "${build}") set_target_properties (yaml-cpp::yaml-cpp PROPERTIES IMPORTED_LOCATION_${build} "${yaml-cpp_LIBRARY_${build}}") endif () endforeach () endif () endif () seastar-25.05.0/cmake/SeastarConfig.cmake.in000066400000000000000000000043271501510432000205410ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. 
# # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # list (APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_LIST_DIR}) if (CMAKE_CXX_STANDARD) set (current_cxx_standard ${CMAKE_CXX_STANDARD}) else () set (current_cxx_standard ${CMAKE_CXX_STANDARD_DEFAULT}) endif () if (NOT (current_cxx_standard STREQUAL @CMAKE_CXX_STANDARD@)) message(WARNING "C++ Standard mismatch detected: - Seastar was compiled with: C++@CMAKE_CXX_STANDARD@ - This project is configured to use: C++${current_cxx_standard} This mismatch may lead to build failures due to differences in the supported \ features of these two standards. Please adjust your project's C++ standard to \ match Seastar.") endif () if (NOT ((CMAKE_CXX_COMPILER_ID STREQUAL @CMAKE_CXX_COMPILER_ID@) AND (CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL @CMAKE_CXX_COMPILER_VERSION@))) message (WARNING "Compiler mismatch detected: - Seastar was compiled with: @CMAKE_CXX_COMPILER_ID@ @CMAKE_CXX_COMPILER_VERSION@ \ - This project is configured to use: ${CMAKE_CXX_COMPILER_ID} ${CMAKE_CXX_COMPILER_VERSION} This mismatch may lead to build failures due to differences in supported features and ABI compatibility. \ Please configure your project to use the same C++ compiler to match Seastar.") endif () # # Dependencies. 
# include (SeastarDependencies) set (Seastar_DPDK @Seastar_DPDK@) set (Seastar_IO_URING @Seastar_IO_URING@) set (Seastar_HWLOC @Seastar_HWLOC@) seastar_find_dependencies () if (NOT TARGET Seastar::seastar) include ("${CMAKE_CURRENT_LIST_DIR}/SeastarTargets.cmake") endif () seastar-25.05.0/cmake/SeastarDependencies.cmake000066400000000000000000000107021501510432000213070ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2019 Scylladb, Ltd. # include(CMakeParseArguments) # This is required because cmake-boost may return to Boost_{component}_LIBRARY: # - /usr/lib64/libboost_foo.so # - Boost::foo # While pkgconf's .pc file consumers expect argument which can be passed as # part of the command line arguments set (Boost_NO_BOOST_CMAKE ON) # for including the fix of https://github.com/boostorg/test/pull/252 set (_seastar_boost_version 1.73.0) # This is the minimum version of Boost we need the CMake-bundled `FindBoost.cmake` to know about. 
find_package (Boost ${_seastar_boost_version}) if (Boost_VERSION_STRING VERSION_LESS 1.81.0) set_target_properties (Boost::boost PROPERTIES INTERFACE_COMPILE_DEFINITIONS "BOOST_NO_CXX98_FUNCTION_BASE") endif () if (CMAKE_FIND_PACKAGE_NAME) # used inside find_package(Seastar) include (CMakeFindDependencyMacro) macro (seastar_find_dep package) cmake_parse_arguments(args "REQUIRED" "" "" ${ARGN}) if (arg_REQUIRED) find_dependency (${package} ${arg_UNPARSED_ARGUMENTS}) else () # some packages are not REQUIRED, so we just check for them instead of # populating "REQUIRED" from the original find_package() call. find_package (${package} ${ARGN}) endif () endmacro () else() macro (seastar_find_dep package) # used when configuring Seastar find_package (${package} ${ARGN}) endmacro () endif () macro (seastar_find_dependencies) # # List of Seastar dependencies that is meant to be used # both in Seastar configuration and by clients which # consume Seastar via SeastarConfig.cmake. # # `unit_test_framework` is not required in the case we are building Seastar # without the testing library, however the component is always specified as required # to keep the CMake code minimalistic and easy-to-use. seastar_find_dep (Boost ${_seastar_boost_version} REQUIRED COMPONENTS filesystem program_options thread unit_test_framework) seastar_find_dep (c-ares 1.13 REQUIRED) if (c-ares_VERSION VERSION_GREATER_EQUAL 1.33.0 AND c-ares_VERSION VERSION_LESS 1.34.1) # https://github.com/scylladb/seastar/issues/2472 message (FATAL_ERROR "c-ares ${c-ares_VERSION} is not supported. 
" "Seastar requires c-ares version <1.33 or >=1.34.1 ") endif () if (Seastar_DPDK) seastar_find_dep (dpdk) endif() seastar_find_dep (fmt 8.1.1 REQUIRED) seastar_find_dep (lz4 1.7.3 REQUIRED) seastar_find_dep (GnuTLS 3.3.26 REQUIRED) if (Seastar_IO_URING) seastar_find_dep (LibUring 2.0 REQUIRED) endif() seastar_find_dep (LinuxMembarrier) seastar_find_dep (Sanitizers) seastar_find_dep (SourceLocation) seastar_find_dep (StdAtomic REQUIRED) seastar_find_dep (SystemTap-SDT) if (Seastar_HWLOC) seastar_find_dep (hwloc 1.11.2 REQUIRED) endif() seastar_find_dep (lksctp-tools REQUIRED) seastar_find_dep (rt REQUIRED) seastar_find_dep (ucontext REQUIRED) seastar_find_dep (yaml-cpp REQUIRED VERSION 0.5.1) # workaround for https://gitlab.kitware.com/cmake/cmake/-/issues/25079 # since protobuf v22.0, it started using abseil, see # https://github.com/protocolbuffers/protobuf/releases/tag/v22.0 . # but due to https://gitlab.kitware.com/cmake/cmake/-/issues/25079, # CMake's FindProtobuf does add this linkage yet. fortunately, # ProtobufConfig.cmake provided by protobuf defines this linkage. so we try # the CMake package configuration file first, and fall back to CMake's # FindProtobuf module. find_package (Protobuf QUIET CONFIG) if (Protobuf_FOUND AND Protobuf_VERSION VERSION_GREATER_EQUAL 2.5.0) # do it again, so the message is printed when the package is found seastar_find_dep (Protobuf CONFIG REQUIRED) else () seastar_find_dep (Protobuf 2.5.0 REQUIRED) endif () endmacro () seastar-25.05.0/cmake/TriStateOption.cmake000066400000000000000000000024051501510432000203270ustar00rootroot00000000000000# the "option()" defined by CMake represents a boolean. but somtimes, we want # to enable/disable it depending on the CMAKE_BUILD_TYPE, if user leaves the # option unset. 
function (tri_state_option option) cmake_parse_arguments ( parsed_args "" "CONDITION" "DEFAULT_BUILD_TYPES" ${ARGN}) get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) if (is_multi_config) set (all_build_types ${CMAKE_CONFIGURATION_TYPES}) else () set (all_build_types ${CMAKE_BUILD_TYPE}) endif () # generic boolean values passed as string, potentially from configure.py set (True_STRING_VALUES "ON" "yes" "Yes" "YES" "true" "True" "TRUE") set (Default_STRING_VALUES "DEFAULT" "default" "Default") if ("${option}" IN_LIST True_STRING_VALUES) set (enabled_types ${all_build_types}) elseif ("${option}" IN_LIST Default_STRING_VALUES) set (enabled_types ${parsed_args_DEFAULT_BUILD_TYPES}) else () set (enabled_types "") endif () if (is_multi_config) set (${parsed_args_CONDITION} "$,${enabled_types}>" PARENT_SCOPE) elseif (CMAKE_BUILD_TYPE IN_LIST enabled_types) set (${parsed_args_CONDITION} 1 PARENT_SCOPE) else () set (${parsed_args_CONDITION} 0 PARENT_SCOPE) endif () endfunction () seastar-25.05.0/cmake/check-seastar-include-style.py000077500000000000000000000031731501510432000222510ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import fileinput import os.path import re import sys def check_includes(files, dirname): # Check for include directives with quotes for specified dirname incorrect_include = re.compile(rf'#include\s+"({dirname}/[^\"]+)"') num_errors = 0 for line in fileinput.input(files=files, encoding="utf-8"): # Look for #include \"seastar/...\" pattern if matched := incorrect_include.match(line): location = f"{fileinput.filename()}:{fileinput.lineno()}" header = matched.group(1) print(f"{location}: warning: please include seastar headers using: #include <{header}>") num_errors += 1 return num_errors def main(): # If any incorrect includes are found, fail the check files = [fn for fn in sys.argv[1:] if os.path.exists(fn)] if check_includes(files, "seastar") > 0: sys.exit(1) if __name__ == '__main__': main() seastar-25.05.0/cmake/code_tests/000077500000000000000000000000001501510432000165305ustar00rootroot00000000000000seastar-25.05.0/cmake/code_tests/LinuxMembarrier_test.cc000066400000000000000000000002441501510432000232030ustar00rootroot00000000000000extern "C" { #include } int main() { int x = MEMBARRIER_CMD_PRIVATE_EXPEDITED | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED; (void)x; } seastar-25.05.0/cmake/code_tests/Sanitizers_fiber_test.cc000066400000000000000000000004721501510432000234030ustar00rootroot00000000000000#include extern "C" { void __sanitizer_start_switch_fiber(void**, const void*, size_t); void __sanitizer_finish_switch_fiber(void*, const void**, size_t*); } int main() { __sanitizer_start_switch_fiber(nullptr, nullptr, 0); __sanitizer_finish_switch_fiber(nullptr, nullptr, nullptr); } seastar-25.05.0/cmake/code_tests/Source_location_default_argument.cc000066400000000000000000000003711501510432000255760ustar00rootroot00000000000000#include int test_source_location(int line, std::source_location loc = std::source_location::current()) { return line == loc.line() ? 
0 : 1; } int main() { return test_source_location(__LINE__); } seastar-25.05.0/cmake/code_tests/Source_location_test.cc000066400000000000000000000011611501510432000232250ustar00rootroot00000000000000#if __has_include() #include #endif #ifdef __cpp_lib_source_location using source_location = std::source_location; #elif __has_include() #include using source_location = std::experimental::source_location; #endif #if defined(__cpp_lib_source_location) || defined(__cpp_lib_experimental_source_location) struct format_info { format_info(source_location loc = source_location::current()) noexcept : loc(loc) { } source_location loc; }; #else struct format_info { }; #endif int main() { format_info fi; } seastar-25.05.0/cmake/code_tests/rt_test.cc000066400000000000000000000002341501510432000205220ustar00rootroot00000000000000extern "C" { #include #include } int main() { timer_t td; struct sigevent sev; timer_create(CLOCK_MONOTONIC, &sev, &td); } seastar-25.05.0/cmake/code_tests/stdout_test.cc000066400000000000000000000001221501510432000214130ustar00rootroot00000000000000#include enum class logger_type { stdout, stderr, }; int main() {} seastar-25.05.0/coding-style.md000066400000000000000000000126221501510432000162420ustar00rootroot00000000000000# Seastar Coding Style ## Files Header files have the `.hh` extension, source files use the `.cc` extension. All files must have a license and copyright blurb. Use `#pragma once` instead of an include guard. Header files which contain a public part of the interface of Seastar go in the `include` directory. Internal header and source files which are private to the implementation go in the `src` directory. ## Whitespace Use spaces only; NEVER tabs. Rationale: tabs render differently on each system. An _indent_ is four spaces. A double indent is eight spaces, a half-indent is two spaces. ## Naming We follow the C++ and Boost naming conventions: class names, variables, functions, and concepts are `words_separated_by_whitespace`. 
Private data members are prefixed by an underscore: ```c++ class my_class { int _a_member; public: void foo() { _a_member = 3; } }; ``` Think of the leading underscore as a shorthand for `this->`. Template parameters use `CamelCase` Note: because the Concept Technical Specification used CamelCase for concepts, some Seastar concepts alse use CamelCase. These will be gradually deprecated and replaced with snake_case names. New concepts should use snake_case. ## Including header files In any file, to include a public header file (one in the `include` directory), use an absolute path with `<>` like this: ```c++ #include ``` In any private file, to include a private header file (one in the `src` directory), use an absolute path with `""` like this: ```c++ #include "core/future_impl.hh" ``` Header files in Seastar must be self-contained, i.e., each can be included without having to include specific other headers first. To verify that your change did not break this property, run `ninja checkheaders` in the build directory. ## Braced blocks All nested scopes are braced, even when the language allows omitting the braces (such as an if-statement), this makes patches simpler and is more consistent. The opening brace is merged with the line that opens the scope (class definition, function definition, if statement, etc.) and the body is indented. ```c++ void a_function() { if (some condition) { stmt; } else { stmt; } } ``` An exception is namespaces -- the body is _not_ indented, to prevent files that are almost 100% whitespace left margin. When making a change, if you need to insert an indentation level, you can temporarily break the rules by insering a half-indent, so that the patch is easily reviewable: ```c++ void a_function() { while (something) { // new line - half indent if (some condition) { stmt; } else { stmt; } } // new line } ``` A follow-up patch can restore the indents without any functional changes. 
## Function parameters Avoid output parameters; use return values instead. In/out parameters are tricky, but in some cases they are relatively standard, such as serialization/deserialization. If a function accepts a lambda or an `std::function`, make it the last argument, so that it can be easily provided inline: ```c++ template void function_accepting_a_lambda(int a, int b, Func func); int f() { return function_accepting_a_lambda(2, 3, [] (int x, int y) { return x + y; }); } ``` ## Complex return types If a function returns a complicated return type, put its return type on a separate line, otherwise it becomes hard to see where the return type ends and where the function name begins: ```c++ template template std::vector::some_nested_class> // I'm the return type a_struct::a_function(T3 a, T4 b) { // And I'm the function name // ... } ``` ## Whitespace around operators Whitespace around operators should match their precedence: high precedence = no spaces, low precedency = add spaces: ```c++ return *a + *b; // good return * a+* b; // bad ``` `if`, `while`, `return` (and `template`) are not function calls, so they get a space after the keyword. ## Long lines If a line becomes excessively long (>160 characters?), or is just complicated, break it into two or more lines. The second (and succeeding lines) are _continuation lines_, and have a double indent: ```c++ if ((some_condition && some_other_condition) || (more complicated stuff here...) // continuation line, double indent || (even more complicated stuff)) { // another continuation line do_something(); // back to single indent } ``` Of course, long lines or complex conditions may indicate that refactoring is in order. ## Generic lambdas and types Generic lambdas (`[] (auto param)`) are discouraged where the type is known. Generic lambdas reduce the compiler's and other tools' ability to reason about the code. 
In case the actual type of `param` doesn't match the programmers expectations, the compiler will only detect an error in the lambda body, or perhaps even lower down the stack if more generic functions are called. In the case of an IDE, most of its functionality is disabled in a generic lambda, since it can't assume anything about that parameter. Of course, when there is a need to support multiple types, genericity is the correct tool. Even then, type parameters should be constrained with concepts, in order to catch type mismatches early rather than deep in the instantiation chain. seastar-25.05.0/configure.py000077500000000000000000000270261501510432000156610ustar00rootroot00000000000000#!/usr/bin/env python3 # # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
# import argparse import os import seastar_cmake import subprocess import tempfile tempfile.tempdir = "./build/tmp" def add_tristate(arg_parser, name, dest, help, default=None): arg_parser.add_argument('--enable-' + name, dest=dest, action='store_true', default=default, help='Enable ' + help + ' [default]' if default else '') arg_parser.add_argument('--disable-' + name, dest=dest, action='store_false', default=None, help='Disable ' + help) def try_compile(compiler, source='', flags=[]): return try_compile_and_link(compiler, source, flags=flags + ['-c']) def ensure_tmp_dir_exists(): if not os.path.exists(tempfile.tempdir): os.makedirs(tempfile.tempdir) def try_compile_and_link(compiler, source='', flags=[]): ensure_tmp_dir_exists() with tempfile.NamedTemporaryFile() as sfile: ofd, ofile = tempfile.mkstemp() os.close(ofd) try: sfile.file.write(bytes(source, 'utf-8')) sfile.file.flush() # We can't write to /dev/null, since in some cases (-ftest-coverage) gcc will create an auxiliary # output file based on the name of the output file, and "/dev/null.gcsa" is not a good name return subprocess.call([compiler, '-x', 'c++', '-o', ofile, sfile.name] + flags, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0 finally: if os.path.exists(ofile): os.unlink(ofile) def standard_supported(standard, compiler='g++'): return try_compile(compiler=compiler, source='', flags=['-std=' + standard]) arg_parser = argparse.ArgumentParser('Configure seastar') arg_parser.add_argument('--mode', action='store', choices=seastar_cmake.SUPPORTED_MODES + ['all'], default='all') arg_parser.add_argument('--build-root', action='store', default=seastar_cmake.DEFAULT_BUILD_ROOT, type=str, help='The name of the build root build directoy: using a different name allows multiple ' 'configurations to co-exist in the same repository') arg_parser.add_argument('--cflags', action = 'store', dest='user_cflags', default='', help='Extra flags for the C++ compiler') arg_parser.add_argument('--ldflags', 
action='store', dest='user_ldflags', default='', help='Extra flags for the linker') arg_parser.add_argument('--optflags', action='store', dest='user_optflags', default='', help='Extra optimization flags for the release mode') arg_parser.add_argument('--api-level', action='store', dest='api_level', default='7', help='Compatibility API level (7=latest)') arg_parser.add_argument('--compiler', action='store', dest='cxx', default='g++', help='C++ compiler path') arg_parser.add_argument('--c-compiler', action='store', dest='cc', default='gcc', help='C compiler path (for bundled libraries such as dpdk)') arg_parser.add_argument('--ccache', nargs='?', const='ccache', default='', metavar='CCACHE_BINARY_PATH', help='Use ccache to cache compilation (and optionally provide a path to ccache binary)') arg_parser.add_argument('--c++-standard', action='store', dest='cpp_standard', default='', help='C++ standard to build with') arg_parser.add_argument('--cook', action='append', dest='cook', default=[], help='Supply this dependency locally for development via `cmake-cooking` (can be repeated)') arg_parser.add_argument('--verbose', dest='verbose', action='store_true', help='Make configure output more verbose.') arg_parser.add_argument('--scheduling-groups-count', action='store', dest='scheduling_groups_count', default='16', help='Number of available scheduling groups in the reactor') add_tristate( arg_parser, name='dpdk', dest='dpdk', help='DPDK support') add_tristate( arg_parser, name='cxx-modules', dest='cxx_modules', help='build as C++20 module') add_tristate( arg_parser, name='hwloc', dest='hwloc', help='hwloc support') add_tristate( arg_parser, name='alloc-failure-injector', dest='alloc_failure_injection', help='allocation failure injection') add_tristate( arg_parser, name='task-backtrace', dest='task_backtrace', help='Collect backtrace at deferring points') add_tristate( arg_parser, name='unused-result-error', dest="unused_result_error", help='Make [[nodiscard]] violations an 
error') add_tristate( arg_parser, name='debug-shared-ptr', dest="debug_shared_ptr", help='Debug shared_ptr') add_tristate( arg_parser, name='io_uring', dest='io_uring', help='Support io_uring via liburing') arg_parser.add_argument('--allocator-page-size', dest='alloc_page_size', type=int, help='override allocator page size') arg_parser.add_argument('--without-tests', dest='exclude_tests', action='store_true', help='Do not build tests by default') arg_parser.add_argument('--without-apps', dest='exclude_apps', action='store_true', help='Do not build applications by default') arg_parser.add_argument('--without-demos', dest='exclude_demos', action='store_true', help='Do not build demonstrations by default') arg_parser.add_argument('--split-dwarf', dest='split_dwarf', action='store_true', default=False, help='use of split dwarf (https://gcc.gnu.org/wiki/DebugFission) to speed up linking') arg_parser.add_argument('--compile-commands-json', dest='cc_json', action='store_true', help='Generate a compile_commands.json file for integration with clangd and other tools.') arg_parser.add_argument('--heap-profiling', dest='heap_profiling', action='store_true', default=False, help='Enable heap profiling') arg_parser.add_argument('--dpdk-machine', default='native', help='Specify the target architecture') add_tristate(arg_parser, name='deferred-action-require-noexcept', dest='deferred_action_require_noexcept', help='noexcept requirement for deferred actions', default=True) arg_parser.add_argument('--prefix', dest='install_prefix', default='/usr/local', help='Root installation path of Seastar files') args = arg_parser.parse_args() def identify_best_standard(cpp_standards, compiler): """Returns the first C++ standard accepted by the compiler in the sequence, assuming the "best" standards appear first. If no standards are accepted, we fail configure.py. There is not point of letting the user attempt to build with a standard that is known not to be supported. 
""" for std in cpp_standards: if standard_supported('c++{}'.format(std), compiler): return std raise Exception(f"{compiler} does not seem to support any of Seastar's preferred C++ standards - {cpp_standards}. Please upgrade your compiler.") if not args.cpp_standard: cpp_standards = ['23', '20'] args.cpp_standard = identify_best_standard(cpp_standards, compiler=args.cxx) MODES = seastar_cmake.SUPPORTED_MODES if args.mode == 'all' else [args.mode] # For convenience. tr = seastar_cmake.translate_arg MODE_TO_CMAKE_BUILD_TYPE = {'release': 'RelWithDebInfo', 'debug': 'Debug', 'dev': 'Dev', 'sanitize': 'Sanitize' } def configure_mode(mode): BUILD_PATH = seastar_cmake.build_path(mode, build_root=args.build_root) CFLAGS = seastar_cmake.convert_strings_to_cmake_list( args.user_cflags, args.user_optflags if seastar_cmake.is_release_mode(mode) else '') LDFLAGS = seastar_cmake.convert_strings_to_cmake_list(args.user_ldflags) TRANSLATED_ARGS = [ '-DCMAKE_BUILD_TYPE={}'.format(MODE_TO_CMAKE_BUILD_TYPE[mode]), '-DCMAKE_CXX_COMPILER={}'.format(args.cxx), '-DCMAKE_CXX_STANDARD={}'.format(args.cpp_standard), '-DCMAKE_CXX_COMPILER_LAUNCHER={}'.format(args.ccache), '-DCMAKE_INSTALL_PREFIX={}'.format(args.install_prefix), '-DCMAKE_EXPORT_COMPILE_COMMANDS={}'.format('yes' if args.cc_json else 'no'), '-DBUILD_SHARED_LIBS={}'.format('yes' if mode in ('debug', 'dev') else 'no'), '-DSeastar_API_LEVEL={}'.format(args.api_level), '-DSeastar_SCHEDULING_GROUPS_COUNT={}'.format(args.scheduling_groups_count), tr(args.exclude_tests, 'EXCLUDE_TESTS_FROM_ALL'), tr(args.exclude_apps, 'EXCLUDE_APPS_FROM_ALL'), tr(args.exclude_demos, 'EXCLUDE_DEMOS_FROM_ALL'), tr(CFLAGS, 'CXX_FLAGS'), tr(LDFLAGS, 'LD_FLAGS'), tr(args.cxx_modules, 'MODULE'), tr(args.dpdk, 'DPDK'), tr(args.dpdk_machine, 'DPDK_MACHINE'), tr(args.hwloc, 'HWLOC', value_when_none='yes'), tr(args.io_uring, 'IO_URING', value_when_none=None), tr(args.alloc_failure_injection, 'ALLOC_FAILURE_INJECTION', value_when_none='DEFAULT'), 
tr(args.task_backtrace, 'TASK_BACKTRACE'), tr(args.alloc_page_size, 'ALLOC_PAGE_SIZE'), tr(args.split_dwarf, 'SPLIT_DWARF'), tr(args.heap_profiling, 'HEAP_PROFILING'), tr(args.deferred_action_require_noexcept, 'DEFERRED_ACTION_REQUIRE_NOEXCEPT'), tr(args.unused_result_error, 'UNUSED_RESULT_ERROR'), tr(args.debug_shared_ptr, 'DEBUG_SHARED_PTR', value_when_none='default'), ] ingredients_to_cook = set(args.cook) if args.dpdk: ingredients_to_cook.add('dpdk') # Generate a new build by pointing to the source directory. if ingredients_to_cook: # the C compiler is only used when building ingredients. TRANSLATED_ARGS.append(f'-DCMAKE_C_COMPILER={args.cc}') # We need to use cmake-cooking for some dependencies. inclusion_arguments = [] for ingredient in ingredients_to_cook: inclusion_arguments.extend(['-i', ingredient]) ARGS = seastar_cmake.COOKING_BASIC_ARGS + inclusion_arguments if args.user_cflags: ARGS += ['-s', f'CXXFLAGS={args.user_cflags}'] if args.user_ldflags: ARGS += ['-s', f'LDFLAGS={args.user_ldflags}'] ARGS += ['-d', BUILD_PATH, '--'] dir = seastar_cmake.ROOT_PATH else: # When building without cooked dependencies, we can invoke cmake directly. We can't call # cooking.sh, because without any -i parameters, it will try to build # everything. root_relative_to_build = os.path.relpath(seastar_cmake.ROOT_PATH, BUILD_PATH) ARGS = ['cmake', '-G', 'Ninja', root_relative_to_build] dir = BUILD_PATH # filter out empty args, their values are actually "guess", # CMake should be able to figure it out. 
ARGS += filter(lambda arg: arg, TRANSLATED_ARGS) if args.verbose: print("Running CMake in '{}' ...".format(dir)) print(" \\\n ".join(ARGS)) os.makedirs(BUILD_PATH, exist_ok=True) subprocess.check_call(ARGS, shell=False, cwd=dir) for mode in MODES: configure_mode(mode) seastar-25.05.0/cooking.sh000077500000000000000000000635501501510432000153150ustar00rootroot00000000000000#!/bin/bash # # Copyright 2018 Jesse Haber-Kucharsky # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # This is cmake-cooking v0.10.0 # The home of cmake-cooking is https://github.com/hakuch/CMakeCooking # set -e CMAKE=${CMAKE:-cmake} invoked_args=("$@") source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" initial_wd=$(pwd) memory_file="${initial_wd}/.cooking_memory" recipe="${source_dir}/cooking_recipe.cmake" declare -a excluded_ingredients declare -a included_ingredients build_dir="${initial_wd}/build" build_type="Debug" # Depends on `build_dir`. ingredients_dir="" generator="Ninja" list_only="" nested="" usage() { cat <&2 } while getopts "ar:e:i:d:p:t:g:s:f:lhx" arg; do case "${arg}" in a) if [ ! -f "${memory_file}" ]; then echo "No previous invocation found to recall!" 
>&2 exit 1 fi source "${memory_file}" run_previous && exit 0 ;; r) if [[ "${OPTARG}" = /* ]]; then recipe=${OPTARG} else recipe="${source_dir}/${OPTARG}" fi ;; e) if [[ ${#included_ingredients[@]} -ne 0 ]]; then yell_include_exclude_mutually_exclusive exit 1 fi excluded_ingredients+=(${OPTARG}) ;; i) if [[ ${#excluded_ingredients[@]} -ne 0 ]]; then yell_include_exclude_mutually_exclusive exit 1 fi included_ingredients+=(${OPTARG}) ;; d) build_dir=$(realpath "${OPTARG}") ;; p) ingredients_dir=$(realpath "${OPTARG}") ;; t) build_type=${OPTARG} ;; g) generator=${OPTARG} ;; s) parse_assignment "${OPTARG}" ;; f) export_dir=$(realpath "${OPTARG}") ;; l) list_only="1" ;; h) usage; exit 0 ;; x) nested="1" ;; *) usage; exit 1 ;; esac done shift $((OPTIND - 1)) cooking_dir="${build_dir}/_cooking" cache_file="${build_dir}/CMakeCache.txt" ingredients_ready_file="${cooking_dir}/ready.txt" if [ -z "${ingredients_dir}" ]; then ingredients_dir="${cooking_dir}/installed" fi mkdir -p "${build_dir}" cat <<'EOF' > "${build_dir}/Cooking.cmake" # # Copyright 2018 Jesse Haber-Kucharsky # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # # This file was generated by cmake-cooking v0.10.0 # The home of cmake-cooking is https://github.com/hakuch/CMakeCooking # macro (project name) set (_cooking_dir ${CMAKE_CURRENT_BINARY_DIR}/_cooking) if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR) set (_cooking_root ON) else () set (_cooking_root OFF) endif () find_program (Cooking_STOW_EXECUTABLE stow "Executable path of GNU Stow.") if (NOT Cooking_STOW_EXECUTABLE) message (FATAL_ERROR "Cooking: GNU Stow is required!") endif () set (Cooking_INGREDIENTS_DIR ${_cooking_dir}/installed CACHE PATH "Directory where ingredients will be installed.") set (Cooking_EXCLUDED_INGREDIENTS "" CACHE STRING "Semicolon-separated list of ingredients that are not provided by Cooking.") set (Cooking_INCLUDED_INGREDIENTS "" CACHE STRING "Semicolon-separated list of ingredients that are provided by Cooking.") option (Cooking_LIST_ONLY "Available ingredients will be listed and nothing will be installed." OFF) set (Cooking_RECIPE "" CACHE STRING "Configure ${name}'s dependencies according to the named recipe.") if ((NOT DEFINED Cooking_EXCLUDED_INGREDIENTS) OR (Cooking_EXCLUDED_INGREDIENTS STREQUAL "")) set (_cooking_is_excluding OFF) else () set (_cooking_is_excluding ON) endif () if ((NOT DEFINED Cooking_INCLUDED_INGREDIENTS) OR (Cooking_INCLUDED_INGREDIENTS STREQUAL "")) set (_cooking_is_including OFF) else () set (_cooking_is_including ON) endif () if (_cooking_is_excluding AND _cooking_is_including) message ( FATAL_ERROR "Cooking: The EXCLUDED_INGREDIENTS and INCLUDED_INGREDIENTS lists are mutually exclusive options!") endif () if (_cooking_root) _project (${name} ${ARGN}) if (NOT ("${Cooking_RECIPE}" STREQUAL "")) add_custom_target (_cooking_ingredients) set (_cooking_ready_marker_file ${_cooking_dir}/ready.txt) add_custom_command ( OUTPUT ${_cooking_ready_marker_file} DEPENDS _cooking_ingredients COMMAND ${CMAKE_COMMAND} -E touch ${_cooking_ready_marker_file}) add_custom_target (_cooking_ingredients_ready DEPENDS 
${_cooking_ready_marker_file}) set (_cooking_local_synchronize_marker_file ${Cooking_INGREDIENTS_DIR}/.cooking_local_synchronize) add_custom_command ( OUTPUT ${_cooking_local_synchronize_marker_file} COMMAND ${CMAKE_COMMAND} -E touch ${_cooking_local_synchronize_marker_file}) add_custom_target (_cooking_marked_for_local_synchronization DEPENDS ${_cooking_local_synchronize_marker_file}) list (APPEND CMAKE_PREFIX_PATH ${Cooking_INGREDIENTS_DIR}) include (${Cooking_RECIPE}) if (NOT EXISTS ${_cooking_ready_marker_file}) return () endif () endif () endif () endmacro () function (_cooking_set_union x y var) set (r ${${x}}) foreach (e ${${y}}) list (APPEND r ${e}) endforeach () list (REMOVE_DUPLICATES r) set (${var} ${r} PARENT_SCOPE) endfunction () function (_cooking_set_difference x y var) set (r ${${x}}) foreach (e ${${y}}) if (${e} IN_LIST ${x}) list (REMOVE_ITEM r ${e}) endif () endforeach () set (${var} ${r} PARENT_SCOPE) endfunction () function (_cooking_set_intersection x y var) set (r "") foreach (e ${${y}}) if (${e} IN_LIST ${x}) list (APPEND r ${e}) endif () endforeach () list (REMOVE_DUPLICATES r) set (${var} ${r} PARENT_SCOPE) endfunction () function (_cooking_query_by_key list key var) list (FIND ${list} ${key} index) if (${index} EQUAL "-1") set (value NOTFOUND) else () math (EXPR value_index "${index} + 1") list (GET ${list} ${value_index} value) endif () set (${var} ${value} PARENT_SCOPE) endfunction () function (_cooking_populate_ep_parameter) cmake_parse_arguments ( pa "" "EXTERNAL_PROJECT_ARGS_LIST;PARAMETER;DEFAULT_VALUE" "" ${ARGN}) string (TOLOWER ${pa_PARAMETER} parameter_lower) _cooking_query_by_key (${pa_EXTERNAL_PROJECT_ARGS_LIST} ${pa_PARAMETER} ${parameter_lower}) set (value ${${parameter_lower}}) set (var_name _cooking_${parameter_lower}) set (ep_var_name _cooking_ep_${parameter_lower}) if (NOT value) set (${var_name} ${pa_DEFAULT_VALUE} PARENT_SCOPE) set (${ep_var_name} ${pa_PARAMETER} ${pa_DEFAULT_VALUE} PARENT_SCOPE) else () set 
(${var_name} ${value} PARENT_SCOPE) set (${ep_var_name} "" PARENT_SCOPE) endif () endfunction () function (_cooking_define_listing_targets) cmake_parse_arguments ( pa "" "NAME;SOURCE_DIR;RECIPE" "REQUIRES" ${ARGN}) set (stale_file ${Cooking_INGREDIENTS_DIR}/.cooking_stale_ingredient_${pa_NAME}) add_custom_command ( OUTPUT ${stale_file} COMMAND ${CMAKE_COMMAND} -E touch ${stale_file}) add_custom_target (_cooking_ingredient_${pa_NAME}_stale DEPENDS ${stale_file}) set (commands COMMAND ${CMAKE_COMMAND} -E touch ${Cooking_INGREDIENTS_DIR}/.cooking_ingredient_${pa_NAME}) if (pa_RECIPE) if (pa_RECIPE STREQUAL ) set (recipe_args "") else () set (recipe_args -r ${pa_RECIPE}) endif () list (INSERT commands 0 COMMAND ${pa_SOURCE_DIR}/cooking.sh ${recipe_args} -p ${Cooking_INGREDIENTS_DIR} -g ${CMAKE_GENERATOR} -x -l) endif () add_custom_command ( OUTPUT ${Cooking_INGREDIENTS_DIR}/.cooking_ingredient_${pa_NAME} DEPENDS _cooking_ingredient_${pa_NAME}_stale ${stale_file} ${commands}) add_custom_target (_cooking_ingredient_${pa_NAME}_listed DEPENDS ${Cooking_INGREDIENTS_DIR}/.cooking_ingredient_${pa_NAME}) foreach (d ${pa_REQUIRES}) add_dependencies (_cooking_ingredient_${pa_NAME}_listed _cooking_ingredient_${d}_listed) endforeach () add_dependencies (_cooking_ingredients _cooking_ingredient_${pa_NAME}_listed) endfunction () function (_cooking_adjust_requirements) cmake_parse_arguments ( pa "" "IS_EXCLUDING;IS_INCLUDING;OUTPUT_LIST" "REQUIREMENTS" ${ARGN}) if (pa_IS_EXCLUDING) # Strip out any dependencies that are excluded. _cooking_set_difference ( pa_REQUIREMENTS Cooking_EXCLUDED_INGREDIENTS pa_REQUIREMENTS) elseif (_cooking_is_including) # Eliminate dependencies that have not been included. 
_cooking_set_intersection ( pa_REQUIREMENTS Cooking_INCLUDED_INGREDIENTS pa_REQUIREMENTS) endif () set (${pa_OUTPUT_LIST} ${pa_REQUIREMENTS} PARENT_SCOPE) endfunction () function (_cooking_populate_ep_depends) cmake_parse_arguments ( pa "" "" "REQUIREMENTS" ${ARGN}) if (pa_REQUIREMENTS) set (value DEPENDS) foreach (d ${pa_REQUIREMENTS}) list (APPEND value ingredient_${d}) endforeach () else () set (value "") endif () set (_cooking_ep_depends ${value} PARENT_SCOPE) endfunction () function (_cooking_prepare_restrictions_arguments) cmake_parse_arguments ( pa "" "IS_EXCLUDING;IS_INCLUDING;OUTPUT_LIST" "REQUIREMENTS" ${ARGN}) set (args "") if (pa_IS_INCLUDING) _cooking_set_difference ( Cooking_INCLUDED_INGREDIENTS pa_REQUIREMENTS included) foreach (x ${included}) list (APPEND args -i ${x}) endforeach () elseif (pa_IS_EXCLUDING) _cooking_set_union ( Cooking_EXCLUDED_INGREDIENTS pa_REQUIREMENTS excluded) foreach (x ${excluded}) list (APPEND args -e ${x}) endforeach () else () foreach (x ${pa_REQUIREMENTS}) list (APPEND args -e ${x}) endforeach () endif () set (${pa_OUTPUT_LIST} ${args} PARENT_SCOPE) endfunction () function (_cooking_determine_common_cmake_args output) string (REPLACE ";" ":::" prefix_path_with_colons "${CMAKE_PREFIX_PATH}") set (cmake_args "-G" "${CMAKE_GENERATOR}") if (CMAKE_CXX_FLAGS) list(APPEND cmake_args -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}) endif () if (CMAKE_C_FLAGS) list(APPEND cmake_args -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}) endif () list (APPEND cmake_args -DCMAKE_INSTALL_PREFIX= -DCMAKE_PREFIX_PATH=${prefix_path_with_colons} -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} -DBUILD_SHARED_LIBS=${BUILD_SHARED_LIBS}) set (${output} ${cmake_args} PARENT_SCOPE) endfunction () function (_cooking_populate_ep_configure_command) cmake_parse_arguments ( pa "" "IS_EXCLUDING;IS_INCLUDING;RECIPE;EXTERNAL_PROJECT_ARGS_LIST" "REQUIREMENTS;CMAKE_ARGS;COOKING_CMAKE_ARGS" ${ARGN}) if (pa_RECIPE) 
if (pa_RECIPE STREQUAL ) set (recipe_args "") else () set (recipe_args -r ${pa_RECIPE}) endif () _cooking_prepare_restrictions_arguments ( IS_EXCLUDING ${pa_IS_EXCLUDING} IS_INCLUDING ${pa_IS_INCLUDING} REQUIREMENTS ${pa_REQUIREMENTS} OUTPUT_LIST restrictions_args) set (value CONFIGURE_COMMAND /cooking.sh ${recipe_args} -d -p ${Cooking_INGREDIENTS_DIR} -g ${CMAKE_GENERATOR} -x ${restrictions_args} -- ${pa_COOKING_CMAKE_ARGS}) elseif (NOT (CONFIGURE_COMMAND IN_LIST ${pa_EXTERNAL_PROJECT_ARGS_LIST})) set (value CONFIGURE_COMMAND ${CMAKE_COMMAND} ${pa_CMAKE_ARGS} ) else () set (value "") endif () set (_cooking_ep_configure_command ${value} PARENT_SCOPE) endfunction () function (_cooking_populate_ep_build_command ep_args_list) if (NOT (BUILD_COMMAND IN_LIST ${ep_args_list})) set (value BUILD_COMMAND ${CMAKE_COMMAND} --build ) else () set (value "") endif () set (_cooking_ep_build_command ${value} PARENT_SCOPE) endfunction () function (_cooking_populate_ep_install_command ep_args_list) if (NOT (INSTALL_COMMAND IN_LIST ${ep_args_list})) set (value INSTALL_COMMAND ${CMAKE_COMMAND} --build --target install) else () set (value "") endif () set (_cooking_ep_install_command ${value} PARENT_SCOPE) endfunction () function (_cooking_define_ep) cmake_parse_arguments ( pa "" "NAME;SOURCE_DIR;BINARY_DIR;EXTERNAL_PROJECT_ARGS_LIST;RECIPE;INGREDIENT_DIR;STOW_DIR;LOCAL_RECONFIGURE;LOCAL_REBUILD" "DEPENDS;CONFIGURE_COMMAND;BUILD_COMMAND;INSTALL_COMMAND;CMAKE_ARGS" ${ARGN}) string (REPLACE "" "" forwarded_ep_args "${${pa_EXTERNAL_PROJECT_ARGS_LIST}}") set (ep_name ingredient_${pa_NAME}) include (ExternalProject) set (stamp_dir ${pa_INGREDIENT_DIR}/stamp) ExternalProject_add (${ep_name} DEPENDS ${pa_DEPENDS} SOURCE_DIR ${pa_SOURCE_DIR} BINARY_DIR ${pa_BINARY_DIR} CONFIGURE_COMMAND ${pa_CONFIGURE_COMMAND} BUILD_COMMAND ${pa_BUILD_COMMAND} INSTALL_COMMAND ${pa_INSTALL_COMMAND} PREFIX ${pa_INGREDIENT_DIR} STAMP_DIR ${stamp_dir} INSTALL_DIR ${pa_STOW_DIR}/${pa_NAME} CMAKE_ARGS 
${pa_CMAKE_ARGS} LIST_SEPARATOR ::: STEP_TARGETS install "${forwarded_ep_args}") set (stow_marker_file ${Cooking_INGREDIENTS_DIR}/.cooking_ingredient_${pa_NAME}) set (lock_file ${Cooking_INGREDIENTS_DIR}/.cooking_stow.lock) add_custom_command ( OUTPUT ${stow_marker_file} DEPENDS ${ep_name}-install ${stamp_dir}/ingredient_${pa_NAME}-install COMMAND flock --wait 30 ${lock_file} ${Cooking_STOW_EXECUTABLE} -t ${Cooking_INGREDIENTS_DIR} -d ${pa_STOW_DIR} ${pa_NAME} COMMAND ${CMAKE_COMMAND} -E touch ${stow_marker_file}) add_custom_target (_cooking_ingredient_${pa_NAME}_stowed DEPENDS ${stow_marker_file}) if (pa_RECIPE) set (reconfigure_marker_file ${Cooking_INGREDIENTS_DIR}/.cooking_reconfigure_ingredient_${pa_NAME}) add_custom_command ( OUTPUT ${reconfigure_marker_file} COMMAND ${CMAKE_COMMAND} -E touch ${reconfigure_marker_file}) add_custom_target (_cooking_ingredient_${pa_NAME}_marked_for_reconfigure DEPENDS ${reconfigure_marker_file}) ExternalProject_add_step (${ep_name} cooking-reconfigure DEPENDERS configure DEPENDS ${reconfigure_marker_file} COMMAND ${CMAKE_COMMAND} -E echo_append) ExternalProject_add_stepdependencies (${ep_name} cooking-reconfigure _cooking_ingredient_${pa_NAME}_marked_for_reconfigure) endif () foreach (d ${pa_DEPENDS}) ExternalProject_add_stepdependencies (${ep_name} configure _cooking_${d}_stowed) endforeach () add_dependencies (_cooking_ingredients _cooking_ingredient_${pa_NAME}_stowed) if (pa_LOCAL_RECONFIGURE OR pa_LOCAL_REBUILD) if (pa_LOCAL_RECONFIGURE) set (step configure) else () set (step build) endif () ExternalProject_add_step (${ep_name} cooking-local-${step} DEPENDERS ${step} DEPENDS ${_cooking_local_synchronize_marker_file} COMMAND ${CMAKE_COMMAND} -E echo_append) ExternalProject_add_stepdependencies (${ep_name} cooking-local-${step} _cooking_marked_for_local_synchronization) endif () endfunction () macro (cooking_ingredient name) set (_cooking_args "${ARGN}") if ((_cooking_is_excluding AND (${name} IN_LIST 
Cooking_EXCLUDED_INGREDIENTS)) OR (_cooking_is_including AND (NOT (${name} IN_LIST Cooking_INCLUDED_INGREDIENTS)))) # Nothing. else () set (_cooking_ingredient_dir ${_cooking_dir}/ingredient/${name}) cmake_parse_arguments ( _cooking_pa "LOCAL_RECONFIGURE;LOCAL_REBUILD" "COOKING_RECIPE" "CMAKE_ARGS;COOKING_CMAKE_ARGS;EXTERNAL_PROJECT_ARGS;REQUIRES" ${_cooking_args}) _cooking_populate_ep_parameter ( EXTERNAL_PROJECT_ARGS_LIST _cooking_pa_EXTERNAL_PROJECT_ARGS PARAMETER SOURCE_DIR DEFAULT_VALUE ${_cooking_ingredient_dir}/src) _cooking_populate_ep_parameter ( EXTERNAL_PROJECT_ARGS_LIST _cooking_pa_EXTERNAL_PROJECT_ARGS PARAMETER BINARY_DIR DEFAULT_VALUE ${_cooking_ingredient_dir}/build) _cooking_populate_ep_parameter ( EXTERNAL_PROJECT_ARGS_LIST _cooking_pa_EXTERNAL_PROJECT_ARGS PARAMETER BUILD_IN_SOURCE DEFAULT_VALUE OFF) if (_cooking_build_in_source) set (_cooking_ep_binary_dir "") endif () if (Cooking_LIST_ONLY) _cooking_define_listing_targets ( NAME ${name} SOURCE_DIR ${_cooking_source_dir} RECIPE ${_cooking_pa_COOKING_RECIPE} REQUIRES ${_cooking_pa_REQUIRES}) else () _cooking_adjust_requirements ( IS_EXCLUDING ${_cooking_is_excluding} IS_INCLUDING ${_cooking_is_including} REQUIREMENTS ${_cooking_pa_REQUIRES} OUTPUT_LIST _cooking_pa_REQUIRES) _cooking_populate_ep_depends ( REQUIREMENTS ${_cooking_pa_REQUIRES}) _cooking_determine_common_cmake_args (_cooking_common_cmake_args) _cooking_populate_ep_configure_command ( IS_EXCLUDING ${_cooking_is_excluding} IS_INCLUDING ${_cooking_is_including} RECIPE ${_cooking_pa_COOKING_RECIPE} REQUIREMENTS ${_cooking_pa_REQUIRES} EXTERNAL_PROJECT_ARGS_LIST _cooking_pa_EXTERNAL_PROJECT_ARGS CMAKE_ARGS ${_cooking_common_cmake_args} ${_cooking_pa_CMAKE_ARGS} COOKING_CMAKE_ARGS ${_cooking_common_cmake_args} ${_cooking_pa_COOKING_CMAKE_ARGS}) _cooking_populate_ep_build_command (_cooking_pa_EXTERNAL_PROJECT_ARGS) _cooking_populate_ep_install_command (_cooking_pa_EXTERNAL_PROJECT_ARGS) _cooking_define_ep ( NAME ${name} RECIPE 
${_cooking_pa_COOKING_RECIPE} DEPENDS ${_cooking_ep_depends} SOURCE_DIR ${_cooking_ep_source_dir} BINARY_DIR ${_cooking_ep_binary_dir} CONFIGURE_COMMAND ${_cooking_ep_configure_command} BUILD_COMMAND ${_cooking_ep_build_command} INSTALL_COMMAND ${_cooking_ep_install_command} INGREDIENT_DIR ${_cooking_ingredient_dir} STOW_DIR ${_cooking_dir}/stow CMAKE_ARGS ${_cooking_common_cmake_args} EXTERNAL_PROJECT_ARGS_LIST _cooking_pa_EXTERNAL_PROJECT_ARGS LOCAL_RECONFIGURE ${_cooking_pa_LOCAL_RECONFIGURE} LOCAL_REBUILD ${_cooking_pa_LOCAL_REBUILD}) endif () endif () endmacro () EOF cmake_cooking_args=( "-DCooking_INGREDIENTS_DIR=${ingredients_dir}" "-DCooking_RECIPE=${recipe}" ) # # Remove any `Cooking.cmake` file from the source directory. We now generate this file in the build directory, and old # copies will cause conflicts. # old_cooking_file="${source_dir}/cmake/Cooking.cmake" if [ -f "${old_cooking_file}" ]; then grep 'This file was generated by cmake-cooking' "${old_cooking_file}" > /dev/null && rm "${old_cooking_file}" fi # # Clean-up from a previous run. # if [ -e "${ingredients_ready_file}" ]; then rm "${ingredients_ready_file}" fi if [ -e "${cache_file}" ]; then rm "${cache_file}" fi if [ -d "${ingredients_dir}" -a -z "${nested}" ]; then rm -r --preserve-root "${ingredients_dir}" fi mkdir -p "${ingredients_dir}" # # Validate recipe. # if [ -n "${recipe}" ]; then if [ ! -f "${recipe}" ]; then echo "Cooking: The '${recipe}' recipe does not exist!" >&2 exit 1 fi fi # # Prepare lists of included and excluded ingredients. # if [ -n "${excluded_ingredients}" ] && [ -z "${list_only}" ]; then cmake_cooking_args+=( -DCooking_EXCLUDED_INGREDIENTS=$(printf "%s;" "${excluded_ingredients[@]}") -DCooking_INCLUDED_INGREDIENTS= ) fi if [ -n "${included_ingredients}" ] && [ -z "${list_only}" ]; then cmake_cooking_args+=( -DCooking_EXCLUDED_INGREDIENTS= -DCooking_INCLUDED_INGREDIENTS=$(printf "%s;" "${included_ingredients[@]}") ) fi # # Configure and build ingredients. 
# mkdir -p "${cooking_dir}"/stow touch "${cooking_dir}"/stow/.stow cd "${build_dir}" declare -a build_args if [ "${generator}" == "Ninja" ]; then build_args+=(-v) fi if [ -n "${list_only}" ]; then cmake_cooking_args+=("-DCooking_LIST_ONLY=ON") fi ${CMAKE} -DCMAKE_BUILD_TYPE="${build_type}" "${cmake_cooking_args[@]}" -G "${generator}" "${source_dir}" "${@}" if [ -n "${recipe}" ]; then ${CMAKE} --build . --target _cooking_ingredients_ready -- "${build_args[@]}" # # Report what we've done (if we're not nested). # if [ -z "${nested}" ]; then ingredients=($(find "${ingredients_dir}" -name '.cooking_ingredient_*' -printf '%f\n' | sed -r 's/\.cooking_ingredient_(.+)/\1/')) if [ -z "${list_only}" ]; then printf "\nCooking: Installed the following ingredients:\n" else printf "\nCooking: The following ingredients are necessary for this recipe:\n" fi for ingredient in "${ingredients[@]}"; do echo " - ${ingredient}" done printf '\n' fi if [ -n "${list_only}" ]; then exit 0 fi # # Configure the project, expecting all requirements satisfied. # ${CMAKE} -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON "${@}" . # # Optionally export the installed files. # if [ -n "${export_dir}" ]; then rsync "${ingredients_dir}/" "${export_dir}" -a --copy-links printf "\nCooking: Exported ingredients to ${export_dir}\n" fi fi # # Save invocation information. # cd "${initial_wd}" cat < "${memory_file}" run_previous() { "${0}" ${invoked_args[@]@Q} } EOF seastar-25.05.0/cooking_recipe.cmake000066400000000000000000000243711501510432000173050ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. 
# # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # # # Useful definitions for `cmake -E env`. # set (amended_PATH PATH=${Cooking_INGREDIENTS_DIR}/bin:$ENV{PATH}) set (PKG_CONFIG_PATH PKG_CONFIG_PATH=${Cooking_INGREDIENTS_DIR}/lib/pkgconfig) # # Some Autotools ingredients need this information because they don't use pkgconfig. # set (autotools_ingredients_flags CFLAGS=-I${Cooking_INGREDIENTS_DIR}/include CXXFLAGS=-I${Cooking_INGREDIENTS_DIR}/include LDFLAGS=-L${Cooking_INGREDIENTS_DIR}/lib) # # Some Autotools projects amend the info file instead of making a package-specific one. # This doesn't play nicely with GNU Stow. # # Just append the name of the ingredient, like # # ${info_dir}/gmp # set (info_dir --infodir=/share/info) # # Build-concurrency. # cmake_host_system_information ( RESULT build_concurrency_factor QUERY NUMBER_OF_LOGICAL_CORES) set (make_command make -j ${build_concurrency_factor}) # # All the ingredients. # ## ## Dependencies of dependencies of dependencies. ## cooking_ingredient (gmp EXTERNAL_PROJECT_ARGS URL https://gmplib.org/download/gmp/gmp-6.1.2.tar.bz2 URL_MD5 8ddbb26dc3bd4e2302984debba1406a5 CONFIGURE_COMMAND /configure --prefix= --srcdir= ${info_dir}/gmp BUILD_COMMAND INSTALL_COMMAND ${make_command} install) ## ## Dependencies of dependencies. ## cooking_ingredient (colm EXTERNAL_PROJECT_ARGS URL http://www.colm.net/files/colm/colm-0.13.0.6.tar.gz URL_MD5 16aaf566cbcfe9a06154e094638ac709 # This is upsetting. 
BUILD_IN_SOURCE YES CONFIGURE_COMMAND ./configure --prefix= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (libpciaccess EXTERNAL_PROJECT_ARGS URL https://www.x.org/releases/individual/lib/libpciaccess-0.13.4.tar.gz URL_MD5 cc1fad87da60682af1d5fa43a5da45a4 CONFIGURE_COMMAND /configure --prefix= --srcdir= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (nettle REQUIRES gmp EXTERNAL_PROJECT_ARGS URL https://ftp.gnu.org/gnu/nettle/nettle-3.4.tar.gz URL_MD5 dc0f13028264992f58e67b4e8915f53d CONFIGURE_COMMAND /configure --prefix= --srcdir= --libdir=/lib ${info_dir}/nettle ${autotools_ingredients_flags} BUILD_COMMAND INSTALL_COMMAND ${make_command} install) # A dependency of DPDK. cooking_ingredient (numactl EXTERNAL_PROJECT_ARGS URL https://github.com/numactl/numactl/releases/download/v2.0.12/numactl-2.0.12.tar.gz URL_MD5 2ba9777d78bfd7d408a387e53bc33ebc CONFIGURE_COMMAND /configure --prefix= --srcdir= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (zlib EXTERNAL_PROJECT_ARGS URL https://github.com/madler/zlib/releases/download/v1.2.13/zlib-1.2.13.tar.gz URL_MD5 9b8aa094c4e5765dabf4da391f00d15c CONFIGURE_COMMAND /configure --prefix= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) ## ## Private and private/public dependencies. 
## if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") set (boost_toolset gcc) elseif (CMAKE_CXX_COMPILER_ID STREQUAL "Clang") set (boost_toolset clang) else () set(boost_toolset "cook_cxx") endif () set (boost_user_config "${CMAKE_CURRENT_BINARY_DIR}/cook_boost.jam") if (CMAKE_C_FLAGS) string (JOIN " " boost_cflags "${CMAKE_C_FLAGS}") endif () if (CMAKE_CXX_FLAGS) string (JOIN " " boost_cxxflags "${CMAKE_CXX_FLAGS}") endif () file (WRITE "${boost_user_config}" "using ${boost_toolset}" " : " # toolset's version " : ${CMAKE_CXX_COMPILER}" " : ${boost_cflags}${boost_cxxflags} -std=c++${CMAKE_CXX_STANDARD}" " ;\n") cooking_ingredient (Boost EXTERNAL_PROJECT_ARGS URL https://archives.boost.io/release/1.81.0/source/boost_1_81_0.tar.bz2 URL_HASH SHA256=71feeed900fbccca04a3b4f2f84a7c217186f28a940ed8b7ed4725986baf99fa PATCH_COMMAND ./bootstrap.sh --prefix= --with-libraries=atomic,chrono,date_time,filesystem,program_options,system,test,thread --with-toolset=${boost_toolset} CONFIGURE_COMMAND BUILD_COMMAND INSTALL_COMMAND ${CMAKE_COMMAND} -E chdir ./b2 -j ${build_concurrency_factor} --layout=system --build-dir= --user-config=${boost_user_config} install toolset=${boost_toolset} variant=debug link=shared threading=multi hardcode-dll-paths=true dll-path=/lib) cooking_ingredient (GnuTLS REQUIRES gmp nettle EXTERNAL_PROJECT_ARGS URL https://www.gnupg.org/ftp/gcrypt/gnutls/v3.5/gnutls-3.5.18.tar.xz URL_MD5 c2d93d305ecbc55939bc2a8ed4a76a3d CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env ${PKG_CONFIG_PATH} /configure --prefix= --srcdir= --with-included-unistring --with-included-libtasn1 --without-p11-kit # https://lists.gnupg.org/pipermail/gnutls-help/2016-February/004085.html --disable-non-suiteb-curves --disable-doc ${autotools_ingredients_flags} BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (Protobuf REQUIRES zlib EXTERNAL_PROJECT_ARGS URL https://github.com/protocolbuffers/protobuf/releases/download/v21.11//protobuf-cpp-3.21.11.tar.gz URL_MD5 
e2cf711edae444bba0da199bc034e031 CMAKE_ARGS -Dprotobuf_BUILD_TESTS=OFF) cooking_ingredient (hwloc REQUIRES numactl libpciaccess EXTERNAL_PROJECT_ARGS URL https://download.open-mpi.org/release/hwloc/v2.2/hwloc-2.2.0.tar.gz URL_MD5 762c93cdca3249eed4627c4a160192bd CONFIGURE_COMMAND /configure --prefix= --srcdir= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (ragel REQUIRES colm EXTERNAL_PROJECT_ARGS URL http://www.colm.net/files/ragel/ragel-6.10.tar.gz URL_MD5 748cae8b50cffe9efcaa5acebc6abf0d PATCH_COMMAND sed -i "s/ CHAR_M/ SCHAR_M/g" ragel/common.cpp # This is upsetting. BUILD_IN_SOURCE YES CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env ${amended_PATH} ./configure --prefix= # This is even more upsetting. ${autotools_ingredients_flags} BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (lksctp-tools EXTERNAL_PROJECT_ARGS URL https://sourceforge.net/projects/lksctp/files/lksctp-tools/lksctp-tools-1.0.16.tar.gz URL_MD5 708bb0b5a6806ad6e8d13c55b067518e PATCH_COMMAND ./bootstrap CONFIGURE_COMMAND /configure --prefix= --srcdir= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) cooking_ingredient (yaml-cpp CMAKE_ARGS -DYAML_CPP_BUILD_TESTS=OFF -DYAML_BUILD_SHARED_LIBS=ON EXTERNAL_PROJECT_ARGS URL https://github.com/jbeder/yaml-cpp/archive/yaml-cpp-0.7.0.tar.gz URL_MD5 74d646a3cc1b5d519829441db96744f0) ## ## Public dependencies. 
## cooking_ingredient (c-ares EXTERNAL_PROJECT_ARGS URL https://github.com/c-ares/c-ares/releases/download/v1.32.3/c-ares-1.32.3.tar.gz URL_MD5 d5ed5967bc3a74191c051ce81ffe02fc CONFIGURE_COMMAND /configure --prefix= --srcdir= BUILD_COMMAND INSTALL_COMMAND ${make_command} install) set (dpdk_args --default-library=static -Dc_args="-Wno-error" -Denable_docs=false -Denable_apps=dpdk-testpmd -Dtests=false -Dexamples= -Dmbuf_refcnt_atomic=false -Dmax_memseg_lists=8192 -Ddisable_drivers="net/softnic,net/bonding" -Ddisable_libs="jobstats,power,port,table,pipeline,member" -Dcpu_instruction_set=${Seastar_DPDK_MACHINE}) if (CMAKE_BUILD_TYPE STREQUAL Debug) list (APPEND dpdk_args -Dbuildtype=debug) endif () find_program (Meson_EXECUTABLE meson) if (NOT Meson_EXECUTABLE) message (FATAL_ERROR "Cooking: Meson is required!") endif () find_program (Ninja_EXECUTABLE ninja) if (NOT Ninja_EXECUTABLE) message (FATAL_ERROR "Cooking: Ninja is required!") endif () cooking_ingredient (dpdk EXTERNAL_PROJECT_ARGS SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/dpdk CONFIGURE_COMMAND env CC=${CMAKE_C_COMPILER} ${Meson_EXECUTABLE} setup ${dpdk_args} --prefix= BUILD_COMMAND ${Ninja_EXECUTABLE} -C INSTALL_COMMAND ${Ninja_EXECUTABLE} -C install) cooking_ingredient (fmt EXTERNAL_PROJECT_ARGS URL https://github.com/fmtlib/fmt/archive/9.1.0.tar.gz URL_MD5 21fac48cae8f3b4a5783ae06b443973a CMAKE_ARGS -DFMT_DOC=OFF -DFMT_TEST=OFF) cooking_ingredient (liburing EXTERNAL_PROJECT_ARGS URL https://github.com/axboe/liburing/archive/liburing-2.1.tar.gz URL_MD5 78f13d9861b334b9a9ca0d12cf2a6d3c CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env CC=${CMAKE_C_COMPILER} CXX=${CMAKE_CXX_COMPILER} /configure --prefix= BUILD_COMMAND BUILD_BYPRODUCTS "/src/liburing.a" BUILD_IN_SOURCE ON INSTALL_COMMAND ${make_command} -s install) cooking_ingredient (lz4 EXTERNAL_PROJECT_ARGS URL https://github.com/lz4/lz4/archive/v1.8.0.tar.gz URL_MD5 6247bf0e955899969d1600ff34baed6b # This is upsetting. 
BUILD_IN_SOURCE ON CONFIGURE_COMMAND BUILD_COMMAND INSTALL_COMMAND ${make_command} PREFIX= install) seastar-25.05.0/debug/000077500000000000000000000000001501510432000144025ustar00rootroot00000000000000seastar-25.05.0/debug/task-latency.stap000077500000000000000000000010631501510432000176750ustar00rootroot00000000000000#!/usr/bin/stap # usage: task_latency.stap process_name latency_threshold_ms global start_time probe process(@1).mark("reactor_run_tasks_single_start") { start_time[tid()] = gettimeofday_us() } probe process(@1).mark("reactor_run_tasks_single_end") { delete start_time[tid()] } probe timer.profile { if ([tid()] in start_time) { now = gettimeofday_us() start = start_time[tid()] if ((now - start) > $2 * 1000) { printf("detected tasks running for >%sms\n", @2) print_usyms(ubacktrace()) } } } seastar-25.05.0/demos/000077500000000000000000000000001501510432000144235ustar00rootroot00000000000000seastar-25.05.0/demos/CMakeLists.txt000066400000000000000000000057041501510432000171710ustar00rootroot00000000000000# # This file is open source software, licensed to you under the terms # of the Apache License, Version 2.0 (the "License"). See the NOTICE file # distributed with this work for additional information regarding copyright # ownership. You may not use this file except in compliance with the License. # # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # # Copyright (C) 2018 Scylladb, Ltd. # # Logical target for all demos. 
add_custom_target (demos) macro (seastar_add_demo name) set (args ${ARGN}) cmake_parse_arguments ( parsed_args "" "" "SOURCES" ${args}) set (target demo_${name}) add_executable (${target} ${parsed_args_SOURCES}) target_include_directories (${target} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries (${target} PRIVATE Boost::program_options seastar_private) set_target_properties (${target} PROPERTIES OUTPUT_NAME ${name}_demo) add_dependencies (demos ${target}) endmacro () seastar_add_demo (block_discard SOURCES block_discard_demo.cc) if (${Seastar_API_LEVEL} GREATER_EQUAL 3) seastar_add_demo (coroutines SOURCES coroutines_demo.cc) endif () seastar_add_demo (hello-world SOURCES hello-world.cc) seastar_add_demo (websocket_server SOURCES websocket_server_demo.cc) seastar_add_demo (echo SOURCES echo_demo.cc) seastar_add_demo (ip SOURCES ip_demo.cc) seastar_add_demo (line_count SOURCES line_count_demo.cc) seastar_add_demo (l3 SOURCES l3_demo.cc) seastar_add_demo (rpc SOURCES rpc_demo.cc) seastar_add_demo (scheduling_group SOURCES scheduling_group_demo.cc) seastar_add_demo (tcp SOURCES tcp_demo.cc) seastar_add_demo (tcp_sctp_client SOURCES tcp_sctp_client_demo.cc) seastar_add_demo (tcp_sctp_server SOURCES tcp_sctp_server_demo.cc) seastar_add_demo (tls_echo_server SOURCES tls_echo_server.hh tls_echo_server_demo.cc) seastar_add_demo (tls_simple_client SOURCES tls_echo_server.hh tls_simple_client_demo.cc) seastar_add_demo (udp_client SOURCES udp_client_demo.cc) seastar_add_demo (udp_server SOURCES udp_server_demo.cc) seastar_add_demo (udp_zero_copy SOURCES udp_zero_copy_demo.cc) seastar_add_demo (sharded_parameter SOURCES sharded_parameter_demo.cc) seastar_add_demo (file SOURCES file_demo.cc) seastar_add_demo (tutorial_examples SOURCES tutorial_examples.cc) seastar_add_demo (http_client SOURCES http_client_demo.cc) if (Seastar_MODULE) add_executable (hello_cxx_module) target_sources (hello_cxx_module PRIVATE hello-cxx-module.cc) target_link_libraries 
(hello_cxx_module PRIVATE seastar-module) endif () seastar-25.05.0/demos/block_discard_demo.cc000066400000000000000000000045541501510432000205310ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include #include #include using namespace seastar; namespace bpo = boost::program_options; struct file_test { file_test(file&& f) : f(std::move(f)) {} file f; semaphore sem = { 0 }; }; int main(int ac, char** av) { app_template app; app.add_options() ("dev", bpo::value(), "e.g. --dev /dev/sdb") ; return app.run_deprecated(ac, av, [&app] { static constexpr auto max = 10000; auto&& config = app.configuration(); auto filepath = config["dev"].as(); return open_file_dma(filepath, open_flags::rw | open_flags::create).then([] (file f) { auto ft = new file_test{std::move(f)}; // Discard asynchronously, siganl when done. (void)ft->f.stat().then([ft] (struct stat st) mutable { SEASTAR_ASSERT(S_ISBLK(st.st_mode)); auto offset = 0; auto length = max * 4096; return ft->f.discard(offset, length).then([ft] () mutable { ft->sem.signal(); }); }); // Wait and exit. 
(void)ft->sem.wait().then([ft] () mutable { return ft->f.flush(); }).then([ft] () mutable { std::cout << "done\n"; delete ft; engine().exit(0); }); }); }); } seastar-25.05.0/demos/coroutines_demo.cc000066400000000000000000000046031501510432000201330ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2019 ScyllaDB Ltd. 
*/ #include #include #include #include #include #include #include #include #include #include int main(int argc, char** argv) { seastar::app_template app; app.run(argc, argv, [] () -> seastar::future<> { std::cout << "this is a completely useless program\nplease stand by...\n"; auto f = seastar::coroutine::parallel_for_each(std::vector { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, [] (int i) -> seastar::future<> { co_await seastar::sleep(std::chrono::seconds(i)); std::cout << i << "\n"; }); auto file = co_await seastar::open_file_dma("useless_file.txt", seastar::open_flags::create | seastar::open_flags::wo); auto out = co_await seastar::make_file_output_stream(file); seastar::sstring str = "nothing to see here, move along now\n"; co_await out.write(str); co_await out.flush(); co_await out.close(); bool all_exist = true; std::vector filenames = { "useless_file.txt", "non_existing" }; co_await seastar::coroutine::parallel_for_each(filenames, [&all_exist] (const seastar::sstring& name) -> seastar::future<> { all_exist &= co_await seastar::file_exists(name); }); std::cout << (all_exist ? "" : "not ") << "all files exist" << std::endl; co_await std::move(f); std::cout << "done\n"; }); } seastar-25.05.0/demos/echo_demo.cc000066400000000000000000000070061501510432000166570ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. * */ #include #include #include #include #include #include #include #include using namespace seastar; using namespace net; void dump_packet(const packet& p) { std::cout << "rx:"; auto f = p.frag(0); for (unsigned i = 0; i < std::min(f.size, size_t(30)); ++i) { char x[4]; std::sprintf(x, " %02x", uint8_t(f.base[i])); std::cout << x; } std::cout << "\n"; } future<> echo_packet(net::qp& netif, packet p) { auto f = p.frag(0); if (f.size < sizeof(eth_hdr)) { return make_ready_future<>(); } auto pos = 0; auto eh = reinterpret_cast(f.base + pos); pos += sizeof(*eh); *eh = ntoh(*eh); if (eh->eth_proto != 0x0800) { return make_ready_future<>(); } auto iph = reinterpret_cast(f.base + pos); *iph = ntoh(*iph); pos += iph->ihl * 4; if (iph->ver != 4 || iph->ihl < 5 || iph->ip_proto != 1) { return make_ready_future<>(); } auto ip_len = iph->len; auto icmph = reinterpret_cast(f.base + pos); if (icmph->type != icmp_hdr::msg_type::echo_request) { return make_ready_future<>(); } auto icmp_len = ip_len - iph->ihl * 4; std::swap(eh->src_mac, eh->dst_mac); std::swap(iph->src_ip, iph->dst_ip); icmph->type = icmp_hdr::msg_type::echo_reply; icmph->csum = 0; *iph = hton(*iph); *eh = hton(*eh); icmph->csum = ip_checksum(icmph, icmp_len); iph->csum = 0; iph->csum = ip_checksum(iph, iph->ihl * 4); return netif.send(std::move(p)); } #ifdef SEASTAR_HAVE_DPDK void usage() { std::cout<<"Usage: echotest [-virtio|-dpdk]"< dnet; net::qp* vnet; native_stack_options opts; #ifdef SEASTAR_HAVE_DPDK if (ac > 2) { usage(); return -1; } if ((ac == 1) || !std::strcmp(av[1], "-virtio")) { dnet = create_virtio_net_device(opts.virtio_opts, opts.lro); } else if (!std::strcmp(av[1], "-dpdk")) { dnet = create_dpdk_net_device(); } else { usage(); return -1; } #else dnet = create_virtio_net_device(opts.virtio_opts, opts.lro); #endif // SEASTAR_HAVE_DPDK auto qp = 
dnet->init_local_queue(opts, 0); vnet = qp.get(); dnet->set_local_queue(std::move(qp)); future<> rx_done = dnet->receive([vnet] (packet p) { return echo_packet(*vnet, std::move(p)); }); engine().run(); return 0; } seastar-25.05.0/demos/file_demo.cc000066400000000000000000000235651501510432000166700ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ /* * Copyright 2020 ScyllaDB */ // Demonstration of seastar::with_file #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace seastar; constexpr size_t aligned_size = 4096; future<> verify_data_file(file& f, temporary_buffer& rbuf, const temporary_buffer& wbuf) { return f.dma_read(0, rbuf.get_write(), aligned_size).then([&rbuf, &wbuf] (size_t count) { SEASTAR_ASSERT(count == aligned_size); fmt::print(" verifying {} bytes\n", count); SEASTAR_ASSERT(!memcmp(rbuf.get(), wbuf.get(), aligned_size)); }); } future open_data_file(sstring meta_filename, temporary_buffer& rbuf) { fmt::print(" retrieving data filename from {}\n", meta_filename); return with_file(open_file_dma(meta_filename, open_flags::ro), [&rbuf] (file& f) { return f.dma_read(0, rbuf.get_write(), aligned_size).then([&rbuf] (size_t count) { SEASTAR_ASSERT(count == aligned_size); auto data_filename = sstring(rbuf.get()); fmt::print(" opening {}\n", data_filename); return open_file_dma(data_filename, open_flags::ro); }); }); } future<> demo_with_file() { fmt::print("Demonstrating with_file():\n"); return tmp_dir::do_with_thread([] (tmp_dir& t) { auto rnd = std::mt19937(std::random_device()()); auto dist = std::uniform_int_distribution(0, std::numeric_limits::max()); auto wbuf = temporary_buffer::aligned(aligned_size, aligned_size); sstring meta_filename = (t.get_path() / "meta_file").native(); sstring data_filename = (t.get_path() / "data_file").native(); // `with_file` is used to create/open `filename` just around the call to `dma_write` auto write_to_file = [] (const sstring filename, temporary_buffer& wbuf) { auto count = with_file(open_file_dma(filename, open_flags::rw | open_flags::create), [&wbuf] (file& f) { return f.dma_write(0, wbuf.get(), aligned_size); }).get(); SEASTAR_ASSERT(count == aligned_size); }; // print the data_filename into the write buffer std::fill(wbuf.get_write(), wbuf.get_write() + 
aligned_size, 0); std::copy(data_filename.cbegin(), data_filename.cend(), wbuf.get_write()); // and write it to `meta_filename` fmt::print(" writing \"{}\" into {}\n", data_filename, meta_filename); write_to_file(meta_filename, wbuf); // now write some random data into data_filename fmt::print(" writing random data into {}\n", data_filename); std::generate(wbuf.get_write(), wbuf.get_write() + aligned_size, [&dist, &rnd] { return dist(rnd); }); write_to_file(data_filename, wbuf); // verify the data via meta_filename fmt::print(" verifying data...\n"); auto rbuf = temporary_buffer::aligned(aligned_size, aligned_size); with_file(open_data_file(meta_filename, rbuf), [&rbuf, &wbuf] (file& f) { return verify_data_file(f, rbuf, wbuf); }).get(); }); } future<> demo_with_file_close_on_failure() { fmt::print("\nDemonstrating with_file_close_on_failure():\n"); return tmp_dir::do_with_thread([] (tmp_dir& t) { auto rnd = std::mt19937(std::random_device()()); auto dist = std::uniform_int_distribution(0, std::numeric_limits::max()); auto wbuf = temporary_buffer::aligned(aligned_size, aligned_size); sstring meta_filename = (t.get_path() / "meta_file").native(); sstring data_filename = (t.get_path() / "data_file").native(); // with_file_close_on_failure will close the opened file only if // `make_file_output_stream` returns an error. Otherwise, in the error-free path, // the opened file is moved to `file_output_stream` that in-turn closes it // when the stream is closed. 
auto make_output_stream = [] (std::string_view filename) { return with_file_close_on_failure(open_file_dma(filename, open_flags::rw | open_flags::create), [] (file f) { return make_file_output_stream(std::move(f), aligned_size); }); }; // writes the buffer one byte at a time, to demonstrate output stream auto write_to_stream = [] (output_stream& o, const temporary_buffer& wbuf) { return seastar::do_for_each(wbuf, [&o] (char c) { return o.write(&c, 1); }).finally([&o] { return o.close(); }); }; // print the data_filename into the write buffer std::fill(wbuf.get_write(), wbuf.get_write() + aligned_size, 0); std::copy(data_filename.cbegin(), data_filename.cend(), wbuf.get_write()); // and write it to `meta_filename` fmt::print(" writing \"{}\" into {}\n", data_filename, meta_filename); // with_file_close_on_failure will close the opened file only if // `make_file_output_stream` returns an error. Otherwise, in the error-free path, // the opened file is moved to `file_output_stream` that in-turn closes it // when the stream is closed. 
output_stream o = make_output_stream(meta_filename).get(); write_to_stream(o, wbuf).get(); // now write some random data into data_filename fmt::print(" writing random data into {}\n", data_filename); std::generate(wbuf.get_write(), wbuf.get_write() + aligned_size, [&dist, &rnd] { return dist(rnd); }); o = make_output_stream(data_filename).get(); write_to_stream(o, wbuf).get(); // verify the data via meta_filename fmt::print(" verifying data...\n"); auto rbuf = temporary_buffer::aligned(aligned_size, aligned_size); with_file(open_data_file(meta_filename, rbuf), [&rbuf, &wbuf] (file& f) { return verify_data_file(f, rbuf, wbuf); }).get(); }); } static constexpr size_t half_aligned_size = aligned_size / 2; future<> demo_with_io_intent() { fmt::print("\nDemonstrating demo_with_io_intent():\n"); return tmp_dir::do_with_thread([] (tmp_dir& t) { sstring filename = (t.get_path() / "testfile.tmp").native(); auto f = open_file_dma(filename, open_flags::rw | open_flags::create).get(); auto rnd = std::mt19937(std::random_device()()); auto dist = std::uniform_int_distribution(0, std::numeric_limits::max()); auto wbuf = temporary_buffer::aligned(aligned_size, aligned_size); fmt::print(" writing random data into {}\n", filename); std::generate(wbuf.get_write(), wbuf.get_write() + aligned_size, [&dist, &rnd] { return dist(rnd); }); f.dma_write(0, wbuf.get(), aligned_size).get(); auto wbuf_n = temporary_buffer::aligned(aligned_size, aligned_size); fmt::print(" starting to overwrite {} with other random data in two steps\n", filename); std::generate(wbuf_n.get_write(), wbuf_n.get_write() + aligned_size, [&dist, &rnd] { return dist(rnd); }); io_intent intent; auto f1 = f.dma_write(0, wbuf_n.get(), half_aligned_size); auto f2 = f.dma_write(half_aligned_size, wbuf_n.get() + half_aligned_size, half_aligned_size, &intent); fmt::print(" cancel the 2nd overwriting\n"); intent.cancel(); fmt::print(" wait for overwriting IOs to complete\n"); f1.get(); bool cancelled = false; try { f2.get(); 
// The file::dma_write doesn't preemt, but if it // suddenly will, the 2nd write will pass before // the intent would be cancelled fmt::print(" 2nd write won the race with cancellation\n"); } catch (cancelled_error& ex) { cancelled = true; } fmt::print(" verifying data...\n"); auto rbuf = allocate_aligned_buffer(aligned_size, aligned_size); f.dma_read(0, rbuf.get(), aligned_size).get(); // First part of the buffer must coincide with the overwritten data SEASTAR_ASSERT(!memcmp(rbuf.get(), wbuf_n.get(), half_aligned_size)); if (cancelled) { // Second part -- with the old data ... SEASTAR_ASSERT(!memcmp(rbuf.get() + half_aligned_size, wbuf.get() + half_aligned_size, half_aligned_size)); } else { // ... or with new if the cancellation didn't happen SEASTAR_ASSERT(!memcmp(rbuf.get() + half_aligned_size, wbuf.get() + half_aligned_size, half_aligned_size)); } }); } int main(int ac, char** av) { app_template app; return app.run(ac, av, [] { return demo_with_file().then([] { return demo_with_file_close_on_failure().then([] { return demo_with_io_intent(); }); }); }); } seastar-25.05.0/demos/hello-cxx-module.cc000066400000000000000000000020161501510432000201170ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2023 ScyllaDB Ltd. 
*/ import seastar; using namespace seastar; logger applog("app"); int main(int argc, char** argv) { seastar::app_template app; app.run(argc, argv, [] () -> future<> { applog.info("Hello world!"); return make_ready_future<>(); }); } seastar-25.05.0/demos/hello-world.cc000066400000000000000000000021501501510432000171600ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2022 ScyllaDB Ltd. */ #include #include #include using namespace seastar; logger applog("app"); int main(int argc, char** argv) { seastar::app_template app; app.run(argc, argv, [] () -> future<> { applog.info("Hello world!"); return make_ready_future<>(); }); } seastar-25.05.0/demos/http_client_demo.cc000066400000000000000000000106261501510432000202600ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2022 ScyllaDB Ltd. */ #include #include #include #include #include #include #include #include #include #include #include #include using namespace seastar; namespace bpo = boost::program_options; struct printer { future> operator() (temporary_buffer buf) { if (buf.empty()) { return make_ready_future>(stop_consuming(std::move(buf))); } fmt::print("{}", sstring(buf.get(), buf.size())); return make_ready_future>(continue_consuming()); } }; int main(int ac, char** av) { app_template app; app.add_options() ("https", bpo::bool_switch(), "Use HTTPS on port 443 (if off -- use HTTP on port 80)") ("host", bpo::value(), "Host to connect") ("path", bpo::value(), "Path to query upon") ("method", bpo::value()->default_value("GET"), "Method to use") ("file", bpo::value(), "File to get body from (no body if missing)") ; return app.run(ac, av, [&] { auto&& config = app.configuration(); auto host = config["host"].as(); auto path = config["path"].as(); auto method = config["method"].as(); auto body = config.count("file") == 0 ? 
std::string("") : config["file"].as(); auto https = config["https"].as(); return seastar::async([=] { net::hostent e = net::dns::get_host_by_name(host, net::inet_address::family::INET).get(); std::unique_ptr cln; if (https) { auto certs = ::make_shared(); certs->set_system_trust().get(); fmt::print("{} {}:443{}\n", method, e.addr_list.front(), path); cln = std::make_unique(socket_address(e.addr_list.front(), 443), std::move(certs), host); } else { fmt::print("{} {}:80{}\n", method, e.addr_list.front(), path); cln = std::make_unique(socket_address(e.addr_list.front(), 80)); } auto req = http::request::make(method, host, path); if (body != "") { future f = open_file_dma(body, open_flags::ro); req.write_body("txt", [ f = std::move(f) ] (output_stream&& out) mutable { return seastar::async([f = std::move(f), out = std::move(out)] () mutable { auto in = make_file_input_stream(f.get()); copy(in, out).get(); out.flush().get(); out.close().get(); in.close().get(); }); }); } cln->make_request(std::move(req), [] (const http::reply& rep, input_stream&& in) { fmt::print("Reply status {}\n--------8<--------\n", rep._status); return seastar::async([in = std::move(in)] () mutable { in.consume(printer{}).get(); in.close().get(); }); }).get(); cln->close().get(); }).handle_exception([](auto ep) { fmt::print("Error: {}", ep); }); }); } seastar-25.05.0/demos/ip_demo.cc000066400000000000000000000026211501510432000163470ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include #include using namespace seastar; using namespace net; int main(int ac, char** av) { native_stack_options opts; auto vnet = create_virtio_net_device(opts.virtio_opts, opts.lro); vnet->set_local_queue(vnet->init_local_queue(opts, 0)); interface netif(std::move(vnet)); ipv4 inet(&netif); inet.set_host_address(ipv4_address("192.168.122.2")); engine().run(); return 0; } seastar-25.05.0/demos/l3_demo.cc000066400000000000000000000031411501510432000162530ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. 
*/ #include #include #include #include #include using namespace seastar; using namespace net; void dump_arp_packets(l3_protocol& proto) { // FIXME: ignored future (void)proto.receive([] (packet p, ethernet_address from) { std::cout << "seen arp packet\n"; return make_ready_future<>(); }, [] (forward_hash& out_hash_data, packet& p, size_t off) {return false;}); } int main(int ac, char** av) { native_stack_options opts; auto vnet = create_virtio_net_device(opts.virtio_opts, opts.lro); interface netif(std::move(vnet)); l3_protocol arp(&netif, eth_protocol_num::arp, []{ return std::optional(); }); dump_arp_packets(arp); engine().run(); return 0; } seastar-25.05.0/demos/line_count_demo.cc000066400000000000000000000055271501510432000201060ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2015 Cloudius Systems, Ltd. */ // Demonstration of file_input_stream. Don't expect stellar performance // since no read-ahead or caching is done yet. 
#include #include #include #include #include #include #include using namespace seastar; struct reader { public: reader(file f) : is(make_file_input_stream(std::move(f), file_input_stream_options{1 << 16, 1})) { } input_stream is; size_t count = 0; // for input_stream::consume(): using unconsumed_remainder = std::optional>; future operator()(temporary_buffer data) { if (data.empty()) { return make_ready_future(std::move(data)); } else { count += std::count(data.begin(), data.end(), '\n'); // FIXME: last line without \n? return make_ready_future(); } } }; int main(int ac, char** av) { app_template app; namespace bpo = boost::program_options; app.add_positional_options({ { "file", bpo::value(), "File to process", 1 }, }); return app.run(ac, av, [&app] { auto fname = app.configuration()["file"].as(); return open_file_dma(fname, open_flags::ro).then([] (file f) { auto r = make_shared(std::move(f)); return r->is.consume(*r).then([r] { fmt::print("{:d} lines\n", r->count); return r->is.close().then([r] {}); }); }).then_wrapped([] (future<> f) -> future { try { f.get(); return make_ready_future(0); } catch (std::exception& ex) { std::cout << ex.what() << "\n"; return make_ready_future(1); } catch (...) { std::cout << "unknown exception\n"; return make_ready_future(1); } }); }); } seastar-25.05.0/demos/rpc_demo.cc000066400000000000000000000350711501510432000165300ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2015 Cloudius Systems */ #include #include #include #include #include #include #include #include #include #include using namespace seastar; struct serializer { }; template inline void write_arithmetic_type(Output& out, T v) { static_assert(std::is_arithmetic_v, "must be arithmetic type"); return out.write(reinterpret_cast(&v), sizeof(T)); } template inline T read_arithmetic_type(Input& in) { static_assert(std::is_arithmetic_v, "must be arithmetic type"); T v; in.read(reinterpret_cast(&v), sizeof(T)); return v; } template inline void write(serializer, Output& output, int32_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, uint32_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, int64_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, uint64_t v) { return write_arithmetic_type(output, v); } template inline void write(serializer, Output& output, double v) { return write_arithmetic_type(output, v); } template inline int32_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint32_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint64_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline uint64_t read(serializer, Input& input, rpc::type) { return read_arithmetic_type(input); } template inline double read(serializer, Input& input, rpc::type) { 
return read_arithmetic_type(input); } template inline void write(serializer, Output& out, const sstring& v) { write_arithmetic_type(out, uint32_t(v.size())); out.write(v.c_str(), v.size()); } template inline sstring read(serializer, Input& in, rpc::type) { auto size = read_arithmetic_type(in); sstring ret = uninitialized_string(size); in.read(ret.data(), size); return ret; } namespace bpo = boost::program_options; using namespace std::chrono_literals; class mycomp : public rpc::compressor::factory { const sstring _name = "LZ4"; public: virtual const sstring& supported() const override { fmt::print("supported called\n"); return _name; } virtual std::unique_ptr negotiate(sstring feature, bool is_server) const override { fmt::print("negotiate called with {}\n", feature); return feature == _name ? std::make_unique() : nullptr; } }; int main(int ac, char** av) { app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "RPC server port") ("server", bpo::value(), "Server address") ("compress", bpo::value()->default_value(false), "Compress RPC traffic"); std::cout << "start "; rpc::protocol myrpc(serializer{}); static std::unique_ptr::server> server; static std::unique_ptr::client> client; static double x = 30.0; static logger log("rpc_demo"); myrpc.set_logger(&log); return app.run_deprecated(ac, av, [&] { auto&& config = app.configuration(); uint16_t port = config["port"].as(); bool compress = config["compress"].as(); static mycomp mc; auto test1 = myrpc.register_handler(1, [x = 0](int i) mutable { fmt::print("test1 count {:d} got {:d}\n", ++x, i); }); auto test2 = myrpc.register_handler(2, [](int a, int b){ fmt::print("test2 got {:d} {:d}\n", a, b); return make_ready_future(a+b); }); auto test3 = myrpc.register_handler(3, [](double x){ fmt::print("test3 got {:f}\n", x); return std::make_unique(sin(x)); }); auto test4 = myrpc.register_handler(4, [](){ fmt::print("test4 throw!\n"); throw std::runtime_error("exception!"); }); auto test5 = 
myrpc.register_handler(5, [](){ fmt::print("test5 no wait\n"); return rpc::no_wait; }); auto test6 = myrpc.register_handler(6, [](const rpc::client_info& info, int x){ fmt::print("test6 client {}, {:d}\n", inet_ntoa(info.addr.as_posix_sockaddr_in().sin_addr), x); }); auto test8 = myrpc.register_handler(8, [](){ fmt::print("test8 sleep for 2 sec\n"); return sleep(2s); }); auto test13 = myrpc.register_handler(13, [](){ fmt::print("test13 sleep for 1 msec\n"); return sleep(1ms); }); auto test_message_to_big = myrpc.register_handler(14, [](sstring payload){ fmt::print("test message to bit, should not get here"); }); if (config.count("server")) { std::cout << "client" << std::endl; auto test7 = myrpc.make_client(7); auto test9 = myrpc.make_client(9); // do not send optional auto test9_1 = myrpc.make_client(9); // send optional auto test9_2 = myrpc.make_client(9); // send more data than handler expects auto test10 = myrpc.make_client(10); // receive less then replied auto test10_1 = myrpc.make_client> ()>(10); // receive all auto test11 = myrpc.make_client>> ()>(11); // receive more then replied auto test12 = myrpc.make_client(12); // large payload vs. 
server limits auto test_nohandler = myrpc.make_client(100000000); // non existing verb auto test_nohandler_nowait = myrpc.make_client(100000000); // non existing verb, no_wait call rpc::client_options co; if (compress) { co.compressor_factory = &mc; } client = std::make_unique::client>(myrpc, co, ipv4_addr{config["server"].as()}); auto f = test8(*client, 1500ms).then_wrapped([](future<> f) { try { f.get(); printf("test8 should not get here!\n"); } catch (rpc::timeout_error&) { printf("test8 timeout!\n"); } }); for (auto i = 0; i < 100; i++) { fmt::print("iteration={:d}\n", i); (void)test1(*client, 5).then([] (){ fmt::print("test1 ended\n");}); (void)test2(*client, 1, 2).then([] (int r) { fmt::print("test2 got {:d}\n", r); }); (void)test3(*client, x).then([](double x) { fmt::print("sin={:f}\n", x); }); (void)test4(*client).then_wrapped([](future<> f) { try { f.get(); fmt::print("test4 your should not see this!\n"); } catch (std::runtime_error& x){ fmt::print("test4 {}\n", x.what()); } }); (void)test5(*client).then([] { fmt::print("test5 no wait ended\n"); }); (void)test6(*client, 1).then([] { fmt::print("test6 ended\n"); }); (void)test7(*client, 5, 6).then([] (long r) { fmt::print("test7 got {:d}\n", r); }); (void)test9(*client, 1, 2).then([] (long r) { fmt::print("test9 got {:d}\n", r); }); (void)test9_1(*client, 1, 2, 3).then([] (long r) { fmt::print("test9.1 got {:d}\n", r); }); (void)test9_2(*client, 1, 2, 3, 4).then([] (long r) { fmt::print("test9.2 got {:d}\n", r); }); (void)test10(*client).then([] (long r) { fmt::print("test10 got {:d}\n", r); }); (void)test10_1(*client).then([] (rpc::tuple r) { fmt::print("test10_1 got {:d} and {:d}\n", std::get<0>(r), std::get<1>(r)); }); (void)test11(*client).then([] (rpc::tuple > r) { fmt::print("test11 got {:d} and {:d}\n", std::get<0>(r), bool(std::get<1>(r))); }); (void)test_nohandler(*client).then_wrapped([](future<> f) { try { f.get(); fmt::print("test_nohandler your should not see this!\n"); } catch 
(rpc::unknown_verb_error& x){ fmt::print("test_nohandle no such verb\n"); } catch (...) { fmt::print("incorrect exception!\n"); } }); (void)test_nohandler_nowait(*client); auto c = make_lw_shared(); (void)test13(*client, *c).then_wrapped([](future<> f) { try { f.get(); fmt::print("test13 shold not get here\n"); } catch(rpc::canceled_error&) { fmt::print("test13 canceled\n"); } catch(...) { fmt::print("test13 wrong exception\n"); } }); c->cancel(); (void)test13(*client, *c).then_wrapped([](future<> f) { try { f.get(); fmt::print("test13 shold not get here\n"); } catch(rpc::canceled_error&) { fmt::print("test13 canceled\n"); } catch(...) { fmt::print("test13 wrong exception\n"); } }); (void)sleep(500us).then([c] { c->cancel(); }); (void)test_message_to_big(*client, uninitialized_string(10'000'001)).then_wrapped([](future<> f) { try { f.get(); fmt::print("test message to big shold not get here\n"); } catch(std::runtime_error& err) { fmt::print("test message to big get error {}\n", err.what()); } catch(...) { fmt::print("test message to big wrong exception\n"); } }); } // delay a little for a time-sensitive test (void)sleep(400ms).then([test12] () mutable { // server is configured for 10MB max, throw 25MB worth of requests at it. 
auto now = rpc::rpc_clock_type::now(); return parallel_for_each(std::views::iota(0, 25), [test12, now] (int idx) mutable { return test12(*client, 100, uninitialized_string(1'000'000)).then([idx, now] { auto later = rpc::rpc_clock_type::now(); auto delta = std::chrono::duration_cast(later - now); fmt::print("idx {:d} completed after {:d} ms\n", idx, delta.count()); }); }).then([now] { auto later = rpc::rpc_clock_type::now(); auto delta = std::chrono::duration_cast(later - now); fmt::print("test12 completed after {:d} ms (should be ~300)\n", delta.count()); }); }); (void)f.finally([] { return sleep(1s).then([] { return client->stop().then([] { return engine().exit(0); }); }); }); } else { std::cout << "server on port " << port << std::endl; myrpc.register_handler(7, [](long a, long b) mutable { auto p = make_lw_shared>(); auto t = make_lw_shared>(); fmt::print("test7 got {:d} {:d}\n", a, b); auto f = p->get_future().then([a, b, t] { fmt::print("test7 calc res\n"); return a - b; }); t->set_callback([p = std::move(p)] () mutable { p->set_value(); }); t->arm(1s); return f; }); myrpc.register_handler(9, [] (long a, long b, rpc::optional c) { long r = 2; fmt::print("test9 got {:d} {:d} ", a, b); if (c) { fmt::print("{:d}", c.value()); r++; } fmt::print("\n"); return r; }); myrpc.register_handler(10, [] { fmt::print("test 10\n"); return make_ready_future>(rpc::tuple(1, 2)); }); myrpc.register_handler(11, [] { fmt::print("test 11\n"); return 1ul; }); myrpc.register_handler(12, [] (int sleep_ms, sstring payload) { return sleep(std::chrono::milliseconds(sleep_ms)).then([] { return make_ready_future<>(); }); }); rpc::resource_limits limits; limits.bloat_factor = 1; limits.basic_request_size = 0; limits.max_memory = 10'000'000; rpc::server_options so; if (compress) { so.compressor_factory = &mc; } server = std::make_unique::server>(myrpc, so, ipv4_addr{port}, limits); } }); } 
seastar-25.05.0/demos/scheduling_group_demo.cc000066400000000000000000000162311501510432000213020ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2016 Scylla DB Ltd */ #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace seastar; using namespace std::chrono_literals; template future<> compute_intensive_task(Duration duration, unsigned& counter, Func func) { auto end = std::chrono::steady_clock::now() + duration; while (std::chrono::steady_clock::now() < end) { func(); } ++counter; return make_ready_future<>(); } future<> heavy_task(unsigned& counter) { return compute_intensive_task(1ms, counter, [] { static thread_local double x = 1; x = std::exp(x) / 3; }); } future<> light_task(unsigned& counter) { return compute_intensive_task(100us, counter, [] { static thread_local double x = 0.1; x = std::log(x + 1); }); } future<> medium_task(unsigned& counter) { return compute_intensive_task(400us, counter, [] { static thread_local double x = 0.1; x = std::cos(x); }); } using done_func = std::function; future<> run_compute_intensive_tasks(seastar::scheduling_group sg, done_func done, unsigned concurrency, unsigned& counter, std::function (unsigned& counter)> task) { return seastar::async([task = 
std::move(task), sg, concurrency, done, &counter] () mutable { while (!done()) { parallel_for_each(std::views::iota(0u, concurrency), [task, sg, &counter] (unsigned i) mutable { return with_scheduling_group(sg, [task, &counter] { return task(counter); }); }).get(); thread::maybe_yield(); } }); } future<> run_compute_intensive_tasks_in_threads(seastar::scheduling_group sg, done_func done, unsigned concurrency, unsigned& counter, std::function (unsigned& counter)> task) { auto attr = seastar::thread_attributes(); attr.sched_group = sg; return parallel_for_each(std::views::iota(0u, concurrency), [attr, done, &counter, task] (unsigned i) { return seastar::async(attr, [done, &counter, task] { while (!done()) { task(counter).get(); thread::maybe_yield(); } }); }); } future<> run_with_duty_cycle(float utilization, std::chrono::steady_clock::duration period, done_func done, std::function (done_func done)> task) { return seastar::async([=] { bool duty_toggle = true; auto t0 = std::chrono::steady_clock::now(); condition_variable cv; timer<> tmr_on([&] { duty_toggle = true; cv.signal(); }); timer<> tmr_off([&] { duty_toggle = false; }); tmr_on.arm(t0, period); tmr_off.arm(t0 + std::chrono::duration_cast(period * utilization), period); auto combined_done = [&] { return done() || !duty_toggle; }; while (!done()) { while (!combined_done()) { task(std::cref(combined_done)).get(); thread::maybe_yield(); } cv.wait([&] { return done() || duty_toggle; }).get(); } tmr_on.cancel(); tmr_off.cancel(); }); } #include template auto var_fn(T& var) { return [&var] { return var; }; } int main(int ac, char** av) { app_template app; return app.run(ac, av, [] { return seastar::async([] { auto sg100 = seastar::create_scheduling_group("sg100", 100).get(); auto ksg100 = seastar::defer([&] () noexcept { seastar::destroy_scheduling_group(sg100).get(); }); auto sg20 = seastar::create_scheduling_group("sg20", 20).get(); auto ksg20 = seastar::defer([&] () noexcept { 
seastar::destroy_scheduling_group(sg20).get(); }); auto sg50 = seastar::create_scheduling_group("sg50", 50).get(); auto ksg50 = seastar::defer([&] () noexcept { seastar::destroy_scheduling_group(sg50).get(); }); bool done = false; auto end = timer<>([&done] { done = true; }); end.arm(10s); unsigned ctr100 = 0, ctr20 = 0, ctr50 = 0; fmt::print("running three scheduling groups with 100% duty cycle each:\n"); when_all( run_compute_intensive_tasks(sg100, var_fn(done), 5, ctr100, heavy_task), run_compute_intensive_tasks(sg20, var_fn(done), 3, ctr20, light_task), run_compute_intensive_tasks_in_threads(sg50, var_fn(done), 2, ctr50, medium_task) ).get(); fmt::print("{:10} {:15} {:10} {:12} {:8}\n", "shares", "task_time (us)", "executed", "runtime (ms)", "vruntime"); fmt::print("{:10d} {:15d} {:10d} {:12d} {:8.2f}\n", 100, 1000, ctr100, ctr100 * 1000 / 1000, ctr100 * 1000 / 1000 / 100.); fmt::print("{:10d} {:15d} {:10d} {:12d} {:8.2f}\n", 20, 100, ctr20, ctr20 * 100 / 1000, ctr20 * 100 / 1000 / 20.); fmt::print("{:10d} {:15d} {:10d} {:12d} {:8.2f}\n", 50, 400, ctr50, ctr50 * 400 / 1000, ctr50 * 400 / 1000 / 50.); fmt::print("\n"); fmt::print("running two scheduling groups with 100%/50% duty cycles (period=1s:\n"); unsigned ctr100_2 = 0, ctr50_2 = 0; done = false; end.arm(10s); when_all( run_compute_intensive_tasks(sg50, var_fn(done), 5, ctr50_2, heavy_task), run_with_duty_cycle(0.5, 1s, var_fn(done), [=, &ctr100_2] (done_func done) { return run_compute_intensive_tasks(sg100, done, 4, ctr100_2, heavy_task); }) ).get(); fmt::print("{:10} {:10} {:15} {:10} {:12} {:8}\n", "shares", "duty", "task_time (us)", "executed", "runtime (ms)", "vruntime"); fmt::print("{:10d} {:10d} {:15d} {:10d} {:12d} {:8.2f}\n", 100, 50, 1000, ctr100_2, ctr100_2 * 1000 / 1000, ctr100_2 * 1000 / 1000 / 100.); fmt::print("{:10d} {:10d} {:15d} {:10d} {:12d} {:8.2f}\n", 50, 100, 400, ctr50_2, ctr50_2 * 1000 / 1000, ctr50_2 * 1000 / 1000 / 50.); return 0; }); }); } 
seastar-25.05.0/demos/sharded_parameter_demo.cc000066400000000000000000000053601501510432000214140ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2020 ScyllaDB */ // Demonstration of seastar::sharded_parameter #include #include #include #include #include // This is some service that we wish to run on all shards. class service_one { int _capacity = 7; public: // Pretend that this int is some important resource. int get_capacity() const { return _capacity; } }; // Another service that we run on all shards, that depends on service_one. 
class service_two { int _resource_allocation; public: service_two(service_one& s1, int resource_allocation) : _resource_allocation(resource_allocation) {} int get_resource_allocation() const { return _resource_allocation; } }; int main(int ac, char** av) { seastar::app_template app; return app.run(ac, av, [&] { // sharded<> setup code is typically run in a seastar::thread return seastar::async([&] { // Launch service_one seastar::sharded s1; s1.start().get(); auto stop_s1 = seastar::deferred_stop(s1); auto calculate_half_capacity = [] (service_one& s1) { return s1.get_capacity() / 2; }; // Launch service_two, passing it per-shard dependencies from s1 seastar::sharded s2; // Start s2, passing two parameters to service_two's constructor s2.start( // Each service_two instance will get a reference to a service_one instance on the same shard std::ref(s1), // This calculation will be performed on each shard seastar::sharded_parameter(calculate_half_capacity, std::ref(s1)) ).get(); auto stop_s2 = seastar::deferred_stop(s2); s2.invoke_on_all([] (service_two& s2) { SEASTAR_ASSERT(s2.get_resource_allocation() == 3); }).get(); }); }); } seastar-25.05.0/demos/tcp_demo.cc000066400000000000000000000045441501510432000165330ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include using namespace seastar; using namespace net; struct tcp_test { ipv4& inet; using tcp = net::tcp; tcp::listener _listener; struct connection { tcp::connection tcp_conn; explicit connection(tcp::connection tc) : tcp_conn(std::move(tc)) {} void run() { // Read packets and echo back in the background. (void)tcp_conn.wait_for_data().then([this] { auto p = tcp_conn.read(); if (!p.len()) { tcp_conn.close_write(); return; } fmt::print("read {:d} bytes\n", p.len()); (void)tcp_conn.send(std::move(p)); run(); }); } }; tcp_test(ipv4& inet) : inet(inet), _listener(inet.get_tcp().listen(10000)) {} void run() { // Run all connections in the background. (void)_listener.accept().then([this] (tcp::connection conn) { (new connection(std::move(conn)))->run(); run(); }); } }; int main(int ac, char** av) { native_stack_options opts; auto vnet = create_virtio_net_device(opts.virtio_opts, opts.lro); interface netif(std::move(vnet)); ipv4 inet(&netif); inet.set_host_address(ipv4_address("192.168.122.2")); tcp_test tt(inet); (void)engine().when_started().then([&tt] { tt.run(); }); engine().run(); } seastar-25.05.0/demos/tcp_sctp_client_demo.cc000066400000000000000000000247171501510432000211260ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include using namespace seastar; using namespace net; using namespace std::chrono_literals; static int rx_msg_size = 4_KiB; static int tx_msg_total_size = 100_MiB; static int tx_msg_size = 4_KiB; static int tx_msg_nr = tx_msg_total_size / tx_msg_size; static std::string str_txbuf(tx_msg_size, 'X'); class client; distributed clients; transport protocol = transport::TCP; class client { private: static constexpr unsigned _pings_per_connection = 10000; unsigned _total_pings; unsigned _concurrent_connections; ipv4_addr _server_addr; std::string _test; lowres_clock::time_point _earliest_started = lowres_clock::time_point::max(); lowres_clock::time_point _latest_finished = lowres_clock::time_point::min(); size_t _processed_bytes; unsigned _num_reported; public: class connection { connected_socket _fd; input_stream _read_buf; output_stream _write_buf; size_t _bytes_read = 0; size_t _bytes_write = 0; public: connection(connected_socket&& fd) : _fd(std::move(fd)) , _read_buf(_fd.input()) , _write_buf(_fd.output()) {} future<> do_read() { return _read_buf.read_exactly(rx_msg_size).then([this] (temporary_buffer buf) { _bytes_read += buf.size(); if (buf.size() == 0) { return make_ready_future(); } else { return do_read(); } }); } future<> do_write(int end) { if (end == 0) { return make_ready_future(); } return _write_buf.write(str_txbuf).then([this] { _bytes_write += tx_msg_size; return _write_buf.flush(); }).then([this, end] { return do_write(end - 1); }); } future<> ping(int times) { return _write_buf.write("ping").then([this] { return _write_buf.flush(); }).then([this, times] { return _read_buf.read_exactly(4).then([this, times] (temporary_buffer buf) { if (buf.size() != 4) { fmt::print(std::cerr, "illegal packet received: {}\n", buf.size()); return make_ready_future(); } auto 
str = std::string(buf.get(), buf.size()); if (str != "pong") { fmt::print(std::cerr, "illegal packet received: {}\n", buf.size()); return make_ready_future(); } if (times > 0) { return ping(times - 1); } else { return make_ready_future(); } }); }); } future rxrx() { return _write_buf.write("rxrx").then([this] { return _write_buf.flush(); }).then([this] { return do_write(tx_msg_nr).then([this] { return _write_buf.close(); }).then([this] { return make_ready_future(_bytes_write); }); }); } future txtx() { return _write_buf.write("txtx").then([this] { return _write_buf.flush(); }).then([this] { return do_read().then([this] { return make_ready_future(_bytes_read); }); }); } }; future<> ping_test(connection *conn) { auto started = lowres_clock::now(); return conn->ping(_pings_per_connection).then([started] { auto finished = lowres_clock::now(); (void)clients.invoke_on(0, &client::ping_report, started, finished); }); } future<> rxrx_test(connection *conn) { auto started = lowres_clock::now(); return conn->rxrx().then([started] (size_t bytes) { auto finished = lowres_clock::now(); (void)clients.invoke_on(0, &client::rxtx_report, started, finished, bytes); }); } future<> txtx_test(connection *conn) { auto started = lowres_clock::now(); return conn->txtx().then([started] (size_t bytes) { auto finished = lowres_clock::now(); (void)clients.invoke_on(0, &client::rxtx_report, started, finished, bytes); }); } void ping_report(lowres_clock::time_point started, lowres_clock::time_point finished) { if (_earliest_started > started) _earliest_started = started; if (_latest_finished < finished) _latest_finished = finished; if (++_num_reported == _concurrent_connections) { auto elapsed = _latest_finished - _earliest_started; auto usecs = std::chrono::duration_cast(elapsed).count(); auto secs = static_cast(usecs) / static_cast(1000 * 1000); fmt::print(std::cout, "========== ping ============\n"); fmt::print(std::cout, "Server: {}\n", _server_addr); fmt::print(std::cout,"Connections: 
{}\n", _concurrent_connections); fmt::print(std::cout, "Total PingPong: {}\n", _total_pings); fmt::print(std::cout, "Total Time(Secs): {}\n", secs); fmt::print(std::cout, "Requests/Sec: {}\n", static_cast(_total_pings) / secs); (void)clients.stop().then([] { engine().exit(0); }); } } void rxtx_report(lowres_clock::time_point started, lowres_clock::time_point finished, size_t bytes) { if (_earliest_started > started) _earliest_started = started; if (_latest_finished < finished) _latest_finished = finished; _processed_bytes += bytes; if (++_num_reported == _concurrent_connections) { auto elapsed = _latest_finished - _earliest_started; auto usecs = std::chrono::duration_cast(elapsed).count(); auto secs = static_cast(usecs) / static_cast(1000 * 1000); fmt::print(std::cout, "========== {} ============\n", _test); fmt::print(std::cout, "Server: {}\n", _server_addr); fmt::print(std::cout, "Connections: {}\n", _concurrent_connections); fmt::print(std::cout, "Bytes Received(MiB): {}\n", _processed_bytes / 1_MiB); fmt::print(std::cout, "Total Time(Secs): {}\n", secs); fmt::print(std::cout, "Bandwidth(Gbits/Sec): {}\n", static_cast((_processed_bytes * 8)) / (1000 * 1000 * 1000) / secs); (void)clients.stop().then([] { engine().exit(0); }); } } future<> start(ipv4_addr server_addr, std::string test, unsigned ncon) { _server_addr = server_addr; _concurrent_connections = ncon * smp::count; _total_pings = _pings_per_connection * _concurrent_connections; _test = test; for (unsigned i = 0; i < ncon; i++) { socket_address local = socket_address(::sockaddr_in{AF_INET, INADDR_ANY, {0}}); (void)connect(make_ipv4_address(server_addr), local, protocol).then([this, test] (connected_socket fd) { auto conn = new connection(std::move(fd)); (void)(this->*tests.at(test))(conn).then_wrapped([conn] (auto&& f) { delete conn; try { f.get(); } catch (std::exception& ex) { fmt::print(std::cerr, "request error: {}\n", ex.what()); } }); }); } return make_ready_future(); } future<> stop() { return 
make_ready_future(); } typedef future<> (client::*test_fn)(connection *conn); static const std::map tests; }; namespace bpo = boost::program_options; int main(int ac, char ** av) { app_template app; app.add_options() ("server", bpo::value()->required(), "Server address") ("test", bpo::value()->default_value("ping"), "test type(ping | rxrx | txtx)") ("conn", bpo::value()->default_value(16), "nr connections per cpu") ("proto", bpo::value()->default_value("tcp"), "transport protocol tcp|sctp") ; return app.run_deprecated(ac, av, [&app] { auto&& config = app.configuration(); auto server = config["server"].as(); auto test = config["test"].as(); auto ncon = config["conn"].as(); auto proto = config["proto"].as(); if (proto == "tcp") { protocol = transport::TCP; } else if (proto == "sctp") { protocol = transport::SCTP; } else { fmt::print(std::cerr, "Error: --proto=tcp|sctp\n"); return engine().exit(1); } if (!client::tests.count(test)) { fmt::print(std::cerr, "Error: -test=ping | rxrx | txtx\n"); return engine().exit(1); } (void)clients.start().then([server, test, ncon] () { return clients.invoke_on_all(&client::start, ipv4_addr{server}, test, ncon); }); }); } const std::map client::tests = { {"ping", &client::ping_test}, {"rxrx", &client::rxrx_test}, {"txtx", &client::txtx_test}, }; seastar-25.05.0/demos/tcp_sctp_server_demo.cc000066400000000000000000000175001501510432000211460ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2014 Cloudius Systems */ #include #include #include #include #include #include #include #include #include #include "../apps/lib/stop_signal.hh" using namespace seastar; static std::string str_ping{"ping"}; static std::string str_txtx{"txtx"}; static std::string str_rxrx{"rxrx"}; static std::string str_pong{"pong"}; static std::string str_unknow{"unknow cmd"}; static int tx_msg_total_size = 100 * 1024 * 1024; static int tx_msg_size = 4 * 1024; static int tx_msg_nr = tx_msg_total_size / tx_msg_size; static int rx_msg_size = 4 * 1024; static std::string str_txbuf(tx_msg_size, 'X'); static bool enable_tcp = false; static bool enable_sctp = false; class tcp_server { std::vector _tcp_listeners; std::vector _sctp_listeners; std::optional> _tcp_task; std::optional> _sctp_task; public: future<> listen(ipv4_addr addr) { if (enable_tcp) { listen_options lo; lo.proto = transport::TCP; lo.reuse_address = true; _tcp_listeners.push_back(seastar::listen(make_ipv4_address(addr), lo)); _tcp_task = do_accepts(_tcp_listeners); } if (enable_sctp) { listen_options lo; lo.proto = transport::SCTP; lo.reuse_address = true; _sctp_listeners.push_back(seastar::listen(make_ipv4_address(addr), lo)); _sctp_task = do_accepts(_sctp_listeners); } return make_ready_future<>(); } future<> stop() { co_await do_stop(_tcp_listeners, _tcp_task); co_await do_stop(_sctp_listeners, _sctp_task); } future<> do_accepts(std::vector& listeners) { int which = listeners.size() - 1; // Accept in the background. 
return listeners[which].accept().then([this, &listeners] (accept_result ar) mutable { connected_socket fd = std::move(ar.connection); socket_address addr = std::move(ar.remote_address); auto conn = new connection(*this, std::move(fd), addr); (void)conn->process().then_wrapped([conn] (auto&& f) { delete conn; try { f.get(); } catch (std::exception& ex) { std::cout << "request error " << ex.what() << "\n"; } }); return do_accepts(listeners); }).then_wrapped([] (auto&& f) { try { f.get(); } catch (std::exception& ex) { std::cout << "accept failed: " << ex.what() << "\n"; } }); } static future<> do_stop(std::vector& listeners, std::optional>& task) { for (auto& listener : listeners) { listener.abort_accept(); } if (auto fut = std::exchange(task, {})) { co_await std::move(*fut); } } class connection { connected_socket _fd; input_stream _read_buf; output_stream _write_buf; public: connection(tcp_server& server, connected_socket&& fd, socket_address addr) : _fd(std::move(fd)) , _read_buf(_fd.input()) , _write_buf(_fd.output()) {} future<> process() { return read(); } future<> read() { if (_read_buf.eof()) { return make_ready_future(); } // Expect 4 bytes cmd from client size_t n = 4; return _read_buf.read_exactly(n).then([this] (temporary_buffer buf) { if (buf.size() == 0) { return make_ready_future(); } auto cmd = std::string(buf.get(), buf.size()); // pingpong test if (cmd == str_ping) { return _write_buf.write(str_pong).then([this] { return _write_buf.flush(); }).then([this] { return this->read(); }); // server tx test } else if (cmd == str_txtx) { return tx_test(); // server tx test } else if (cmd == str_rxrx) { return rx_test(); // unknow test } else { return _write_buf.write(str_unknow).then([this] { return _write_buf.flush(); }).then([] { return make_ready_future(); }); } }); } future<> do_write(int end) { if (end == 0) { return make_ready_future<>(); } return _write_buf.write(str_txbuf).then([this] { return _write_buf.flush(); }).then([this, end] { return 
do_write(end - 1); }); } future<> tx_test() { return do_write(tx_msg_nr).then([this] { return _write_buf.close(); }).then([] { return make_ready_future<>(); }); } future<> do_read() { return _read_buf.read_exactly(rx_msg_size).then([this] (temporary_buffer buf) { if (buf.size() == 0) { return make_ready_future(); } else { return do_read(); } }); } future<> rx_test() { return do_read().then([] { return make_ready_future<>(); }); } }; }; namespace bpo = boost::program_options; int main(int ac, char** av) { app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "TCP server port") ("tcp", bpo::value()->default_value("yes"), "tcp listen") ("sctp", bpo::value()->default_value("no"), "sctp listen") ; return app.run(ac, av, [&] { return async([&app] { seastar_apps_lib::stop_signal stop_signal; auto&& config = app.configuration(); uint16_t port = config["port"].as(); enable_tcp = config["tcp"].as() == "yes"; enable_sctp = config["sctp"].as() == "yes"; if (!enable_tcp && !enable_sctp) { fmt::print(std::cerr, "Error: no protocols enabled. Use \"--tcp yes\" and/or \"--sctp yes\" to enable\n"); return 1; } distributed server; server.start().get(); auto stop_server = deferred_stop(server); // Start listening in the background. server.invoke_on_all(&tcp_server::listen, ipv4_addr{port}).get(); fmt::print("Seastar TCP server listening on port {} ...\n", port); stop_signal.wait().get(); return 0; }); }); } seastar-25.05.0/demos/tls_echo_server.hh000066400000000000000000000113251501510432000201340ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2017 ScyllaDB */ #pragma once #include #include #include #include #include #include #include #include using namespace seastar; struct streams { connected_socket s; input_stream in; output_stream out; streams(connected_socket cs) : s(std::move(cs)), in(s.input()), out(s.output()) {} }; class echoserver { server_socket _socket; shared_ptr _certs; seastar::gate _gate; bool _stopped = false; bool _verbose = false; public: echoserver(bool verbose = false) : _certs(make_shared(make_shared())) , _verbose(verbose) {} future<> listen(socket_address addr, sstring crtfile, sstring keyfile, tls::client_auth ca = tls::client_auth::NONE) { _certs->set_client_auth(ca); return _certs->set_x509_key_file(crtfile, keyfile, tls::x509_crt_format::PEM).then([this, addr] { ::listen_options opts; opts.reuse_address = true; _socket = tls::listen(_certs, addr, opts); // Listen in background. 
(void)repeat([this] { if (_stopped) { return make_ready_future(stop_iteration::yes); } return with_gate(_gate, [this] { return _socket.accept().then([this](accept_result ar) { ::connected_socket s = std::move(ar.connection); socket_address a = std::move(ar.remote_address); if (_verbose) { std::cout << "Got connection from "<< a << std::endl; } auto strms = make_lw_shared(std::move(s)); return repeat([strms, this]() { return strms->in.read().then([this, strms](temporary_buffer buf) { if (buf.empty()) { if (_verbose) { std::cout << "EOM" << std::endl; } return make_ready_future(stop_iteration::yes); } sstring tmp(buf.begin(), buf.end()); if (_verbose) { std::cout << "Read " << tmp.size() << "B" << std::endl; } return strms->out.write(tmp).then([strms]() { return strms->out.flush(); }).then([] { return make_ready_future(stop_iteration::no); }); }); }).then([strms]{ return strms->out.close(); }).handle_exception([](auto ep) { }).finally([this, strms]{ if (_verbose) { std::cout << "Ending session" << std::endl; } return strms->in.close(); }); }).handle_exception([this](auto ep) { if (!_stopped) { std::cerr << "Error: " << ep << std::endl; } }).then([this] { return make_ready_future(_stopped ? stop_iteration::yes : stop_iteration::no); }); }); }); return make_ready_future(); }); } future<> stop() { _stopped = true; _socket.abort_accept(); return _gate.close(); } }; seastar-25.05.0/demos/tls_echo_server_demo.cc000066400000000000000000000056301501510432000211300ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright 2015 Cloudius Systems */ #include #include #include #include #include #include #include #include "../apps/lib/stop_signal.hh" #include "tls_echo_server.hh" using namespace seastar; namespace bpo = boost::program_options; int main(int ac, char** av) { app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "Server port") ("address", bpo::value()->default_value("127.0.0.1"), "Server address") ("cert,c", bpo::value()->required(), "Server certificate file") ("key,k", bpo::value()->required(), "Certificate key") ("verbose,v", bpo::value()->default_value(false)->implicit_value(true), "Verbose") ; return app.run(ac, av, [&app] { return async([&app] { seastar_apps_lib::stop_signal stop_signal; auto&& config = app.configuration(); uint16_t port = config["port"].as(); auto crt = config["cert"].as(); auto key = config["key"].as(); auto addr = config["address"].as(); auto verbose = config["verbose"].as(); std::cout << "Starting..." << std::endl; net::inet_address a = net::dns::resolve_name(addr).get(); ipv4_addr ia(a, port); seastar::sharded server; server.start(verbose).get(); auto stop_server = deferred_stop(server); try { server.invoke_on_all(&echoserver::listen, socket_address(ia), sstring(crt), sstring(key), tls::client_auth::NONE).get(); } catch (...) 
{ std::cerr << "Error: " << std::current_exception() << std::endl; return 1; } std::cout << "TLS echo server running at " << addr << ":" << port << std::endl; stop_signal.wait().get(); return 0; }); }); } seastar-25.05.0/demos/tls_simple_client_demo.cc000066400000000000000000000134631501510432000214560ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. 
*/ /* * Copyright 2015 Cloudius Systems */ #include #include #include #include #include #include #include #include #include "tls_echo_server.hh" using namespace seastar; namespace bpo = boost::program_options; int main(int ac, char** av) { app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "Remote port") ("address", bpo::value()->default_value("127.0.0.1"), "Remote address") ("trust,t", bpo::value(), "Trust store") ("msg,m", bpo::value(), "Message to send") ("bytes,b", bpo::value()->default_value(512), "Use random bytes of length as message") ("iterations,i", bpo::value()->default_value(1), "Repeat X times") ("read-response,r", bpo::value()->default_value(true)->implicit_value(true), "Read echoed message") ("verbose,v", bpo::value()->default_value(false)->implicit_value(true), "Verbose operation") ("check-name,c", bpo::value()->default_value(false)->implicit_value(true), "Check server name") ("server-name,s", bpo::value(), "Expected server name") ; return app.run_deprecated(ac, av, [&] { auto&& config = app.configuration(); uint16_t port = config["port"].as(); auto addr = config["address"].as(); auto n = config["bytes"].as(); auto i = config["iterations"].as(); auto do_read = config["read-response"].as(); auto verbose = config["verbose"].as(); auto check = config["check-name"].as(); std::cout << "Starting..." 
<< std::endl; auto certs = ::make_shared(); auto f = make_ready_future(); if (config.count("trust")) { f = certs->set_x509_trust_file(config["trust"].as(), tls::x509_crt_format::PEM); } seastar::shared_ptr msg; if (config.count("msg")) { msg = seastar::make_shared(config["msg"].as()); } else { msg = seastar::make_shared(uninitialized_string(n)); for (size_t i = 0; i < n; ++i) { (*msg)[i] = '0' + char(::rand() % 30); } } sstring server_name; if (config.count("server-name")) { server_name = config["server-name"].as(); } if (verbose) { std::cout << "Msg (" << msg->size() << "B):" << std::endl << *msg << std::endl; } return f.then([=]() { return net::dns::get_host_by_name(addr).then([=](net::hostent e) { ipv4_addr ia(e.addr_list.front(), port); tls::tls_options options; if (check) { options.server_name = server_name.empty() ? e.names.front() : server_name; } return tls::connect(certs, ia, options).then([=](::connected_socket s) { auto strms = ::make_lw_shared(std::move(s)); auto range = std::views::iota(size_t(0), i); return do_for_each(range, [=](auto) { auto f = strms->out.write(*msg); if (!do_read) { return strms->out.close().then([f = std::move(f)]() mutable { return std::move(f); }); } return f.then([=]() { return strms->out.flush().then([=] { return strms->in.read_exactly(msg->size()).then([=](temporary_buffer buf) { sstring tmp(buf.begin(), buf.end()); if (tmp != *msg) { std::cerr << "Got garbled message!" << std::endl; if (verbose) { std::cout << "Got (" << tmp.size() << ") :" << std::endl << tmp << std::endl; } throw std::runtime_error("Got garbled message!"); } }); }); }); }).then([strms, do_read]{ return do_read ? 
strms->out.close() : make_ready_future<>(); }).finally([strms]{ return strms->in.close(); }); }); }).handle_exception([](auto ep) { std::cerr << "Error: " << ep << std::endl; }); }).finally([] { engine().exit(0); }); }); } seastar-25.05.0/demos/tutorial_examples.cc000066400000000000000000000104011501510432000204670ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2020 ScyllaDB. 
*/ #include #include #include #include #include seastar::future<> service_loop() { return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234})), [] (auto& listener) { return seastar::keep_doing([&listener] () { return listener.accept().then( [] (seastar::accept_result res) { std::cout << "Accepted connection from " << res.remote_address << "\n"; }); }); }); } const char* canned_response = "Seastar is the future!\n"; seastar::future<> service_loop_2() { seastar::listen_options lo; lo.reuse_address = true; return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234}), lo), [] (auto& listener) { return seastar::keep_doing([&listener] () { return listener.accept().then( [] (seastar::accept_result res) { auto s = std::move(res.connection); auto out = s.output(); return seastar::do_with(std::move(s), std::move(out), [] (auto& s, auto& out) { return out.write(canned_response).then([&out] { return out.close(); }); }); }); }); }); } seastar::future<> handle_connection_3(seastar::connected_socket s, seastar::socket_address a) { auto out = s.output(); auto in = s.input(); return do_with(std::move(s), std::move(out), std::move(in), [] (auto& s, auto& out, auto& in) { return seastar::repeat([&out, &in] { return in.read().then([&out] (auto buf) { if (buf) { return out.write(std::move(buf)).then([&out] { return out.flush(); }).then([] { return seastar::stop_iteration::no; }); } else { return seastar::make_ready_future( seastar::stop_iteration::yes); } }); }).then([&out] { return out.close(); }); }); } seastar::future<> service_loop_3() { seastar::listen_options lo; lo.reuse_address = true; return seastar::do_with(seastar::listen(seastar::make_ipv4_address({1234}), lo), [] (auto& listener) { return seastar::keep_doing([&listener] () { return listener.accept().then( [] (seastar::accept_result res) { // Note we ignore, not return, the future returned by // handle_connection(), so we do not wait for one // connection to be handled before accepting the next 
one. (void)handle_connection_3(std::move(res.connection), std::move(res.remote_address)).handle_exception( [] (std::exception_ptr ep) { fmt::print(stderr, "Could not handle connection: {}\n", ep); }); }); }); }); } #include int main(int ac, char** av) { seastar::app_template app; return app.run(ac, av, [] { std::cout << "This is the tutorial examples demo. It is not running anything but rather makes sure the tutorial examples compile" << std::endl; return seastar::make_ready_future<>(); }); } seastar-25.05.0/demos/udp_client_demo.cc000066400000000000000000000052151501510432000200670ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. 
*/ #include #include #include #include #include using namespace seastar; using namespace net; using namespace std::chrono_literals; class client { private: udp_channel _chan; uint64_t n_sent {}; uint64_t n_received {}; uint64_t n_failed {}; timer<> _stats_timer; public: void start(ipv4_addr server_addr) { std::cout << "Sending to " << server_addr << std::endl; _chan = make_unbound_datagram_channel(AF_INET); _stats_timer.set_callback([this] { std::cout << "Out: " << n_sent << " pps, \t"; std::cout << "Err: " << n_failed << " pps, \t"; std::cout << "In: " << n_received << " pps" << std::endl; n_sent = 0; n_received = 0; n_failed = 0; }); _stats_timer.arm_periodic(1s); // Run sender in background. (void)keep_doing([this, server_addr] { return _chan.send(server_addr, "hello!\n") .then_wrapped([this] (auto&& f) { try { f.get(); n_sent++; } catch (...) { n_failed++; } }); }); // Run receiver in background. (void)keep_doing([this] { return _chan.receive().then([this] (auto) { n_received++; }); }); } }; namespace bpo = boost::program_options; int main(int ac, char ** av) { client _client; app_template app; app.add_options() ("server", bpo::value(), "Server address") ; return app.run_deprecated(ac, av, [&_client, &app] { auto&& config = app.configuration(); _client.start(config["server"].as()); }); } seastar-25.05.0/demos/udp_server_demo.cc000066400000000000000000000061121501510432000201140ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. 
* * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. */ #include #include #include #include #include #include #include "../apps/lib/stop_signal.hh" using namespace seastar; using namespace net; using namespace std::chrono_literals; class udp_server { private: std::optional _chan; std::optional> _task; timer<> _stats_timer; uint64_t _n_sent {}; public: void start(uint16_t port) { ipv4_addr listen_addr{port}; _chan = make_bound_datagram_channel(listen_addr); _stats_timer.set_callback([this] { std::cout << "Out: " << _n_sent << " pps" << std::endl; _n_sent = 0; }); _stats_timer.arm_periodic(1s); // Run server in background. 
_task = keep_doing([this] { return _chan->receive().then([this] (datagram dgram) { return _chan->send(dgram.get_src(), std::move(dgram.get_data())).then([this] { _n_sent++; }); }); }); } future<> stop() { if (_chan) { _chan->shutdown_input(); _chan->shutdown_output(); } if (_task) { co_await _task->handle_exception([](std::exception_ptr e) { std::cerr << "exception in udp_server: " << e << "\n"; }); } } }; namespace bpo = boost::program_options; int main(int ac, char ** av) { app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "UDP server port") ; return app.run(ac, av, [&] { return async([&app] { seastar_apps_lib::stop_signal stop_signal; auto&& config = app.configuration(); uint16_t port = config["port"].as(); sharded server; if (engine().net().has_per_core_namespace()) { server.start().get(); } else { server.start_single().get(); } auto stop_server = deferred_stop(server); server.invoke_on_all(&udp_server::start, port).get(); std::cout << "Seastar UDP server listening on port " << port << " ...\n"; stop_signal.wait().get(); }); }); } seastar-25.05.0/demos/udp_zero_copy_demo.cc000066400000000000000000000122021501510432000206140ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2014 Cloudius Systems, Ltd. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include using namespace seastar; using namespace net; using namespace std::chrono_literals; namespace bpo = boost::program_options; template typename Duration::rep to_seconds(Duration d) { return std::chrono::duration_cast(d).count(); } class server { private: udp_channel _chan; timer<> _stats_timer; uint64_t _n_sent {}; size_t _chunk_size; bool _copy; std::vector _packets; std::unique_ptr> _out; steady_clock_type::time_point _last; sstring _key; size_t _packet_size = 8*KB; char* _mem; size_t _mem_size; std::mt19937 _rnd; std::random_device _randem_dev; std::uniform_int_distribution _chunk_distribution; private: char* next_chunk() { return _mem + _chunk_distribution(_rnd); } public: server() : _rnd(std::random_device()()) { } future<> send(ipv4_addr dst, packet p) { return _chan.send(dst, std::move(p)).then([this] { _n_sent++; }); } void start(int chunk_size, bool copy, size_t mem_size) { ipv4_addr listen_addr{10000}; _chan = make_bound_datagram_channel(listen_addr); std::cout << "Listening on " << listen_addr << std::endl; _last = steady_clock_type::now(); _stats_timer.set_callback([this] { auto now = steady_clock_type::now(); std::cout << "Out: " << std::setprecision(2) << std::fixed << (double)_n_sent / to_seconds(now - _last) << " pps" << std::endl; _last = now; _n_sent = 0; }); _stats_timer.arm_periodic(1s); _chunk_size = chunk_size; _copy = copy; _key = sstring(new char[64], 64); _out = std::make_unique>( data_sink(std::make_unique(_packets)), _packet_size); _mem = new char[mem_size]; _mem_size = mem_size; _chunk_distribution = std::uniform_int_distribution(0, _mem_size - _chunk_size * 3); SEASTAR_ASSERT(3 * _chunk_size <= _packet_size); // Run sender in background. 
(void)keep_doing([this] { return _chan.receive().then([this] (datagram dgram) { auto chunk = next_chunk(); lw_shared_ptr item; if (_copy) { _packets.clear(); // FIXME: future is discarded (void)_out->write(chunk, _chunk_size); chunk += _chunk_size; (void)_out->write(chunk, _chunk_size); chunk += _chunk_size; (void)_out->write(chunk, _chunk_size); (void)_out->flush(); SEASTAR_ASSERT(_packets.size() == 1); return send(dgram.get_src(), std::move(_packets[0])); } else { auto chunk = next_chunk(); scattered_message msg; msg.reserve(3); msg.append_static(chunk, _chunk_size); msg.append_static(chunk, _chunk_size); msg.append_static(chunk, _chunk_size); return send(dgram.get_src(), std::move(msg).release()); } }); }); } }; int main(int ac, char ** av) { server s; app_template app; app.add_options() ("chunk-size", bpo::value()->default_value(1024), "Chunk size") ("mem-size", bpo::value()->default_value(512), "Memory pool size in MiB") ("copy", "Copy data rather than send via zero-copy") ; return app.run_deprecated(ac, av, [&app, &s] { auto&& config = app.configuration(); auto chunk_size = config["chunk-size"].as(); auto mem_size = (size_t)config["mem-size"].as() * MB; auto copy = config.count("copy"); s.start(chunk_size, copy, mem_size); }); } seastar-25.05.0/demos/websocket_server_demo.cc000066400000000000000000000056511501510432000213210ustar00rootroot00000000000000/* * This file is open source software, licensed to you under the terms * of the Apache License, Version 2.0 (the "License"). See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. You may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (C) 2021 ScyllaDB Ltd. */ #include #include #include #include #include #include #include #include #include using namespace seastar; using namespace seastar::experimental; namespace bpo = boost::program_options; int main(int argc, char** argv) { seastar::app_template app; app.add_options() ("port", bpo::value()->default_value(10000), "WebSocket server port") ; app.run(argc, argv, [&app]() -> seastar::future<> { auto&& config = app.configuration(); uint16_t port = config["port"].as(); return async([port] { websocket::server ws; ws.register_handler("echo", [] (input_stream& in, output_stream& out) { return repeat([&in, &out]() { return in.read().then([&out](temporary_buffer f) { std::cerr << "f.size(): " << f.size() << "\n"; if (f.empty()) { return make_ready_future(stop_iteration::yes); } else { return out.write(std::move(f)).then([&out]() { return out.flush().then([] { return make_ready_future(stop_iteration::no); }); }); } }); }); }); auto d = defer([&ws] () noexcept { ws.stop().get(); }); ws.listen(socket_address(ipv4_addr("127.0.0.1", port))); std::cout << "Listening on 127.0.0.1:" << port << " for 1 hour (interruptible, hit Ctrl-C to stop)..." 
<< std::endl; seastar::sleep_abortable(std::chrono::hours(1)).handle_exception([](auto ignored) {}).get(); std::cout << "Stopping the server, deepest thanks to all clients, hope we meet again" << std::endl; }); }); } seastar-25.05.0/doc/000077500000000000000000000000001501510432000140615ustar00rootroot00000000000000seastar-25.05.0/doc/CMakeLists.txt000066400000000000000000000036571501510432000166340ustar00rootroot00000000000000find_program (Seastar_DOXYGEN_EXECUTABLE doxygen) if (NOT Seastar_DOXYGEN_EXECUTABLE) message (FATAL_ERROR "doxgen is required for building document!") endif () configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/DoxygenLayout.xml ${CMAKE_CURRENT_BINARY_DIR}/DoxygenLayout.xml COPYONLY) add_custom_target (doc_api COMMAND ${Seastar_DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile) add_custom_command ( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/md2html ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html) add_custom_target (doc_tutorial_html DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html) add_custom_command ( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/html/split DEPENDS # Necessary because file-level dependencies are not propagated for custom targets. 
${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html doc_tutorial_html COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/html/split COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/htmlsplit.py --input ${CMAKE_CURRENT_BINARY_DIR}/html/tutorial.html --output-dir ${CMAKE_CURRENT_BINARY_DIR}/html/split) add_custom_target (doc_tutorial_html_split DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/html/split) add_custom_command ( OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/md2pdf ${CMAKE_CURRENT_SOURCE_DIR}/tutorial.md ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf) add_custom_target (doc_tutorial_pdf DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/tutorial.pdf) # Logical target for all documentation. add_custom_target (docs DEPENDS doc_api doc_tutorial_html doc_tutorial_html_split doc_tutorial_pdf) seastar-25.05.0/doc/Doxyfile.in000066400000000000000000003125101501510432000161760ustar00rootroot00000000000000# Doxyfile 1.8.9.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. 
See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = "Seastar" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "High performance C++ framework for concurrent servers" # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@ # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. 
Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = YES # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. 
REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = YES # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. 
STRIP_FROM_PATH = @Seastar_SOURCE_DIR@/include @Seastar_BINARY_DIR@/gen/include @Seastar_SOURCE_DIR@/doc # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = @Seastar_SOURCE_DIR@/include @Seastar_BINARY_DIR@/gen/include # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. 
# # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. 
Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: # Fortran. In the later case the parser tries to guess whether the code is fixed # or free formatted code, this is the default for Fortran type files), VHDL. For # instance to make doxygen treat .inc files as Fortran files (default is PHP), # and .f files as C (default is Fortran), use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. 
# The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. 
If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = YES # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. 
So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. 
# The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = NO # This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. 
# The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO, these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = YES # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. 
SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. 
If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. 
GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. 
Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. 
# The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong or incomplete # parameter documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. 
You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. # Note: If this tag is empty the current directory is searched. INPUT = @Seastar_SOURCE_DIR@/include INPUT += @Seastar_BINARY_DIR@/gen/include INPUT += @Seastar_SOURCE_DIR@/doc/rpc.md INPUT += @Seastar_SOURCE_DIR@/doc/rpc-streaming.md INPUT += @Seastar_SOURCE_DIR@/doc/rpc-compression.md INPUT += @Seastar_SOURCE_DIR@/doc/compatibility.md # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the # following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, # *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, # *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. FILE_PATTERNS = # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. 
EXCLUDE = build dpdk tests apps scripts # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = test.py # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = seastar::internal seastar::coroutine::internal # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = @Seastar_SOURCE_DIR@/demos @Seastar_SOURCE_DIR@/tests/unit # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. 
EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. 
FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. 
# The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. 
VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: NO. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. 
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. 
# Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to NO can help when comparing the output of multiple runs. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. 
Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: http://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. 
DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). 
If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the master .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. 
For more information please see Qt Help Project / Namespace # (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. 
QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). 
For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. 
# Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering # instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want the formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. 
The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from http://www.mathjax.org before deployment. # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use + S # (what the is depends on the OS and browser, but it is typically # , /