pax_global_header00006660000000000000000000000064144336477540014533gustar00rootroot0000000000000052 comment=fd3e9a8a5eb0a1cdb64e1ff20a388094bdfbdb3c rpma-1.3.0/000077500000000000000000000000001443364775400124735ustar00rootroot00000000000000rpma-1.3.0/.circleci/000077500000000000000000000000001443364775400143265ustar00rootroot00000000000000rpma-1.3.0/.circleci/config.yml000066400000000000000000000022531443364775400163200ustar00rootroot00000000000000version: 2.1 workflows: main: jobs: - build jobs: build: machine: image: ubuntu-2204:2022.04.1 steps: - checkout - run: name: Install required packages command: .circleci/install-pkgs-ubuntu.sh - run: name: Install cmocka command: utils/docker/images/install-cmocka.sh - run: # Configure SoftRoCE and read the RPMA_TESTING_IP environment variable name: Configure SoftRoCE command: ./tools/config_softroce.sh && source $BASH_ENV - run: name: Build rpma command: | mkdir build cd build cmake .. -DTESTS_RDMA_CONNECTION=ON make -j$(nproc) - run: name: Run tests without memcheck command: cd build && ctest -E memcheck --output-on-failure - run: name: Run all examples command: cd build && make run_all_examples - run: name: Run all examples under valgrind command: cd build && make run_all_examples_under_valgrind - run: name: Run tests with memcheck command: cd build && ctest --output-on-failure rpma-1.3.0/.circleci/install-pkgs-ubuntu.sh000077500000000000000000000025771443364775400206300ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021-2022, Intel Corporation # # # .circleci/install-pkgs-ubuntu.sh - install packages for the CircleCI Ubuntu image # set -e BASE_DEPS="\ apt-utils \ build-essential \ devscripts \ git \ pkg-config \ sudo \ whois" EXAMPLES_DEPS="\ libpmem-dev \ libprotobuf-c-dev \ valgrind" RPMA_DEPS="\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ libunwind-dev \ linux-modules-extra-$(uname -r) \ pandoc" export DEBIAN_FRONTEND=noninteractive # Update existing packages sudo apt-get update --allow-unauthenticated # update list of sources MIRROR="http://ddebs.ubuntu.com" echo " deb $MIRROR $(lsb_release -cs) main restricted universe multiverse deb $MIRROR $(lsb_release -cs)-updates main restricted universe multiverse deb $MIRROR $(lsb_release -cs)-proposed main restricted universe multiverse" | \ sudo tee -a /etc/apt/sources.list.d/ddebs.list # import missing GPG keys: sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C8CAB6595FDFF622 sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 536F8F1DE80F6A35 # Update existing packages once again sudo apt-get update --allow-unauthenticated # Install new packages sudo apt-get install --assume-yes --no-install-recommends --allow-unauthenticated \ $BASE_DEPS \ $EXAMPLES_DEPS \ $RPMA_DEPS rpma-1.3.0/.codecov.yml000066400000000000000000000004221443364775400147140ustar00rootroot00000000000000ignore: - cmake/ - doc/ - examples/ - src/valgrind/ - tests/ - utils/ comment: layout: "diff" behavior: default require_changes: yes parsers: gcov: branch_detection: conditional: false loop: false method: false macro: false rpma-1.3.0/.gitattributes000066400000000000000000000001071443364775400153640ustar00rootroot00000000000000* text=auto eol=lf *.jpg binary *.png binary *.gif binary *.ico binary 
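For reference, the CircleCI job defined in `.circleci/config.yml` above can be reproduced by hand. The sketch below is not part of the repository: it assumes an Ubuntu 22.04 host with sudo access and a SoftRoCE-capable network interface, and it uses the placeholder IP address from DEVELOPMENT.md; adjust it to the address reported by `config_softroce.sh`.

```sh
.circleci/install-pkgs-ubuntu.sh          # install the required packages
utils/docker/images/install-cmocka.sh     # install cmocka
./tools/config_softroce.sh                # configure SoftRoCE; prints the IP of the interface
export RPMA_TESTING_IP=192.168.0.1        # insert the IP address reported above
mkdir build && cd build
cmake .. -DTESTS_RDMA_CONNECTION=ON
make -j$(nproc)
ctest -E memcheck --output-on-failure     # tests without memcheck
make run_all_examples
make run_all_examples_under_valgrind
ctest --output-on-failure                 # tests with memcheck
```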
rpma-1.3.0/.github/000077500000000000000000000000001443364775400140335ustar00rootroot00000000000000rpma-1.3.0/.github/workflows/000077500000000000000000000000001443364775400160705ustar00rootroot00000000000000rpma-1.3.0/.github/workflows/coverity.yml000066400000000000000000000026071443364775400204640ustar00rootroot00000000000000# # The Coverity build run over the night. # # It is run at 00:00 UTC every day or on demand. # name: Coverity on: workflow_dispatch: schedule: - cron: '0 0 * * *' env: GITHUB_REPO: pmem/rpma # use GitHub Container Registry as a repository of docker images GH_CR_ADDR: ghcr.io DOCKER_REPO: ghcr.io/pmem/rpma # use org's Private Access Token to log in to GitHub Container Registry GH_CR_USER: ${{ secrets.GH_CR_USER }} GH_CR_PAT: ${{ secrets.GH_CR_PAT }} COVERITY_SCAN_NOTIFICATION_EMAIL: ${{ secrets.COVERITY_SCAN_NOTIFICATION_EMAIL }} COVERITY_SCAN_TOKEN: ${{ secrets.COVERITY_SCAN_TOKEN }} HOST_WORKDIR: /home/runner/work/rpma/rpma WORKDIR: utils/docker jobs: linux: name: Linux runs-on: ubuntu-latest strategy: matrix: CONFIG: ["N=1 OS=ubuntu OS_VER=latest TYPE=coverity CC=gcc"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" rpma-1.3.0/.github/workflows/nightly.yml000066400000000000000000000077631443364775400203060ustar00rootroot00000000000000# # Builds run over the night testing all OSes supported by librpma. # (each of them is built with two compilers, gcc and clang, separately). # # It is run at 00:00 UTC every day or on demand. 
# name: Nightly on: workflow_dispatch: schedule: - cron: '0 0 * * *' env: GITHUB_REPO: pmem/rpma # use GitHub Container Registry as a repository of docker images GH_CR_ADDR: ghcr.io DOCKER_REPO: ghcr.io/pmem/rpma # use org's Private Access Token to log in to GitHub Container Registry GH_CR_USER: ${{ secrets.GH_CR_USER }} GH_CR_PAT: ${{ secrets.GH_CR_PAT }} DOC_UPDATE_GITHUB_TOKEN: ${{ secrets.DOC_UPDATE_GITHUB_TOKEN }} HOST_WORKDIR: /home/runner/work/rpma/rpma WORKDIR: utils/docker TYPE: normal jobs: gcc: name: GCC runs-on: ubuntu-latest env: CC: gcc PUSH_IMAGE: 1 strategy: fail-fast: false matrix: CONFIG: ["N=Ubuntu OS=ubuntu OS_VER=latest", "N=Fedora OS=fedora OS_VER=latest", "N=Debian OS=debian OS_VER=latest", "N=DebianS OS=debian OS_VER=stable", "N=CentOS7 OS=centos OS_VER=7", # successors of CentOS: "N=RockyLinux9 OS=rockylinux OS_VER=9", "N=RockyLinux8 OS=rockylinux OS_VER=8", "N=VzLinux8 OS=vzlinux OS_VER=latest", # Rolling/testing/experimental distributions # (Fedora Rawhide, Ubuntu Rolling and Arch Linux # were moved to Nightly_Experimental): "N=Debian_Testing OS=debian OS_VER=testing", "N=Debian_Experimental OS=debian OS_VER=experimental", "N=OpenSUSE_Leap OS=opensuse-leap OS_VER=latest", "N=OpenSUSE_Tumbleweed OS=opensuse-tumbleweed OS_VER=latest"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" clang: name: Clang runs-on: ubuntu-latest env: CC: clang strategy: fail-fast: false matrix: CONFIG: ["N=Ubuntu OS=ubuntu OS_VER=latest", "N=Fedora OS=fedora OS_VER=latest", "N=Debian OS=debian OS_VER=latest", "N=DebianS OS=debian OS_VER=stable", "N=CentOS7 OS=centos OS_VER=7", # successors of CentOS: "N=RockyLinux9 OS=rockylinux OS_VER=9", "N=RockyLinux8 OS=rockylinux OS_VER=8", "N=VzLinux8 OS=vzlinux OS_VER=latest", # Rolling/testing/experimental distributions # (Fedora Rawhide, Ubuntu Rolling and Arch Linux # were moved to Nightly_Experimental): "N=Debian_Testing OS=debian OS_VER=testing", "N=Debian_Experimental OS=debian OS_VER=experimental", "N=OpenSUSE_Leap OS=opensuse-leap OS_VER=latest", "N=OpenSUSE_Tumbleweed OS=opensuse-tumbleweed OS_VER=latest"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" rpma-1.3.0/.github/workflows/nightly_experimental.yml000066400000000000000000000044111443364775400230460ustar00rootroot00000000000000# # Builds using very experimental OS distributions that can fail very often. # # It is run at 00:00 UTC every day or on demand. 
# name: Nightly_Experimental on: workflow_dispatch: schedule: - cron: '0 0 * * *' env: GITHUB_REPO: pmem/rpma # use GitHub Container Registry as a repository of docker images GH_CR_ADDR: ghcr.io DOCKER_REPO: ghcr.io/pmem/rpma # use org's Private Access Token to log in to GitHub Container Registry GH_CR_USER: ${{ secrets.GH_CR_USER }} GH_CR_PAT: ${{ secrets.GH_CR_PAT }} DOC_UPDATE_GITHUB_TOKEN: ${{ secrets.DOC_UPDATE_GITHUB_TOKEN }} HOST_WORKDIR: /home/runner/work/rpma/rpma WORKDIR: utils/docker TYPE: normal jobs: experimental: name: experimental runs-on: ubuntu-latest strategy: fail-fast: false matrix: CONFIG: ["N=Fedora_Rawhide OS=fedora OS_VER=rawhide CC=gcc CI_SANITS=ON PUSH_IMAGE=0", "N=Fedora_Rawhide_SANITS OS=fedora OS_VER=rawhide CC=clang CI_SANITS=ON PUSH_IMAGE=0", "N=Fedora_Rawhide_no_SANITS OS=fedora OS_VER=rawhide CC=clang CI_SANITS=OFF PUSH_IMAGE=1 REBUILD_ALWAYS=YES", "N=Ubuntu_Rolling OS=ubuntu OS_VER=rolling CC=clang CI_SANITS=ON PUSH_IMAGE=1 REBUILD_ALWAYS=YES", # There is no official Docker image of CentOS Stream yet: https://hub.docker.com/_/centos # and the unofficial one used so far (tgagor/centos:stream) has stopped working, so we have to disable it. # "N=CentOS_Stream OS=centos OS_VER=stream CC=gcc CI_SANITS=OFF PUSH_IMAGE=1 REBUILD_ALWAYS=YES", "N=Arch_Linux_Latest OS=archlinux OS_VER=latest CC=gcc CI_SANITS=OFF PUSH_IMAGE=1 REBUILD_ALWAYS=YES"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" rpma-1.3.0/.github/workflows/nightly_rebuild.yml000066400000000000000000000050561443364775400220050ustar00rootroot00000000000000# # Builds containing rolling/testing/experimental OS distributions # which are updated very frequently and because of that # their docker images should be rebuilt every time. # # It is run at 00:00 UTC every day or on demand. # name: Nightly_Rebuild on: workflow_dispatch: schedule: - cron: '0 0 * * *' env: GITHUB_REPO: pmem/rpma # use GitHub Container Registry as a repository of docker images GH_CR_ADDR: ghcr.io DOCKER_REPO: ghcr.io/pmem/rpma # use org's Private Access Token to log in to GitHub Container Registry GH_CR_USER: ${{ secrets.GH_CR_USER }} GH_CR_PAT: ${{ secrets.GH_CR_PAT }} DOC_UPDATE_GITHUB_TOKEN: ${{ secrets.DOC_UPDATE_GITHUB_TOKEN }} HOST_WORKDIR: /home/runner/work/rpma/rpma WORKDIR: utils/docker TYPE: normal jobs: gcc: name: GCC runs-on: ubuntu-latest env: CC: gcc PUSH_IMAGE: 1 strategy: fail-fast: false matrix: # only rolling/testing/experimental distributions with rebuild: CONFIG: ["N=Ubuntu_Rolling OS=ubuntu OS_VER=rolling REBUILD_ALWAYS=YES", # the Fedora_Rawhide build was moved to Nightly_Experimental "N=Debian_Testing OS=debian OS_VER=testing REBUILD_ALWAYS=YES", "N=Debian_Experimental OS=debian OS_VER=experimental REBUILD_ALWAYS=YES", "N=Arch_Linux_Latest OS=archlinux OS_VER=latest REBUILD_ALWAYS=YES", # The CentOS_Stream build was moved to Nightly_Experimental, # because the 'epel-release' repo of CentOS Stream cannot be found # and this build has been failing for a long time. 
# # The OpenSUSE_Tumbleweed build was temporarily moved to Nightly_Experimental # because of the following bug: # https://bugzilla.opensuse.org/show_bug.cgi?id=1190670 "N=OpenSUSE_Leap OS=opensuse-leap OS_VER=latest REBUILD_ALWAYS=YES"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" rpma-1.3.0/.github/workflows/on_pull_request.yml000066400000000000000000000041101443364775400220270ustar00rootroot00000000000000# # Builds run on every pull request and every push to the repo. # name: GitHubActions on: [push, pull_request] env: GITHUB_REPO: pmem/rpma # use GitHub Container Registry as a repository of docker images GH_CR_ADDR: ghcr.io DOCKER_REPO: ghcr.io/pmem/rpma # use org's Private Access Token to log in to GitHub Container Registry GH_CR_USER: ${{ secrets.GH_CR_USER }} GH_CR_PAT: ${{ secrets.GH_CR_PAT }} DOC_UPDATE_GITHUB_TOKEN: ${{ secrets.DOC_UPDATE_GITHUB_TOKEN }} HOST_WORKDIR: /home/runner/work/rpma/rpma WORKDIR: utils/docker TYPE: normal jobs: linux: name: Linux runs-on: ubuntu-latest strategy: matrix: CONFIG: ["N=1 OS=ubuntu OS_VER=latest CC=gcc", # to be reverted: TESTS_COVERAGE=1", "N=2 OS=ubuntu OS_VER=latest CC=clang PUSH_IMAGE=1", "N=3 OS=fedora OS_VER=latest CC=gcc PUSH_IMAGE=1", "N=4 OS=fedora OS_VER=latest CC=clang AUTO_DOC_UPDATE=1", "N=5 OS=rockylinux OS_VER=9 CC=gcc PUSH_IMAGE=1", "N=6 OS=rockylinux OS_VER=8 CC=gcc PUSH_IMAGE=1", # Ubuntu-latest with rdma-core v45.0 installed from sources "N=7 OS=ubuntu OS_VER=latest-with-rdma-core-45 CC=gcc TESTS_COVERAGE=1", "N=8 OS=ubuntu OS_VER=latest-with-rdma-core-45 CC=clang PUSH_IMAGE=1", # Fedora-latest with rdma-core v45.0 installed from sources "N=9 OS=fedora OS_VER=latest-with-rdma-core-45 CC=gcc PUSH_IMAGE=1", "N=10 OS=fedora OS_VER=latest-with-rdma-core-45 CC=clang"] steps: - name: Clone the git repo uses: actions/checkout@v1 - name: Pull or rebuild the image run: cd $WORKDIR && ${{ matrix.CONFIG }} ./pull-or-rebuild-image.sh - name: Run the build run: cd $WORKDIR && ${{ matrix.CONFIG }} ./build.sh - name: Push the image run: cd $WORKDIR && source ./set-vars.sh && ${{ matrix.CONFIG }} /bin/bash -c "if [[ -f ${CI_FILE_PUSH_IMAGE_TO_REPO} ]]; then images/push-image.sh; fi" rpma-1.3.0/.gitignore000066400000000000000000000001511443364775400144600ustar00rootroot00000000000000.* !.circleci/ !.gitignore !.gitattributes !.github/ !.travis.yml !.mailmap !.version build/ *~ *.swp ~* rpma-1.3.0/CHANGELOG.md000066400000000000000000000355331443364775400143150ustar00rootroot00000000000000# Changelog All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
## [Unreleased] ## [1.3.0] - 2023-05-25 ### Added - the native flush support (required support in the kernel and in an RNIC's driver) - missing unit tests for rpma_mr_atomic_write() - a check if librpma uses rdma-core installed from sources - CI builds with rdma-core v45.0 installed from sources (with support for both native atomic write and native flush) on Ubuntu-latest and Fedora-latest CIs - internal APIs: - rpma_utils_ibv_context_is_flush_capable() - checks if kernel supports the native flush - BUILD_FORCE_ODP_NOT_SUPPORTED CMake option to disable On-Demand Paging (ODP) support in libibverbs - BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED CMake option to disable support for native atomic write in libibverbs - BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED CMake option to disable support for the native flush in libibverbs ### Fixed - the "ibv_query_device_ex_mock undeclared" compiler error when ON_DEMAND_PAGING_SUPPORTED is not defined and NATIVE_ATOMIC_WRITE_SUPPORTED is defined - some wrong and misleading comments in tests/unit/peer/peer-mr_reg.c - OS version in the Coverity CI build - preventing from failing the build when 'rpm -q' or 'dpkg-deb' commands fail - the "ibv_query_device_ex_mock undeclared" compiler error when both ON_DEMAND_PAGING_SUPPORTED and NATIVE_ATOMIC_WRITE_SUPPORTED are not defined ### Changed - renamed IBV_WR_ATOMIC_WRITE_SUPPORTED to NATIVE_ATOMIC_WRITE_SUPPORTED - renamed is_ibv_wr_atomic_write_supported() to is_ibv_atomic_write_supported() - updated the install-pmdk.sh script to install PMDK 1.12.1-119-g8074b19b1 with fixes required for Rocky Linux 8 and 9 and verify if the installation succeeded - rpma_peer_new() to check the native flush support in kernel - rpma_peer_setup_qp() to enable the native flush if both kernel and libibverbs support it - rpma_peer_usage2access() to return native access flags if both kernel and libibverbs support the native flush - rpma_flush() to use the native flush if the created QP supports it ## [1.2.0] - 2023-01-10 ### Added - native atomic write support (required support in the kernel and in an RNIC's driver) - security warnings to the documentation of the following functions of the API: - rpma_conn_get_private_data() - rpma_conn_req_get_private_data() - rpma_mr_get_descriptor() - rpma_mr_remote_from_descriptor() - rpma_peer_cfg_from_descriptor() - rpma_peer_cfg_get_descriptor() - offset of the beginning of the used persistent memory in the clients using PMem in the examples - one suppression for Memcheck on Ubuntu 22.04 - CI Coverity build run once a day over the night - a check for the native atomic write support in libibverbs - internal APIs: - rpma_utils_ibv_context_is_atomic_write_capable() - checks if kernel supports native atomic write - Rocky Linux 8 and 9 builds to the "on_pull_request" workflow - show git diff of changed documentation when pull requests with updated documentation are not generated ### Fixed - DEVELOPMENT.md file - `CMAKE_BUILD_TYPE` must be set to `Debug` when running the tests - build system for CentOS 7 (use cmake3 instead of cmake if a version of cmake is v2.x) - check-headers.sh file - corrected the path of check-ms-license.pl and removed unneeded '*' at the start of the grep expressions - (examples) use HELLO_STR_SIZE instead of KILOBYTE in case of the hello string - the common_pmem_map_file_with_signature_check() function in examples - `wr` passed to ibv_post_send(), ibv_post_recv() and ibv_post_srq_recv() is initialized to 0 - `sge` passed to a log message (in rpma_mr_*() functions) is initialized 
to 0 - `rq_size` in rpma_peer_create_srq() initialized to 0 - detecting no free slot for a new connection request in example 13 - memory allocations in example 07 - minor issues detected by Coverity - sleep(1) added to mtt_client_connect() before the next connection retry ### Changed - the default 'master' branch has been renamed to 'main' - logging of the source and the destination GID addresses in rpma_conn_req_new_from_id() has been restricted to only one case when CMAKE_BUILD_TYPE is set to 'Debug' - rpma_peer_new() to check the native atomic write support in kernel - rpma_peer_setup_qp() to enable native atomic write if both kernel and libibverbs supported it - rpma_mr_atomic_write() to use native atomic write if the created QP supported it - only the labeled (latest/stable/rolling etc.) versions of docker images (if available) are used in CI - it makes the CI self-updating ### Removed - whole benchmarking framework for librpma (the last commit with the benchmarking framework present is marked with the "[benchmarking-framework][bench-frame]" tag) - unused doc_snippets - meaningless template-example - meaningless template unit test - Debian 10 from the on_pull_request CI workflow [bench-frame]: https://github.com/pmem/rpma/tree/benchmarking-framework/tools/perf ## [1.1.0] - 2022-09-08 ### Added - (tools) description of the 'schematic' variable (from the report.json file) in the 'tools/perf/BENCHMARKING.md' file ### Fixed - removed unnecessary rpma_conn_req_delete() calls from examples - (tools) added checking if a path saved in the 'schematic' variable exists - common source code of GPSMP examples moved to one folder - changed size of the read-after-write (RAW) buffer in the example 04 - (examples) changed the write size from KILOBYTE to HELLO_STR_SIZE ### Changed - unified coding style in the source, the test and the example files ## [1.0.0] - 2022-08-25 ### Added - DEVELOPMENT.md file containing the most important information needed during development of the library - THREAD_SAFETY.md file containing the analysis of thread safety of the librpma library - APIs: - rpma_conn_cfg_get_compl_channel - gets if the completion event channel can be shared by CQ and RCQ - rpma_conn_cfg_get_srq - gets the shared RQ object from the connection - rpma_conn_cfg_set_compl_channel - sets if the completion event channel can be shared by CQ and RCQ - rpma_conn_cfg_set_srq - sets a shared RQ object for the connection - rpma_conn_get_compl_fd - gets a file descriptor of the shared completion channel from the connection - rpma_conn_wait - waits for a completion event on the shared completion channel from CQ or RCQ - rpma_srq_cfg_delete - deletes the shared RQ configuration object - rpma_srq_cfg_get_rcq_size - gets the receive CQ size of the shared RQ - rpma_srq_cfg_get_rq_size - gets the RQ size of the shared RQ - rpma_srq_cfg_new - creates a new shared RQ configuration object - rpma_srq_cfg_set_rcq_size - sets the receive CQ size of the shared RQ - rpma_srq_cfg_set_rq_size - sets the RQ size of the shared RQ - rpma_srq_delete - deletes the shared RQ object - rpma_srq_get_rcq - gets the receive CQ from the shared RQ object - rpma_srq_new - creates a new shared RQ object - rpma_srq_recv - initiates the receive operation in shared RQ - error RPMA_E_SHARED_CHANNEL - the completion event channel is shared and cannot be handled by any particular CQ - error RPMA_E_NOT_SHARED_CHNL - the completion event channel is not shared - examples: - 08srq-simple-messages-ping-pong-with-srq - a single-connection example for 
shared RQ with ping-pong messages - 13-messages-ping-pong-with-srq - a multi-connection example for shared RQ with ping-pong messages - logging of the source and the destination GID addresses in rpma_conn_req_new_from_id() - error message for RPMA_E_AGAIN: "Temporary error, try again" - peer_cfg: get/set_direct_write_to_pmem and get_descriptor are now thread-safe - conn_cfg: all get and set functions for cq, rq, sq, rcq, timeout and compl_channel are now thread-safe - multi-threaded tests: - rpma_conn_apply_remote_peer_cfg - rpma_conn_cfg_get_srq - rpma_conn_cfg_set_srq - rpma_conn_req_connect - rpma_ep_next_conn_req - rpma_log_set_function - rpma_log_set_get_threshold - rpma_log_set_threshold - rpma_peer_cfg_set_direct_write_to_pmem - rpma_srq_cfg_new - rpma_srq_cfg_get_rcq_size - rpma_srq_cfg_get_rq_size - rpma_srq_cfg_set_rcq_size - rpma_srq_cfg_set_rq_size - rpma_srq_delete - rpma_srq_get_rcq - rpma_srq_new ### Changed - APIs: - rpma_cq_wait - returns RPMA_E_SHARED_CHANNEL if the completion channel is shared - Renamed CMake variables: - COVERAGE to TESTS_COVERAGE - DEVELOPER_MODE to BUILD_DEVELOPER_MODE - TEST_PYTHON_TOOLS to TESTS_PERF_TOOLS - TRACE_TESTS to TESTS_VERBOSE_OUTPUT - USE_ASAN to DEBUG_USE_ASAN - USE_UBSAN to DEBUG_USE_UBSAN - Changed default values of CMake variables: - CMAKE_BUILD_TYPE from Debug to Release - TESTS_PERF_TOOLS - from ON to OFF - all examples and internal API files now comply with the new character limit per line (100 characters) ### Fixed - APIs: - rpma_peer_delete - fixed memory leak when ibv_dealloc_pd() fails ### Removed - CMake variables: - CHECK_CSTYLE - TESTS_LONG - TESTS_USE_VALGRIND - old integration tests - suppressions for get and set functions for cq, rq, sq and timeout has been removed ## [0.14.0] - 2022-03-15 ### Added - APIs: - RPMA_CONN_UNREACHABLE enum rpma_conn_event to handle RDMA_CM_EVENT_UNREACHABLE ### Fixed - APIs: - rpma_log_init - cannot fail to set the default log function now - unit tests of rpma_log_set_threshold and RPMA_LOG_* macros ### Removed - APIs: - rpma_write_atomic - replaced with rpma_atomic_write ## [0.13.0] - 2022-03-09 ### Added - APIs: - rpma_atomic_write - initiates the atomic 8 bytes write operation ### Changed - ibv_qp_cap.max_inline_data set to 8 bytes to allow implementation of atomic write over ibv_post_send with IBV_SEND_INLINE - example 07 uses rpma_atomic_write() instead of rpma_write_atomic() ### Deprecated - API: - rpma_write_atomic - replaced with rpma_atomic_write ### Fixed - the part_write.json template in the benchmarking framework - documentation of rpma_utils_conn_event_2str() ## [0.12.0] - 2022-02-21 ### Fixed - links to https://pmem.io in the benchmarking framework - templates with fio configuration in the benchmarking framework ### Removed - APIs: - rpma_cq_get_completion - replaced with rpma_cq_get_wc - struct rpma_completion - replaced with struct ibv_wc from libibverbs - enum rpma_op - replaced with enum ibv_wc_opcode from libibverbs ## [0.11.0] - 2022-02-08 ### Added - Example (#12) for separate receive completion queue (RCQ). - Documented the default values of struct rpma_conn_cfg. - APIs: - rpma_cq_get_wc - receive one or more completions - Tools: - Benchmarking framework basing on python scripts. ### Deprecated - APIs: - rpma_cq_get_completion - replaced with rpma_cq_get_wc - struct rpma_completion - replaced with struct ibv_wc from libibverbs - enum rpma_op - replaced with enum ibv_wc_opcode from libibverbs ### Fixed - Examples 07 and 08. 
### Removed - APIs: - rpma_conn_completion_get - replaced with rpma_conn_get_cq and rpma_cq_get_wc, - rpma_conn_completion_wait - replaced with rpma_conn_get_cq and rpma_cq_wait, - rpma_conn_get_completion_fd - replaced with rpma_conn_get_cq and rpma_cq_get_fd. - Tools: - Benchmarking framework basing on bash scripts. ## [0.10.0] - 2022-01-24 ### Added - Unblocked performance of File System DAX by adding the possibility to use ibv_advice_mr() for registered memory ([#1220][1220]). - Separate receive completion queue (RCQ) ([#1080][1080]). - Support for iWARP protocol ([#1044][1044]). - Write operation with immediate data (enum rpma_op RPMA_OP_RECV_RDMA_WITH_IMM added) ([#856][856]). - Send operation with immediate data ([#713][713]). - Handling the RDMA_CM_EVENT_REJECTED event (enum rpma_conn_event RPMA_CONN_REJECTED added) ([#802][802]). - Support for many [Linux distributions and versions][distros] each of them tested [once a day with CI][nightly]. - APIs: - rpma_conn_cfg_get_rcq_size - gets receive CQ size for the connection, - rpma_conn_cfg_set_rcq_size - sets receive CQ size for the connection, - rpma_conn_get_cq - gets the connection's main CQ, - rpma_conn_get_qp_num - gets the connection's qp_num, - rpma_conn_get_rcq - gets the connection's receive CQ, - rpma_conn_req_get_private_data - gets a pointer to the request's private data, - rpma_cq_get_completion - receives a completion of an operation, - rpma_cq_get_fd - gets the completion queue's file descriptor, - rpma_cq_wait - waits for a completion, - rpma_mr_advise - gives advice about an address range in a memory registration, - rpma_mr_get_ptr - gets the pointer to the local memory region, - rpma_mr_get_size - gets the size of the local memory region, - rpma_send_with_imm - initiates the send operation with immediate data, - rpma_write_with_imm - initiates the write operation with immediate data. - Tools: - [ddio.sh script][ddio] to toggle and query the DDIO (Intel® Data Direct I/O Technology) state per PCIe root port on Intel® Cascade Lake platforms ([#597][597]). - Benchmarking framework for librpma. ### Changed - Atomic write operation (rpma_write_atomic()) implemented with fence (now it waits for RDMA Read which simulates RDMA flush) ([#603][603]). - API for completions handling (see Deprecated). - rpma_read, rpma_write, rpma_send and rpma_recv can be called with 0B message. - Updated and fixed documentation. ### Deprecated - APIs: - rpma_conn_completion_get - replaced with rpma_conn_get_cq and rpma_cq_get_completion, - rpma_conn_completion_wait - replaced with rpma_conn_get_cq and rpma_cq_wait, - rpma_conn_get_completion_fd - replaced with rpma_conn_get_cq and rpma_cq_get_fd. - Tools: - This is the last release with the benchmarking framework basing on Bash scripts. In the next release it will be replaced with a Python-based benchmarking framework. ### Fixed - rpma_flush_apm_new() fixed, so that rpma_mr_reg() can be called after ibv_fork_init() ([#866][866]). 
[ddio]: https://github.com/pmem/rpma/blob/main/tools/ddio.sh [distros]: https://github.com/pmem/rpma/blob/main/.github/workflows/nightly.yml [nightly]: https://github.com/pmem/rpma/actions/workflows/nightly.yml [1220]: https://github.com/pmem/rpma/pull/1220 [1080]: https://github.com/pmem/rpma/pull/1080 [1044]: https://github.com/pmem/rpma/pull/1044 [866]: https://github.com/pmem/rpma/pull/866 [856]: https://github.com/pmem/rpma/pull/856 [802]: https://github.com/pmem/rpma/pull/802 [713]: https://github.com/pmem/rpma/pull/713 [603]: https://github.com/pmem/rpma/pull/603 [597]: https://github.com/pmem/rpma/pull/597 ## [0.9.0] - 2020-10-01 ### Added - This is the first official release of the librpma library. - The API provides the most flexible implementation of remote persistency via Appliance Persistency Method. - Multiple examples show how to use this API (including an example of how to build your own General Purpose Persistency Method implementation basing on the services provided by the librpma library). - The extensive documentation describes all guiding principles. rpma-1.3.0/CMakeLists.txt000066400000000000000000000302441443364775400152360ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # Copyright (c) 2022-2023, Fujitsu Limited # cmake_minimum_required(VERSION 3.3) project(rpma C) set(VERSION_MAJOR 1) set(VERSION_MINOR 3) set(VERSION_PATCH 0) # set(VERSION_PRERELEASE rc1) set(VERSION ${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}) if (VERSION_PRERELEASE) set(VERSION ${VERSION}-${VERSION_PRERELEASE}) endif() set(LIBRPMA_LIBRARIES rpma) set(LIBRPMA_LIBRARY_DIRS ${CMAKE_BINARY_DIR}/src/) set(LIBRPMA_INCLUDE_DIRS ${CMAKE_SOURCE_DIR}/src/include/) set(LIBRPMA_SOURCE_DIR ${CMAKE_SOURCE_DIR}/src/) # required only for some examples set(LIBPMEM_REQUIRED_VERSION 1.6) set(LIBPMEM2_REQUIRED_VERSION 1.10) set(CMAKE_DISABLE_IN_SOURCE_BUILD ON) include(FindPerl) include(FindThreads) include(CMakePackageConfigHelpers) include(GNUInstallDirs) include(${CMAKE_SOURCE_DIR}/cmake/functions.cmake) option(BUILD_DOC "build documentation" ON) option(BUILD_TESTS "build tests" ON) option(BUILD_EXAMPLES "build examples" ON) option(BUILD_DEVELOPER_MODE "enable developer checks" OFF) option(BUILD_FORCE_ODP_NOT_SUPPORTED "Disable On-Demand Paging (ODP) support in libibverbs" OFF) option(BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED "Disable support for native atomic write in libibverbs" OFF) option(BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED "Disable support for the native flush in libibverbs" OFF) option(TESTS_COVERAGE "run coverage test" OFF) option(TESTS_NO_FORTIFY_SOURCE "enable tests that do not pass when -D_FORTIFY_SOURCE=2 flag set" OFF) option(TESTS_RDMA_CONNECTION "enable tests that require a configured RDMA-capable network interface (valgrind required)" OFF) option(TESTS_USE_FORCED_PMEM "run tests with PMEM_IS_PMEM_FORCE=1" OFF) option(TESTS_USE_VALGRIND_PMEMCHECK "enable tests with valgrind pmemcheck (if found)" OFF) option(TESTS_VERBOSE_OUTPUT "verbose test outputs" OFF) option(DEBUG_LOG_TRACE "enable logging function traces" OFF) option(DEBUG_FAULT_INJECTION "enable fault injection" OFF) option(DEBUG_USE_ASAN "enable AddressSanitizer (-fsanitize=address)" OFF) option(DEBUG_USE_UBSAN "enable UndefinedBehaviorSanitizer (-fsanitize=undefined)" OFF) # Do not treat include directories from the interfaces # of consumed Imported Targets as SYSTEM by default. 
set(CMAKE_NO_SYSTEM_FROM_IMPORTED 1) set(TEST_DIR ${CMAKE_CURRENT_BINARY_DIR}/test CACHE STRING "working directory for tests") set(buildTypes Release Debug RelWithDebInfo MinSizeRel) if(NOT CMAKE_BUILD_TYPE) message(STATUS "No build type selected (CMAKE_BUILD_TYPE), defaulting to Release") set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Choose the type of build, options are: Release Debug RelWithDebInfo MinSizeRel ..." FORCE) set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS ${buildTypes}) else() message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") if(NOT CMAKE_BUILD_TYPE IN_LIST buildTypes) message(WARNING "Unusual build type was set, please make sure it's a proper one. " "Only following are supported by default: ${buildTypes}.") endif() endif() if(EXISTS "${CMAKE_SOURCE_DIR}/.git") execute_process(COMMAND git describe OUTPUT_VARIABLE SRCVERSION WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) if(NOT SRCVERSION) execute_process(COMMAND git log -1 --format=%h OUTPUT_VARIABLE SRCVERSION WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE) endif() else() execute_process(COMMAND cat .version OUTPUT_VARIABLE SRCVERSION WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE) endif() set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/cmake) find_package(PkgConfig REQUIRED) if(NOT PERL_FOUND) message(FATAL_ERROR "Perl not found") endif() pkg_check_modules(LIBIBVERBS libibverbs) if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() pkg_check_modules(LIBRDMACM librdmacm) if(NOT LIBRDMACM_FOUND) find_package(LIBRDMACM REQUIRED librdmacm) endif() pkg_check_modules(LIBPMEM libpmem>=${LIBPMEM_REQUIRED_VERSION}) if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION}) endif() pkg_check_modules(LIBPMEM2 libpmem2>=${LIBPMEM2_REQUIRED_VERSION}) if(NOT LIBPMEM2_FOUND) find_package(LIBPMEM2 ${LIBPMEM2_REQUIRED_VERSION}) endif() if(NOT LIBPMEM2_FOUND) if(NOT LIBPMEM_FOUND) message(WARNING "Since libpmem2 and libpmem are missing, the examples will be unable to use PMem. They will be using DRAM instead.") else() message(STATUS "libpmem will be used for examples that use PMem.") endif() else() # XXX adjust message when all examples are adapted to libpmem2 # message(STATUS "libpmem2 will be used for examples that use PMem.") if(NOT LIBPMEM_FOUND) message(WARNING "Since libpmem is missing, the examples that are adapted only to libpmem will be unable to use PMem. 
They will be using DRAM instead.") message(STATUS "libpmem2 will be used for examples that use PMem and are adapted to libpmem2.") else() message(STATUS "libpmem2 will be used for examples that use PMem and are adapted to libpmem2, otherwise libpmem will be used.") endif() endif() pkg_check_modules(LIBPROTOBUFC libprotobuf-c) # check if libibverbs has ODP support if(BUILD_FORCE_ODP_NOT_SUPPORTED) message(WARNING "On-Demand Paging (ODP) support in libibverbs is disabled by the BUILD_FORCE_ODP_NOT_SUPPORTED option!") else() is_ODP_supported(ON_DEMAND_PAGING_SUPPORTED) if(ON_DEMAND_PAGING_SUPPORTED) message(STATUS "On-Demand Paging (ODP) in libibverbs supported - Success") add_flag(-DON_DEMAND_PAGING_SUPPORTED=1) else() message(WARNING "On-Demand Paging (ODP) is NOT supported and will be disabled (too old version of libibverbs)!") endif() endif() # check if libibverbs has ibv_advise_mr() support is_ibv_advise_mr_supported(IBV_ADVISE_MR_SUPPORTED) if(IBV_ADVISE_MR_SUPPORTED) message(STATUS "ibv_advise_mr() supported in libibverbs - Success") add_flag(-DIBV_ADVISE_MR_SUPPORTED=1) else() message(WARNING "ibv_advise_mr() is NOT supported and will be disabled (too old version of libibverbs)!") endif() # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # check if librdmacm has correct signature of rdma_getaddrinfo() check_signature_rdma_getaddrinfo(RDMA_GETADDRINFO_NEW_SIGNATURE) if(RDMA_GETADDRINFO_NEW_SIGNATURE) message(STATUS "Using a new signature of rdma_getaddrinfo()") else() message(STATUS "Using an old signature of rdma_getaddrinfo()") add_flag(-DRDMA_GETADDRINFO_OLD_SIGNATURE=1) endif() # check if atomic operations are supported atomic_operations_supported(ATOMIC_OPERATIONS_SUPPORTED) if(ATOMIC_OPERATIONS_SUPPORTED) message(STATUS "atomic operations are supported") add_flag(-DATOMIC_OPERATIONS_SUPPORTED=1) else() message(WARNING "atomic operations are NOT supported (too old gcc/clang compiler). 
Some *_set*() functions will NOT be thread-safe!") endif() # check if libibverbs supports native atomic write if(BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED) message(WARNING "Support for native atomic write in libibverbs is disabled by the BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED option!") else() is_ibv_atomic_write_supported(NATIVE_ATOMIC_WRITE_SUPPORTED) if(NATIVE_ATOMIC_WRITE_SUPPORTED) message(STATUS "Native atomic write supported in libibverbs - Success") add_flag(-DNATIVE_ATOMIC_WRITE_SUPPORTED=1) else() message(WARNING "Native atomic write is NOT supported and will be disabled (too old version of libibverbs)!") endif() endif() # check if libibverbs supports the native flush if(BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED) message(WARNING "Support for the native flush in libibverbs is disabled by the BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED option!") else() is_ibv_flush_supported(NATIVE_FLUSH_SUPPORTED) if(NATIVE_FLUSH_SUPPORTED) message(STATUS "Native flush supported in libibverbs - Success") add_flag(-DNATIVE_FLUSH_SUPPORTED=1) else() message(WARNING "Native flush is NOT supported and will be disabled (too old version of libibverbs)!") endif() endif() add_custom_target(checkers ALL) add_custom_target(cstyle) add_custom_target(check-whitespace) add_custom_target(check-license COMMAND ${CMAKE_SOURCE_DIR}/utils/check_license/check-headers.sh ${CMAKE_SOURCE_DIR} BSD-3-Clause) add_custom_target(check-commits COMMAND ${CMAKE_SOURCE_DIR}/utils/check-commits.sh) add_custom_target(check-whitespace-main COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/check_whitespace ${CMAKE_SOURCE_DIR}/utils/check_license/* ${CMAKE_SOURCE_DIR}/README.md) add_dependencies(check-whitespace check-whitespace-main) add_flag(-Wpointer-arith) add_flag(-Wunused-macros) add_flag(-Wsign-conversion) add_flag(-Wsign-compare) add_flag(-Wunreachable-code-return) add_flag(-Wmissing-variable-declarations) add_flag(-fno-common) add_flag(-std=gnu11) add_flag(-ggdb DEBUG) add_flag(-DDEBUG DEBUG) add_flag("-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2" RELEASE) if(DEBUG_USE_ASAN) add_sanitizer_flag(address) endif() if(DEBUG_USE_UBSAN) add_sanitizer_flag(undefined) endif() if(TESTS_COVERAGE) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -coverage") endif() if(BUILD_DEVELOPER_MODE) set(TESTS_NO_FORTIFY_SOURCE ON) add_flag(-Wall) add_flag(-Werror) add_dependencies(checkers cstyle) add_dependencies(checkers check-whitespace) add_dependencies(checkers check-license) add_dependencies(checkers check-commits) endif() add_check_whitespace(cmake-main ${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) add_check_whitespace(cmake-helpers ${CMAKE_CURRENT_SOURCE_DIR}/cmake/*.cmake) configure_file(${CMAKE_SOURCE_DIR}/cmake/librpma.pc.in ${CMAKE_CURRENT_BINARY_DIR}/librpma.pc @ONLY) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/librpma.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig) configure_file( "${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in" "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) add_custom_target(uninstall COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) configure_package_config_file(${CMAKE_SOURCE_DIR}/cmake/librpma-config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/librpma-config.cmake INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/librpma/cmake PATH_VARS CMAKE_INSTALL_LIBDIR CMAKE_INSTALL_INCLUDEDIR) write_basic_package_version_file(librpma-config-version.cmake VERSION ${VERSION} COMPATIBILITY AnyNewerVersion) install(FILES ${CMAKE_CURRENT_BINARY_DIR}/librpma-config.cmake 
${CMAKE_CURRENT_BINARY_DIR}/librpma-config-version.cmake DESTINATION ${CMAKE_INSTALL_LIBDIR}/librpma/cmake) pkg_check_modules(VALGRIND valgrind) if(VALGRIND_FOUND) include_directories(${VALGRIND_INCLUDE_DIRS}) # sets VALGRIND_S_OPTION to "-s" if valgrind supports it or "" otherwise valgrind_check_s_option() add_flag(-DVG_MEMCHECK_ENABLED=1) add_flag(-DVG_DRD_ENABLED=1) add_flag(-DVG_HELGRIND_ENABLED=1) if(TESTS_USE_VALGRIND_PMEMCHECK) find_pmemcheck() endif() if(VALGRIND_PMEMCHECK_FOUND) add_flag(-DVG_PMEMCHECK_ENABLED=1) endif() else() if(TESTS_RDMA_CONNECTION) message(FATAL_ERROR "Valgrind not found! - tests requiring a configured RDMA-capable network interface" "(all multi-threaded and integration tests) require also valgrind to be installed." "Install valgrind or set TESTS_RDMA_CONNECTION to OFF.") else() message(STATUS "NOTICE: valgrind not found! - the tests requiring valgrind " "(all multi-threaded and integration tests) will not be run!") endif() endif() add_subdirectory(src) if(BUILD_TESTS) if(TEST_DIR) enable_testing() else() message(WARNING "TEST_DIR is empty - 'make test' will not work") endif() add_subdirectory(tests) endif() if(BUILD_DOC) add_subdirectory(doc) endif() if(BUILD_EXAMPLES) add_subdirectory(examples) endif() if(NOT "${CPACK_GENERATOR}" STREQUAL "") include(${CMAKE_SOURCE_DIR}/cmake/packages.cmake) endif() add_custom_target(config_softroce COMMAND ${CMAKE_SOURCE_DIR}/tools/config_softroce.sh) add_custom_target(run_all_examples COMMAND ${CMAKE_SOURCE_DIR}/examples/run-all-examples.sh ${CMAKE_BINARY_DIR}/examples) add_custom_target(run_all_examples_under_valgrind COMMAND ${CMAKE_SOURCE_DIR}/examples/run-all-examples.sh ${CMAKE_BINARY_DIR}/examples --valgrind) add_custom_target(run_all_examples_with_fault_injection COMMAND ${CMAKE_SOURCE_DIR}/examples/run-all-examples.sh ${CMAKE_BINARY_DIR}/examples --integration-tests) rpma-1.3.0/CONTRIBUTING.md000066400000000000000000000155101443364775400147260ustar00rootroot00000000000000# Contributing to the Remote Persistent Memory Access (RPMA) library (librpma) Down below you'll find instructions on how to contribute to the librpma library. Your contributions are most welcome! You'll find it is best to begin with a conversation about your changes, rather than just writing a bunch of code and contributing it out of the blue. To propose new features, suggest adding features, or simply start a dialogue about the librpma library, open an issue in our [GitHub Issues Database](https://github.com/pmem/rpma/issues) **Note**: If you do decide to implement code changes and contribute them, please make sure you agree your contribution can be made available under the [BSD-style License used for the RPMA](https://github.com/pmem/rpma/blob/main/LICENSE). 
**Note**: Submitting your changes also means that you certify the following: ``` Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. ``` In case of any doubt, the gatekeeper may ask you to certify the above in writing, i.e. via email or by including a `Signed-off-by:` line at the bottom of your commit comments. To improve tracking of who is the author of the contribution, we kindly ask you to use your real name (not an alias) when committing your changes to the RPMA: ``` Author: Random J Developer ``` ## Code contributions First you should get familiar with all information about developing the librpma library located in the [DEVELOPMENT.md](DEVELOPMENT.md) file. It is very useful: 1) to enable all developer checks by setting the `BUILD_DEVELOPER_MODE` CMake variable to `ON` and also 2) to set the `CMAKE_BUILD_TYPE` CMake variable to `Debug` in order to be able to see the debug information in case of failures: ```sh [rpma/build]$ cmake .. -DBUILD_DEVELOPER_MODE=ON -DCMAKE_BUILD_TYPE=Debug ``` ### Code formatting Here is the list of the most important rules of code formatting: - The limit for a line length is 100 characters. - Indent the code with TABs, not spaces. Tab width is 8 characters. - Do not break user-visible strings (even when they are longer than 100 characters), but make them the only token in the line. - Put each variable declaration in a separate line. - Do not use C++ comments `//` (except for the License). - Spaces around operators are mandatory. - No whitespace is allowed at the end of a line. - For multi-line macros, do not put whitespace before the `\` character. - Precede definition of each function with a brief, non-trivial description. (Usually a single line is enough.) - Use the `XXX` tag to indicate a hack, problematic code, or something to be done. - For pointer variables, place the `*` close to the variable name, not the pointer type. - Avoid unnecessary variable initialization. - Never type `unsigned int` - just use `unsigned` in such case. Same with `long int` and `long`, etc. - Sized types like `uint32_t`, `int64_t` should be used when there is an on-media format. Otherwise, just use `unsigned`, `long`, etc. - Functions with local scope must be declared as `static`. Before contributing please remember to run: ```sh [rpma/build]$ make cstyle [rpma/build]$ make check-whitespace ``` This will check all C files in the tree for style issues. 
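With the developer mode described earlier enabled, these checks (plus the license and commit checks) are attached to the `checkers` target defined in CMakeLists.txt, so a regular build already runs them. A minimal sketch of a full pre-submission pass:

```sh
[rpma/build]$ cmake .. -DBUILD_DEVELOPER_MODE=ON -DCMAKE_BUILD_TYPE=Debug
[rpma/build]$ make -j$(nproc)    # also runs the cstyle, whitespace, license and commit checks
```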
### Commit messages All commit lines (entered when you run `git commit`) must follow the common conventions for git commit messages: - The first line is a short summary, no longer than **50 characters**, starting with an area name and then a colon. There should be no period after the short summary. - Valid area names are: **rpma, test, examples, doc** and **common** (for everything else). - It is acceptable for the short summary to be the only thing in the commit message if it is a trivial change. Otherwise, the second line must be a blank line. - Starting at the third line, additional information is given in complete English sentences and, optionally, bulleted points. This content must not extend beyond **column 72**. - The English sentences should be written in the imperative, so you say "Fix bug X" instead of "Fixed bug X" or "Fixes bug X". - Bullet points should use hanging indents when they take up more than one line (see example below). - There can be any number of paragraphs, separated by a blank line, as many as it takes to describe the change. - Any references to GitHub issues are at the end of the commit message. If you want to check the commit before creating PR, run: ```sh [rpma/build]$ make check-commits ``` For example, here is a properly-formatted commit message: ``` doc: fix code formatting in man pages This section contains paragraph style text with complete English sentences. There can be as many paragraphs as necessary. - Bullet points are typically sentence fragments - The first word of the bullet point is usually capitalized and if the point is long, it is continued with a hanging indent - The sentence fragments don't typically end with a period Ref: rpma/issues#1 ``` ## Code coverage One of the basic principles of delivering a new functionality to the library is to deliver it simultaneously with the unit tests to ensure that the new features behave correctly. You have to expect that code that degrades the test coverage will not pass the review process unless it is explicitly marked as a draft. Keep in mind that only PRs with 100% coverage can be merged. Please read the [DEVELOPMENT.md](DEVELOPMENT.md) file to see how to run all unit tests. ## Bug reports Bugs for the RPMA project are tracked in our [GitHub Issues Database](https://github.com/pmem/rpma/issues). When reporting a new bug, please use the `New issue` button. Provide as much information as possible. ## Other issues On our issues page we also gather feature requests and questions. Templates to use are `Feature` and `Question`, respectively. They should help deliver meaningful description of a feature or ask a question to us. rpma-1.3.0/DEVELOPMENT.md000066400000000000000000000210201443364775400145720ustar00rootroot00000000000000# DEVELOPMENT ENVIRONMENT SETTINGS ## Configuring CMake options CMake creates many cache files and subdirectories in the directory where it is run, so it is recommended to run all CMake-related commands (like `cmake` and `ccmake`) in a separate newly created subdirectory usually called `build`: ```sh [rpma]$ mkdir build [rpma]$ cd build [rpma/build]$ cmake .. ``` All examples listed below use the `build` subdirectory as the CMake build directory. CMake options can be changed using the `-D` option e.g.: ```sh [rpma/build]$ cmake -DCMAKE_BUILD_TYPE=Debug -DBUILD_DEVELOPER_MODE=ON .. ``` You can browse and edit the CMake options using `cmake-gui` or `ccmake` e.g.: ```sh [rpma/build]$ ccmake .. 
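# alternatively, the Qt-based GUI mentioned above can be used (if cmake-gui is installed):
[rpma/build]$ cmake-gui ..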
``` ## CMake options of the librpma library Here is a list of the most interesting CMake options of the librpma library: | Name | Description | Values | Default | | - | - | - | - | | CMAKE_BUILD_TYPE | Choose the type of build | None/Release/Debug/RelWithDebInfo | Release | | CMAKE_INSTALL_PREFIX | An install path prefix, prepended to install directories | *dir path* | /usr/local | | CPACK_GENERATOR | Use CPack to generate a librpma package (RPM or DEB) | ""/RPM/DEB | "" | | BUILD_DOC | Build the documentation | ON/OFF | ON | | BUILD_TESTS | Build the tests | ON/OFF | ON | | BUILD_EXAMPLES | Build the examples | ON/OFF | ON | | BUILD_DEVELOPER_MODE | Enable developer checks | ON/OFF | OFF | | BUILD_FORCE_ODP_NOT_SUPPORTED | Disable On-Demand Paging (ODP) support in libibverbs | ON/OFF | OFF | | BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED | Disable support for native atomic write in libibverbs | ON/OFF | OFF | | BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED | Disable support for the native flush in libibverbs | ON/OFF | OFF | | TESTS_COVERAGE | Check the code coverage during compilation | ON/OFF | OFF | | TESTS_USE_FORCED_PMEM | Run tests with PMEM_IS_PMEM_FORCE=1 | ON/OFF | OFF | | TESTS_USE_VALGRIND_PMEMCHECK | Enable tests with valgrind pmemcheck (if found)| ON/OFF | OFF | | TESTS_RDMA_CONNECTION | Enable tests that require a configured RDMA-capable network interface (valgrind required) | ON/OFF | OFF | | TESTS_VERBOSE_OUTPUT | More verbose test outputs | ON/OFF | OFF | | DEBUG_LOG_TRACE | Enable logging functions' traces | ON/OFF | OFF | | DEBUG_FAULT_INJECTION | Enable fault injection | ON/OFF | OFF | | DEBUG_USE_ASAN | Enable AddressSanitizer | ON/OFF | OFF | | DEBUG_USE_UBSAN | Enable UndefinedBehaviorSanitizer | ON/OFF | OFF | | TEST_DIR | Working directory for tests | *dir path* | ./build/test | The following command can be used to see all available CMake options: ```sh [rpma/build]$ cmake -LAH .. ``` ### CMake options most useful during development The most useful CMake options during development are briefly described below: - `CMAKE_BUILD_TYPE` should be set to `Debug` (the default is `Release`) to be able to run the tests and see the debug information in case of failures, - `BUILD_DEVELOPER_MODE` should be set to `ON` (the default is `OFF`) to enable all developer checks (checking: licenses, coding style, whitespaces and commits), it sets also two compiler flags: `-Wall` and `-Werror`, - `BUILD_DOC` should be set to `ON` (the default) to turn on building the documentation, - `BUILD_TESTS` should be set to `ON` (the default) to turn on building the tests, - `BUILD_EXAMPLES` should be set to `ON` (the default) to turn on building the examples, - `TESTS_RDMA_CONNECTION` should be set to `ON` (the default is `OFF`) to enable tests that require a configured RDMA-capable network interface (valgrind is also required), - `TESTS_VERBOSE_OUTPUT` should be set to `ON` (the default is `OFF`) to put cmake in the trace mode with variables expanded, - `DEBUG_LOG_TRACE` enables logging functions' traces, so it is very useful during debugging (it should be set to `ON` then, the default is `OFF`). # Testing This section describes how to prepare the environment for execution of all available kinds of tests: - unit tests, - multi-threaded (MT) tests and - integration tests. ## Running only the unit tests The unit tests are implemented using the [cmocka](https://cmocka.org/) framework. They do not need any RDMA-capable network interface. 
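The unit test binaries are registered with CTest, so once the build described below is complete they can be listed or filtered without running the whole suite; a short sketch (the `peer` pattern is only an example of a test-name filter):

```sh
[rpma/build]$ ctest -N                              # list all registered tests
[rpma/build]$ ctest -R peer --output-on-failure     # run only the tests matching a pattern
```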
All unit tests are located in the `./tests/unit/` subfolder of the main directory. In order to run **only** the unit tests (this is the default configuration): 1. Build the librpma library with the `CMAKE_BUILD_TYPE` CMake variable set to `Debug` and the `TESTS_RDMA_CONNECTION` CMake variable set to `OFF`: ```sh [rpma]$ cd build [rpma/build]$ cmake -DCMAKE_BUILD_TYPE=Debug -DTESTS_RDMA_CONNECTION=OFF .. [rpma/build]$ make -j$(nproc) ``` or just: ```sh [rpma]$ cd build; rm -rf ./* [rpma/build]$ cmake -DCMAKE_BUILD_TYPE=Debug .. [rpma/build]$ make -j$(nproc) ``` in the empty `build` subfolder. 2. Run tests from the `build` subdirectory: ```sh [rpma/build]$ make test ``` or: ```sh [rpma/build]$ ctest --output-on-failure ``` to print out the output of the failed tests too. ## Running multi-threaded or integration tests on SoftRoCE or RDMA HW **Note**: The analysis of thread safety of the librpma library is located in the [THREAD_SAFETY.md](THREAD_SAFETY.md) file. ### Preparing the environment In order to run the multi-threaded or the integration tests: 1. Make sure you have all needed packages installed (you can support yourself with [Dockerfiles](./utils/docker/images/) (see the EXAMPLES_DEPS section)). 2. Valgrind must be installed to run both the multi-threaded and the integration tests. 3. A correctly configured RDMA-capable network interface (SoftRoCE or RDMA HW) with an IP address assigned is required. 4. If SoftRoCE is to be used to run tests, it can be configured in the following two alternative ways (it also prints out the IP of the configured interface): ```sh [rpma]$ ./tools/config_softroce.sh ``` or: ```sh [rpma/build]$ cmake .. [rpma/build]$ make config_softroce ``` 5. Set the `RPMA_TESTING_IP` environment variable to an IP address of this interface: ```sh $ export RPMA_TESTING_IP=192.168.0.1 # insert your own IP address here ``` ### Building the librpma library for running multi-threaded or integration tests 1. In order to run the **multi-threaded tests** build the librpma library with the `CMAKE_BUILD_TYPE` CMake variable set to `Debug` and the `TESTS_RDMA_CONNECTION` CMake variable set to `ON`: ```sh [rpma]$ cd build [rpma/build]$ cmake -DCMAKE_BUILD_TYPE=Debug -DTESTS_RDMA_CONNECTION=ON .. [rpma/build]$ make -j$(nproc) ``` 2. In order to run the **integration tests** build the librpma library with the `CMAKE_BUILD_TYPE` CMake variable set to `Debug` and the `TESTS_RDMA_CONNECTION` and the `DEBUG_FAULT_INJECTION` CMake variables set to `ON`: ```sh [rpma]$ cd build [rpma/build]$ cmake -DCMAKE_BUILD_TYPE=Debug -DTESTS_RDMA_CONNECTION=ON -DDEBUG_FAULT_INJECTION=ON .. [rpma/build]$ make -j$(nproc) ``` ### Running unit and multi-threaded tests In order to run both: unit and multi-threaded tests, run the following command: ```sh [rpma/build]$ make test ``` or: ```sh [rpma/build]$ ctest --output-on-failure ``` to print out also the output of the failed tests. ### Running only multi-threaded tests In order to run **only** the multi-threaded tests, run the following command: ```sh [rpma/build]$ ctest -R mtt --output-on-failure ``` ### Running integration tests The integration tests are implemented as examples run together with the fault injection mechanism. The integration tests can be started using one of the following commands: 1. From the build directory: ```sh [rpma/build]$ make run_all_examples_with_fault_injection ``` or: 2. 
From the main directory of the librpma repository: ```sh [rpma]$ ./examples/run-all-examples.sh ./build/examples/ --integration-tests ``` In order to run the integration tests on a PMem (a DAX device or a file on a file system DAX), an absolute path (starting with `/`) to this PMem has to be provided either via the `` argument: ```sh [rpma]$ ./examples/run-all-examples.sh ./build/examples/ --integration-tests ``` or via the `RPMA_EXAMPLES_PMEM_PATH` environment variable. If both of them are set, the command line argument `` will be used. By default the integration tests do not stop on a failure. In order to stop on the first failure, the `RPMA_EXAMPLES_STOP_ON_FAILURE` environment variable has to be set to `ON` or the following command has to be run: ```sh [rpma]$ ./examples/run-all-examples.sh ./build/examples/ --integration-tests --stop-on-failure ``` To see all available configuration options please take a look at the help message printed out by the following command: ```sh [rpma]$ ./examples/run-all-examples.sh ``` rpma-1.3.0/INSTALL.md000066400000000000000000000070551443364775400141320ustar00rootroot00000000000000# How to build the librpma library from source ## Requirements ### Common requirements In order to build librpma, you need to have installed several components: - C compiler - [CMake](http://www.cmake.org) >= 3.3 - pkg-config - libibverbs-dev(el) - librdmacm-dev(el) - libcmocka-dev(el) == 1.1.5-26-g672c5ce (please see [our docker script](./utils/docker/images/install-cmocka.sh) to install the verified revision) **Note**: To make sure you have all needed packages installed you can support yourself with [Dockerfiles](./utils/docker/images/) (see the BASE_DEPS and RPMA_DEPS sections) ### In order to build the documentation you also need: - diff - find - groff - txt2man == 1.7.0 (please see [our docker script](./utils/docker/images/install-txt2man.sh) to install the verified revision) and optionally: - pandoc to generate the Markdown documentation. **Note**: building the documentation can be turned off using the CMake `BUILD_DOC` option (see [Configuring CMake options](DEVELOPMENT.md#configuring-cmake-options) and [CMake options of the librpma library](DEVELOPMENT.md#cmake-options-of-the-librpma-library)). ### For some examples you also need: - libpmem-dev(el) >= 1.6 or libpmem2-dev(el) >= 1.11 for examples: 3, 4, 5, 7, 9, 9s - libprotobuf-c-dev(el) >= 1.0 for examples: 9, 9s **Note**: the above versions of libraries are proven to work correctly. **Note**: see [the list of the supported OSes](INSTALL.md#os-support). **Note**: please be aware that the libpmem2-dev(el) package is not available on some distributions. Use [our script](./utils/docker/images/install-pmdk.sh) to install it manually from sources. You can check all needed additional packages in one of our [Dockerfiles](./utils/docker/images/), for example [here](./utils/docker/images/Dockerfile.archlinux-latest), in the variable called PMDK_DEPS. **Note**: libprotobuf-c-dev(el) is needed to run examples: 9 and 9s **Note**: Examples that use PMem (3, 4, 5, 7, 9, 9s) require only one of the following libraries to be run: libpmem or libpmem2. In case of having installed both of them libpmem2 will be used. ## Building First, you have to create a `build` directory. From there you have to prepare the compilation using CMake. The final build step is just the `make` command: ```sh [rpma]$ mkdir build [rpma]$ cd build [rpma/build]$ cmake .. 
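# optional: review the available configuration options at this point (see DEVELOPMENT.md)
[rpma/build]$ cmake -LAH ..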
[rpma/build]$ make -j ``` When the librpma library has been successfully built, the binaries of the library can be found in the `build/src` directory. Additionally, you can run the examples in `build/examples/*`. To check how to set environment settings please checkout [DEVELOPMENT.md file](DEVELOPMENT.md). ### Building packages In order to build 'rpm' or 'deb' packages you should issue the following commands: ```sh [rpma/build]$ cmake .. -DCPACK_GENERATOR="$GEN" -DCMAKE_INSTALL_PREFIX=/usr [rpma/build]$ make package ``` where $GEN is a type of package generator: RPM or DEB. CMAKE_INSTALL_PREFIX must be set to a destination were packages will be installed It is recommended to run all unit test before the installation. In order to do it you can issue the following command. ```sh [rpma/build]$ make test ``` The `ctest` command can be used instead of `make test`. ## Installing After compiling the library, you can install it: ```sh [rpma/build]$ sudo make install ``` ## Testing In order to learn how to run all tests and set environment settings, please see [Testing](DEVELOPMENT.md#Testing) section in [DEVELOPMENT.md](DEVELOPMENT.md) file. ## OS support Supported Linux distributions include: - CentOS 7 - Debian >= 10 - Fedora >= 32 - Ubuntu >= 20.04 rpma-1.3.0/LICENSE000066400000000000000000000032571443364775400135070ustar00rootroot00000000000000SPDX-License-Identifier: BSD-3-Clause Copyright 2020, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Everything in this source tree is covered by the previous license with the following exceptions: * utils/cstyle (used only during development) licensed under CDDL. 
rpma-1.3.0/README.md000066400000000000000000000046411443364775400137570ustar00rootroot00000000000000# **librpma: Remote Persistent Memory Access Library** [![GHA build status](https://github.com/pmem/rpma/workflows/GitHubActions/badge.svg?branch=main)](https://github.com/pmem/rpma/actions) [![CircleCI build status](https://circleci.com/gh/pmem/rpma.svg?style=shield)](https://app.circleci.com/pipelines/github/pmem/rpma) [![Coverity build status](https://scan.coverity.com/projects/21470/badge.svg)](https://scan.coverity.com/projects/pmem-rpma) [![Coverage Status](https://codecov.io/github/pmem/rpma/coverage.svg?branch=main)](https://codecov.io/gh/pmem/rpma/branch/main) [![librpma version](https://img.shields.io/github/tag/pmem/rpma.svg)](https://github.com/pmem/rpma/releases/latest) [![Packaging status](https://repology.org/badge/tiny-repos/rpma.svg)](https://repology.org/project/rpma/versions) The **Remote Persistent Memory Access (RPMA) Library** is a C library to simplify accessing persistent memory on remote hosts over **Remote Direct Memory Access (RDMA)**. For more information, see [pmem.io](https://pmem.io). ## Installing If you want to install this library, check out the [INSTALL.md](INSTALL.md) file. ## Examples Examples of usage of this library are located in the [./examples/](./examples/) directory and described in the [./examples/README.md](./examples/README.md) file. ## Development If you want to develop this library, read the [DEVELOPMENT.md](DEVELOPMENT.md) file. ## Contributing If you want to contribute to the development of this library, you should get familiar with all the following files: [INSTALL.md](INSTALL.md), [DEVELOPMENT.md](DEVELOPMENT.md) and [CONTRIBUTING.md](CONTRIBUTING.md). ## Thread safety The analysis of thread safety of the librpma library is located in the [THREAD_SAFETY.md](THREAD_SAFETY.md) file. ## Contact Us For more information on this library, contact Tomasz Gromadzki (tomasz.gromadzki@intel.com), Lukasz Dorau (lukasz.dorau@intel.com), Piotr Balcer (piotr.balcer@intel.com), or post to our [Google group](https://groups.google.com/group/pmem). ## More Info [Persistent Memory Over Traditional RDMA White Paper - Part 1](https://software.intel.com/content/www/us/en/develop/articles/persistent-memory-replication-over-traditional-rdma-part-1-understanding-remote-persistent.html) - which describes a technology behind RPMA. **Note:** Parts 2-4 relates directly to the librpmem library which is a predecessor of librpma. So parts 2-4 do not relate exactly to how librpma works. rpma-1.3.0/ROADMAP.md000066400000000000000000000021401443364775400140750ustar00rootroot00000000000000# The librpma roadmap [04/08/2021] After the initial stage of development and having a growing community of developers we have decided to move the librpma roadmap fully on GitHub. We are planning to post and discuss ideas about future improvements of the library there. Please join us at: https://github.com/pmem/rpma/issues if you find any of the items listed there interesting for you (either you expect this item to be added to the library or you would like to deliver a source code solution for it). **Note**: Please take a look at [CONTRIBUTING.md](CONTRIBUTING.md) before you start developing. # The librpma Fio engine roadmap [04/08/2021] The librpma FIO engine is already out there. Please give it a try: https://github.com/axboe/fio In the meantime, we are still working on new concepts and improvements to the existing engines. 
If you feel the vibe, please join us here: https://github.com/pmem/fio

# Contact Us

For more information about both roadmaps, please contact Tomasz Gromadzki (tomasz.gromadzki@intel.com).

# More Info

Please read [README.md](README.md) for more information about the project.
rpma-1.3.0/THREAD_SAFETY.md000066400000000000000000000207231443364775400150430ustar00rootroot00000000000000# THREAD SAFETY

This document presents the analysis of thread safety of the librpma library.

**Note**: the analysis is based on static code analysis and a set of multithreaded tests executed under Ubuntu 22.04. In order to confirm thread safety on another OS configuration, all multithreaded tests should be rerun.

Detected and masked multithreading issues can be seen in the following suppression files:
- [drd.supp](tests/drd.supp) and
- [helgrind.supp](tests/helgrind.supp)

## Main assumptions

The main assumptions this analysis is based on are the following:

1) the API of libibverbs is fully thread-safe and it can be called from every thread in the process (see [Relationship of libibverbs and librdmacm](#relationship-of-libibverbs-and-librdmacm) for details),
2) many threads may use the same peer (`struct rpma_peer`) to create separate connections,
3) there can be only one endpoint (`struct rpma_ep`) and only one thread can use it (call `rpma_ep_next_conn_req()` on it),
4) each of the connections (`struct rpma_conn_req` and `struct rpma_conn`) can be used by only one thread at the same time.

**If the above assumptions are not met, thread safety of the librpma library is not guaranteed.**

The most common scenarios are the following:

1) on the active side: the main thread creates connection requests (`struct rpma_conn_req`) for all threads and passes them to those threads, which use them to create separate connections (`struct rpma_conn`),
2) on the passive side: the main thread establishes the connection but the rest of the work (including connection shutdown) is done by separate thread(s) (if more than one connection is established).

Most of the core librpma API calls are thread-safe, but there are also very important exceptions (described below), mainly related to a connection's configuration, establishment and tear-down.

Creating resources of the librpma library usually involves dynamic memory allocation and destroying resources usually involves a dynamic memory release. The same resource cannot be destroyed more than once by any thread and a resource cannot be used after it was destroyed. It is the user's responsibility to follow those rules; not doing so may result in a segmentation fault or undefined behaviour.
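To make scenario 1) more concrete, below is a minimal sketch of the active-side pattern. It is not part of the librpma examples: the worker function name, the thread count and the reduced error handling are this document's illustration only, while the librpma calls themselves are used the same way as in the examples shipped in `./examples/`.

```c
/*
 * A sketch of scenario 1): the main thread creates one connection request
 * per worker and each worker exclusively owns the resulting connection.
 * Error handling is reduced to a minimum for brevity.
 */
#include <pthread.h>
#include <stdio.h>
#include <librpma.h>

#define NUM_THREADS 4

struct worker_arg {
	struct rpma_conn_req *req; /* owned exclusively by this worker */
};

static void *worker(void *arg)
{
	struct worker_arg *wa = arg;
	struct rpma_conn *conn = NULL;
	enum rpma_conn_event ev = RPMA_CONN_UNDEFINED;

	/* only this thread touches the request and the connection (assumption 4) */
	if (rpma_conn_req_connect(&wa->req, NULL, &conn))
		return NULL;

	if (rpma_conn_next_event(conn, &ev) == 0 && ev == RPMA_CONN_ESTABLISHED) {
		/* ... use the connection (rpma_read(), rpma_write(), ...) ... */
	}

	(void) rpma_conn_disconnect(conn);
	(void) rpma_conn_next_event(conn, &ev); /* wait for RPMA_CONN_CLOSED */
	(void) rpma_conn_delete(&conn);

	return NULL;
}

int main(int argc, char *argv[])
{
	if (argc < 3) {
		(void) fprintf(stderr, "usage: %s <addr> <port>\n", argv[0]);
		return -1;
	}

	char *addr = argv[1];
	char *port = argv[2];
	struct ibv_context *ibv_ctx = NULL;
	struct rpma_peer *peer = NULL;
	pthread_t threads[NUM_THREADS];
	struct worker_arg args[NUM_THREADS];
	int created = 0;

	if (rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx))
		return -1;

	/* one peer may be shared by all threads (assumption 2) */
	if (rpma_peer_new(ibv_ctx, &peer))
		return -1;

	/*
	 * rpma_conn_req_new() is NOT thread-safe, so the main thread creates
	 * all connection requests and hands each of them over to one worker.
	 */
	for (int i = 0; i < NUM_THREADS; i++) {
		if (rpma_conn_req_new(peer, addr, port, NULL, &args[i].req))
			break;
		if (pthread_create(&threads[i], NULL, worker, &args[i])) {
			(void) rpma_conn_req_delete(&args[i].req);
			break;
		}
		++created;
	}

	for (int i = 0; i < created; i++)
		(void) pthread_join(threads[i], NULL);

	(void) rpma_peer_delete(&peer);

	return 0;
}
```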
## Thread-safe API calls The following API calls of the librpma library are thread-safe: - rpma_peer_new - rpma_peer_delete - rpma_peer_cfg_new - rpma_peer_cfg_delete - rpma_peer_cfg_from_descriptor - rpma_peer_cfg_get_descriptor_size - rpma_ep_get_fd - rpma_conn_cfg_new - rpma_conn_cfg_delete - rpma_mr_get_descriptor - rpma_mr_remote_from_descriptor - rpma_mr_get_descriptor_size - rpma_mr_get_ptr - rpma_mr_get_size - rpma_mr_remote_get_size - rpma_mr_remote_delete - rpma_mr_remote_get_flush_type - rpma_mr_advise - rpma_conn_req_get_private_data - rpma_conn_req_recv - rpma_conn_delete - rpma_conn_disconnect - rpma_conn_get_cq - rpma_conn_get_compl_fd - rpma_conn_get_event_fd - rpma_conn_get_private_data - rpma_conn_get_qp_num - rpma_conn_get_rcq - rpma_conn_next_event - rpma_conn_wait - rpma_atomic_write - rpma_flush - rpma_read - rpma_recv - rpma_send - rpma_send_with_imm - rpma_srq_get_rcq - rpma_write - rpma_write_with_imm - rpma_cq_get_fd - rpma_cq_wait - rpma_cq_get_wc - rpma_utils_ibv_context_is_odp_capable - rpma_utils_conn_event_2str - rpma_err_2str - rpma_log_get_threshold - rpma_log_set_function - rpma_log_set_threshold ## Conditionally thread-safe API calls **Note**: Thread safety of the following functions depends on the support of atomic operations (`atomic_store` and `atomic_load`) on the specific OS. If they are supported then all the following functions are thread-safe (except `rpma_conn_req_connect`). The following API calls of the librpma library: - rpma_peer_cfg_set_direct_write_to_pmem - rpma_peer_cfg_get_direct_write_to_pmem - rpma_peer_cfg_get_descriptor - rpma_conn_apply_remote_peer_cfg - calls rpma_peer_cfg_get_direct_write_to_pmem are thread-safe only if each thread operates on a **separate peer configuration structure** (`struct rpma_peer_cfg`) used only by this one thread. They are not thread-safe if threads operate on one peer configuration structure common for more than one thread. The following API calls of the librpma library: - rpma_conn_cfg_get_compl_channel - rpma_conn_cfg_get_cq_size - rpma_conn_cfg_get_rcq_size - rpma_conn_cfg_get_rq_size - rpma_conn_cfg_get_sq_size - rpma_conn_cfg_get_srq - rpma_conn_cfg_get_timeout - rpma_conn_cfg_set_compl_channel - rpma_conn_cfg_set_cq_size - rpma_conn_cfg_set_rcq_size - rpma_conn_cfg_set_rq_size - rpma_conn_cfg_set_sq_size - rpma_conn_cfg_set_srq - rpma_conn_cfg_set_timeout are thread-safe only if each thread operates on a **separate connection configuration structure** (`struct rpma_conn_cfg`) used only by this one thread. They are not thread-safe if threads operate on one connection configuration structure common for more than one thread. The following API call of the librpma library: - rpma_conn_req_connect is thread-safe only if each thread operates on a **separate connection request** (`struct rpma_conn_req`) used only by this one thread. They are not thread-safe if threads operate on one connection request common for more than one thread. 
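A minimal sketch of the safe usage pattern for the conditionally thread-safe calls is shown below: every thread builds, uses and deletes its own `struct rpma_conn_cfg`. The helper name (`connect_with_private_cfg`) and the chosen timeout/CQ size values are illustrative only and are not part of librpma.

```c
#include <librpma.h>

/*
 * Each calling thread owns its cfg object for the object's whole lifetime,
 * so the conditionally thread-safe rpma_conn_cfg_set_*() calls never race.
 */
static int connect_with_private_cfg(struct rpma_peer *peer, const char *addr,
		const char *port, struct rpma_conn **conn_ptr)
{
	struct rpma_conn_cfg *cfg = NULL;
	struct rpma_conn_req *req = NULL;
	int ret;

	/* the configuration object is local to the calling thread */
	ret = rpma_conn_cfg_new(&cfg);
	if (ret)
		return ret;

	/* example values; no other thread ever sees this object */
	(void) rpma_conn_cfg_set_timeout(cfg, 2000 /* ms */);
	(void) rpma_conn_cfg_set_cq_size(cfg, 16);

	/*
	 * Note: rpma_conn_req_new() itself is NOT thread-safe (see the next
	 * section), so the application still has to serialize calls to it.
	 */
	ret = rpma_conn_req_new(peer, addr, port, cfg, &req);
	if (ret == 0)
		ret = rpma_conn_req_connect(&req, NULL, conn_ptr);

	(void) rpma_conn_cfg_delete(&cfg);

	return ret;
}
```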
## NOT thread-safe API calls The following API calls of the librpma library are NOT thread-safe: - rpma_conn_req_new - rpma_conn_req_delete - rpma_ep_listen - rpma_ep_next_conn_req - rpma_ep_shutdown - rpma_mr_reg - rpma_mr_dereg - rpma_srq_delete - rpma_srq_new - rpma_utils_get_ibv_context ### rpma_log_default_function() The `rpma_log_default_function()` function is used throughout the API: - when logging errors and warnings (with the *LOG_LEVEL_ERROR* or *LOG_LEVEL_WARNING* log levels) - when establishing a connection (with the *LOG_LEVEL_NOTICE* log level) In the first case, the function is called only in error handling paths, so there is no risk to a normal operation. In the second case, it is called in functions that are marked as not thread-safe. The `rpma_log_default_function()` function is NOT thread-safe because it uses the [localtime_r(3)](https://www.gnu.org/software/libc/manual/html_node/Broken_002ddown-Time.html#index-localtime_005fr) and the [syslog(3)](https://www.gnu.org/software/libc/manual/html_node/syslog_003b-vsyslog.html#index-syslog) functions which are labeled as **MT-Safe env locale**. According to [Safety Remarks](https://www.gnu.org/software/libc/manual/html_node/Other-Safety-Remarks.html) documentation: - `env`: Functions marked with env as an MT-Safety issue access the environment with getenv or similar, without any guards to ensure safety in the presence of concurrent modifications. We do not mark these functions as MT- or AS-Unsafe, however, because functions that modify the environment are all marked with const:env and regarded as unsafe. Being unsafe, the latter are not to be called when multiple threads are running or asynchronous signals are enabled, and so the environment can be considered effectively constant in these contexts, which makes the former safe. - `locale`: Functions annotated with locale as an MT-Safety issue read from the locale object without any form of synchronization. Functions annotated with locale called concurrently with locale changes may behave in ways that do not correspond to any of the locales active during their execution, but an unpredictable mix thereof. We do not mark these functions as MT- or AS-Unsafe, however, because functions that modify the locale object are marked with const:locale and regarded as unsafe. Being unsafe, the latter are not to be called when multiple threads are running or asynchronous signals are enabled, and so the locale can be considered effectively constant in these contexts, which makes the former safe. ## Relationship of libibverbs and librdmacm The API of libibverbs is fully thread-safe and it can be called from every thread in the process. The detailed description is available at: - [rdmamojo/libibverbs](https://www.rdmamojo.com/2013/07/26/libibverbs-thread-safe-level/) - [ibv_alloc_td.3](https://man7.org/linux/man-pages/man3/ibv_alloc_td.3.html) ## Analysis of Valgrind suppressions ### Suppressions for libibverbs and librdmacm The suppressions for libibverbs and librdmacm are described in the [tests/memcheck-libibverbs-librdmacm.supp](tests/memcheck-libibverbs-librdmacm.supp) file. ### Suppressions for the drd tool The suppressions for the drd tool are described in the [tests/drd.supp](tests/drd.supp) file. ### Suppressions for the helgrind tool The suppressions for the helgrind tool are described in the [tests/helgrind.supp](tests/helgrind.supp) file. 
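As noted in the *NOT thread-safe API calls* section above, calls such as `rpma_mr_reg()` have to be serialized by the application if more than one thread may issue them. A minimal sketch of one possible approach (an application-level mutex; the wrapper name `mr_reg_locked` is not part of librpma) is shown below.

```c
#include <pthread.h>
#include <stddef.h>
#include <librpma.h>

/* serializes all memory registrations issued by this application */
static pthread_mutex_t mr_reg_lock = PTHREAD_MUTEX_INITIALIZER;

static int mr_reg_locked(struct rpma_peer *peer, void *ptr, size_t size,
		int usage, struct rpma_mr_local **mr_ptr)
{
	/* only one thread at a time may execute rpma_mr_reg() */
	(void) pthread_mutex_lock(&mr_reg_lock);
	int ret = rpma_mr_reg(peer, ptr, size, usage, mr_ptr);
	(void) pthread_mutex_unlock(&mr_reg_lock);

	return ret;
}
```

The same pattern can be applied to `rpma_mr_dereg()`; the endpoint-related calls, however, are additionally restricted to a single thread by assumption 3) above, so a lock alone is not enough for them.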
rpma-1.3.0/cmake/000077500000000000000000000000001443364775400135535ustar00rootroot00000000000000rpma-1.3.0/cmake/FindLIBIBVERBS.cmake000066400000000000000000000010721443364775400170010ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # message(STATUS "Checking for module 'libibverbs' w/o PkgConfig") find_library(LIBIBVERBS_LIBRARY NAMES libibverbs.so libibverbs ibverbs) set(LIBIBVERBS_LIBRARIES ${LIBIBVERBS_LIBRARY}) if(LIBIBVERBS_LIBRARY) message(STATUS " Found libibverbs w/o PkgConfig") else() set(MSG_NOT_FOUND "libibverbs NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBIBVERBS_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() rpma-1.3.0/cmake/FindLIBPMEM.cmake000066400000000000000000000010361443364775400164430ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # message(STATUS "Checking for module 'libpmem' w/o PkgConfig") find_library(LIBPMEM_LIBRARY NAMES libpmem.so libpmem pmem) set(LIBPMEM_LIBRARIES ${LIBPMEM_LIBRARY}) if(LIBPMEM_LIBRARY) message(STATUS " Found libpmem w/o PkgConfig") else() set(MSG_NOT_FOUND "libpmem NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBPMEM_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() rpma-1.3.0/cmake/FindLIBPMEM2.cmake000066400000000000000000000010511443364775400165220ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # message(STATUS "Checking for module 'libpmem2' w/o PkgConfig") find_library(LIBPMEM2_LIBRARY NAMES libpmem2.so libpmem2 pmem2) set(LIBPMEM2_LIBRARIES ${LIBPMEM2_LIBRARY}) if(LIBPMEM2_LIBRARY) message(STATUS " Found libpmem2 w/o PkgConfig") else() set(MSG_NOT_FOUND "libpmem2 NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBPMEM2_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() rpma-1.3.0/cmake/FindLIBPROTOBUFC.cmake000066400000000000000000000011261443364775400172500ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # message(STATUS "Checking for module 'libprotobuf-c' w/o PkgConfig") find_library(LIBPROTOBUFC_LIBRARY NAMES libprotobuf-c.so libprotobuf-c protobuf-c) set(LIBPROTOBUFC_LIBRARIES ${LIBPROTOBUFC_LIBRARY}) if(LIBPROTOBUFC_LIBRARY) message(STATUS " Found libprotobuf-c w/o PkgConfig") else() set(MSG_NOT_FOUND "libprotobuf-c NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBPROTOBUFC_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() rpma-1.3.0/cmake/FindLIBRDMACM.cmake000066400000000000000000000010571443364775400166530ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # message(STATUS "Checking for module 'librdmacm' w/o PkgConfig") find_library(LIBRDMACM_LIBRARY NAMES librdmacm.so librdmacm rdmacm) set(LIBRDMACM_LIBRARIES ${LIBRDMACM_LIBRARY}) if(LIBRDMACM_LIBRARY) message(STATUS " Found librdmacm w/o PkgConfig") else() set(MSG_NOT_FOUND "librdmacm NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBRDMACM_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() 
rpma-1.3.0/cmake/cmake_uninstall.cmake.in000066400000000000000000000020241443364775400203310ustar00rootroot00000000000000# From: https://cmake.org/Wiki/CMake_FAQ if(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") message(FATAL_ERROR "Cannot find install manifest: @CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") endif(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") file(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) string(REGEX REPLACE "\n" ";" files "${files}") foreach(file ${files}) message(STATUS "Uninstalling $ENV{DESTDIR}${file}") if(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") exec_program("@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE rm_retval ) if(NOT "${rm_retval}" STREQUAL 0) message(FATAL_ERROR "Problem when removing $ENV{DESTDIR}${file}") endif(NOT "${rm_retval}" STREQUAL 0) else(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") message(STATUS "File $ENV{DESTDIR}${file} does not exist.") endif(IS_SYMLINK "$ENV{DESTDIR}${file}" OR EXISTS "$ENV{DESTDIR}${file}") endforeach(file) rpma-1.3.0/cmake/functions.cmake000066400000000000000000000255611443364775400165760ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # Copyright (c) 2022-2023, Fujitsu Limited # # # functions.cmake - helper functions for CMakeLists.txt # include(CheckCCompilerFlag) # prepends prefix to list of strings function(prepend var prefix) set(listVar "") foreach(f ${ARGN}) list(APPEND listVar "${prefix}/${f}") endforeach(f) set(${var} "${listVar}" PARENT_SCOPE) endfunction() # Checks whether flag is supported by current C compiler and appends # it to the relevant cmake variable. # 1st argument is a flag # 2nd (optional) argument is a build type (debug, release) macro(add_flag flag) string(REPLACE - _ flag2 ${flag}) string(REPLACE " " _ flag2 ${flag2}) string(REPLACE = "_" flag2 ${flag2}) set(check_name "C_HAS_${flag2}") check_c_compiler_flag(${flag} ${check_name}) if (${${check_name}}) if (${ARGC} EQUAL 1) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}") else() set(CMAKE_C_FLAGS_${ARGV1} "${CMAKE_C_FLAGS_${ARGV1}} ${flag}") endif() endif() endmacro() macro(add_sanitizer_flag flag) set(SAVED_CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES}) set(CMAKE_REQUIRED_LIBRARIES "${CMAKE_REQUIRED_LIBRARIES} -fsanitize=${flag}") if(${flag} STREQUAL "address") set(check_name "C_HAS_ASAN") elseif(${flag} STREQUAL "undefined") set(check_name "C_HAS_UBSAN") endif() check_c_compiler_flag("-fsanitize=${flag}" ${check_name}) if (${${check_name}}) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=${flag}") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=${flag}") else() message(STATUS " ${flag} sanitizer is not supported") endif() set(CMAKE_REQUIRED_LIBRARIES ${SAVED_CMAKE_REQUIRED_LIBRARIES}) endmacro() # Generates cstyle-$name target and attaches it # as a dependency of global "cstyle" target. # cstyle-$name target verifies C style of files in current source dir. # If more arguments are used, they are used as files to be checked # instead. # ${name} must be unique. 
function(add_cstyle name) if(${ARGC} EQUAL 1) add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/cstyle-${name}-status DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/cstyle -pP -o src2man ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/cstyle-${name}-status ) else() add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/cstyle-${name}-status DEPENDS ${ARGN} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/cstyle -pP -o src2man ${ARGN} COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/cstyle-${name}-status ) endif() add_custom_target(cstyle-${name} DEPENDS ${CMAKE_BINARY_DIR}/cstyle-${name}-status) add_dependencies(cstyle cstyle-${name}) endfunction() # Generates check-whitespace-$name target and attaches it as a dependency # of global "check-whitespace" target. # ${name} must be unique. function(add_check_whitespace name) if(${ARGC} EQUAL 1) add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/check-whitespace-${name}-status DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/check_whitespace ${CMAKE_CURRENT_SOURCE_DIR}/*.c ${CMAKE_CURRENT_SOURCE_DIR}/*.h COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/check-whitespace-${name}-status) else() add_custom_command(OUTPUT ${CMAKE_BINARY_DIR}/check-whitespace-${name}-status DEPENDS ${ARGN} COMMAND ${PERL_EXECUTABLE} ${CMAKE_SOURCE_DIR}/utils/check_whitespace ${ARGN} COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_BINARY_DIR}/check-whitespace-${name}-status) endif() add_custom_target(check-whitespace-${name} DEPENDS ${CMAKE_BINARY_DIR}/check-whitespace-${name}-status) add_dependencies(check-whitespace check-whitespace-${name}) endfunction() # Sets ${ret} to version of program specified by ${name} in major.minor format function(get_program_version_major_minor name ret) execute_process(COMMAND ${name} --version OUTPUT_VARIABLE cmd_ret ERROR_QUIET) STRING(REGEX MATCH "([0-9]+.)([0-9]+)" VERSION ${cmd_ret}) SET(${ret} ${VERSION} PARENT_SCOPE) endfunction() function(find_pmemcheck) set(ENV{PATH} ${VALGRIND_PREFIX}/bin:$ENV{PATH}) execute_process(COMMAND valgrind --tool=pmemcheck --help RESULT_VARIABLE VALGRIND_PMEMCHECK_RET OUTPUT_QUIET ERROR_QUIET) if(VALGRIND_PMEMCHECK_RET) set(VALGRIND_PMEMCHECK_FOUND 0 CACHE INTERNAL "") else() set(VALGRIND_PMEMCHECK_FOUND 1 CACHE INTERNAL "") endif() if(VALGRIND_PMEMCHECK_FOUND) execute_process(COMMAND valgrind --tool=pmemcheck true ERROR_VARIABLE PMEMCHECK_OUT OUTPUT_QUIET) string(REGEX MATCH ".*pmemcheck-([0-9.]*),.*" PMEMCHECK_OUT "${PMEMCHECK_OUT}") set(PMEMCHECK_VERSION ${CMAKE_MATCH_1} CACHE INTERNAL "") else() message(WARNING "Valgrind pmemcheck NOT found. 
Pmemcheck tests will not be performed.") endif() endfunction() function(valgrind_check_s_option) set(ENV{PATH} ${VALGRIND_PREFIX}/bin:$ENV{PATH}) execute_process(COMMAND valgrind -s date ERROR_VARIABLE VALGRIND_S_OPTION_STDERR OUTPUT_QUIET) string(REGEX MATCH "Unknown option: -s" VALGRIND_S_OPTION_MATCH "${VALGRIND_S_OPTION_STDERR}") if(VALGRIND_S_OPTION_MATCH) set(VALGRIND_S_OPTION "" CACHE INTERNAL "") message(STATUS "valgrind -s option is not supported") else() set(VALGRIND_S_OPTION "-s" CACHE INTERNAL "") message(STATUS "valgrind -s option is supported") endif() endfunction() # check if libibverbs has ODP support function(is_ODP_supported var) CHECK_C_SOURCE_COMPILES(" #include /* check if 'IBV_ACCESS_ON_DEMAND is defined */ int main() { return IBV_ACCESS_ON_DEMAND; }" ON_DEMAND_PAGING_SUPPORTED) set(var ${ON_DEMAND_PAGING_SUPPORTED} PARENT_SCOPE) endfunction() # check if libibverbs has ibv_advise_mr() support function(is_ibv_advise_mr_supported var) CHECK_C_SOURCE_COMPILES(" #include /* check if ibv_advise_mr() is defined */ int main() { return !ibv_advise_mr; }" IBV_ADVISE_MR_SUPPORTED) set(var ${IBV_ADVISE_MR_SUPPORTED} PARENT_SCOPE) endfunction() # check if libibverbs has ibv_advise_mr() support function(are_ibv_advise_flags_supported var) CHECK_C_SOURCE_COMPILES(" #include /* check if all required IBV_ADVISE_MR* flags are supported */ int main() { return IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE | IBV_ADVISE_MR_FLAG_FLUSH; }" IBV_ADVISE_MR_FLAGS_SUPPORTED) if(IBV_ADVISE_MR_FLAGS_SUPPORTED) message(STATUS "All required IBV_ADVISE_MR* flags are supported") else() message(WARNING "Required IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE or IBV_ADVISE_MR_FLAG_FLUSH flags are NOT supported. " "rpma_mr_advise() will not be called in the examples.") endif() set(var ${IBV_ADVISE_MR_FLAGS_SUPPORTED} PARENT_SCOPE) endfunction() # check if librdmacm has correct signature of rdma_getaddrinfo() function(check_signature_rdma_getaddrinfo var) get_filename_component(REAL_CMAKE_C_COMPILER ${CMAKE_C_COMPILER} REALPATH) if(${REAL_CMAKE_C_COMPILER} MATCHES "gcc") set(DISCARDED_QUALIFIERS_FLAG "-Werror=discarded-qualifiers") elseif(${REAL_CMAKE_C_COMPILER} MATCHES "clang") set(DISCARDED_QUALIFIERS_FLAG "-Werror;-Wincompatible-pointer-types-discards-qualifiers") endif() # check if a compiler supports the ${DISCARDED_QUALIFIERS_FLAG} flag CHECK_C_COMPILER_FLAG("${DISCARDED_QUALIFIERS_FLAG}" C_HAS_Werror_discarded_qualifiers) if(C_HAS_Werror_discarded_qualifiers) set(CMAKE_REQUIRED_FLAGS "${DISCARDED_QUALIFIERS_FLAG};${CMAKE_REQUIRED_FLAGS}") set(CMAKE_REQUIRED_LIBRARIES "-lrdmacm;${CMAKE_REQUIRED_LIBRARIES}") CHECK_C_SOURCE_COMPILES(" #include int main() { const char *node; const char *service; const struct rdma_addrinfo *hints; struct rdma_addrinfo **res; return rdma_getaddrinfo(node, service, hints, res); }" RDMA_GETADDRINFO_NEW_SIGNATURE) set(var ${RDMA_GETADDRINFO_NEW_SIGNATURE} PARENT_SCOPE) return() endif() # # We are running an old version of the GCC compiler # that does not support the '-Werror=discarded-qualifiers' flag. 
# message(STATUS "Performing Test RDMA_GETADDRINFO_NEW_SIGNATURE") find_file(RDMA_CMA_H rdma_cma.h PATHS /usr/include/rdma /usr/include) if(NOT RDMA_CMA_H) message(FATAL_ERROR "Cannot find the 'rdma_cma.h' header file!") endif() file(STRINGS ${RDMA_CMA_H} CORRECT_SIGNATURE_FOUND REGEX "int rdma_getaddrinfo[(]const char") if(CORRECT_SIGNATURE_FOUND) message(STATUS "Performing Test RDMA_GETADDRINFO_NEW_SIGNATURE - Success") # XXX It should be: # set(var 1 PARENT_SCOPE) # but for an unknown reason it does not work. set(RDMA_GETADDRINFO_NEW_SIGNATURE 1 PARENT_SCOPE) else() message(STATUS "Performing Test RDMA_GETADDRINFO_NEW_SIGNATURE - Failed") endif() endfunction() # clock_gettime() requires linking with -lrt for glibc versions before 2.17 function(check_if_librt_is_required) set(MINIMUM_GLIBC_VERSION "2.17") execute_process(COMMAND ldd --version OUTPUT_VARIABLE LDD_OUTPUT OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) string(REGEX MATCH "[0-9][.][0-9]+" LDD_VERSION "${LDD_OUTPUT}") if(${LDD_VERSION} VERSION_LESS ${MINIMUM_GLIBC_VERSION}) set(LIBRT_LIBRARIES "rt" PARENT_SCOPE) # librt endif() endfunction() # check if atomic operations are supported function(atomic_operations_supported var) CHECK_C_SOURCE_COMPILES(" #include /* check if atomic operations are supported */ int main() { _Atomic int i, j; atomic_init(&i, 0); j = atomic_load_explicit(&i, __ATOMIC_SEQ_CST); atomic_store_explicit(&i, 1, __ATOMIC_SEQ_CST); return 0; }" ATOMIC_OPERATIONS_SUPPORTED) set(var ${ATOMIC_OPERATIONS_SUPPORTED} PARENT_SCOPE) endfunction() # check if libibverbs supports native atomic write function(is_ibv_atomic_write_supported var) CHECK_C_SOURCE_COMPILES(" #include /* * check if IB_UVERBS_DEVICE_ATOMIC_WRITE, IBV_QP_EX_WITH_ATOMIC_WRITE * and ibv_wr_atomic_write() are defined */ int main() { uint64_t device_cap_flag = IB_UVERBS_DEVICE_ATOMIC_WRITE; uint64_t send_ops_flag = IBV_QP_EX_WITH_ATOMIC_WRITE; return !ibv_wr_atomic_write; }" NATIVE_ATOMIC_WRITE_SUPPORTED) set(var ${NATIVE_ATOMIC_WRITE_SUPPORTED} PARENT_SCOPE) endfunction() # check if libibverbs supports the native flush function(is_ibv_flush_supported var) CHECK_C_SOURCE_COMPILES(" #include /* * check if IBV_ACCESS_FLUSH_GLOBAL, IBV_ACCESS_FLUSH_PERSISTENT, * IB_UVERBS_DEVICE_FLUSH_GLOBAL, IB_UVERBS_DEVICE_FLUSH_PERSISTENT, * IBV_QP_EX_WITH_FLUSH and ibv_wr_flush() are defined */ int main() { int access = IBV_ACCESS_FLUSH_GLOBAL | IBV_ACCESS_FLUSH_PERSISTENT; uint64_t device_cap_flag = IB_UVERBS_DEVICE_FLUSH_GLOBAL | IB_UVERBS_DEVICE_FLUSH_PERSISTENT; uint64_t send_ops_flag = IBV_QP_EX_WITH_FLUSH; return !ibv_wr_flush; }" NATIVE_FLUSH_SUPPORTED) set(var ${NATIVE_FLUSH_SUPPORTED} PARENT_SCOPE) endfunction() rpma-1.3.0/cmake/librpma-config.cmake.in000066400000000000000000000005341443364775400200550ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # @PACKAGE_INIT@ find_path(LIBRPMA_INCLUDE_DIR librpma.h) find_library(LIBRPMA_LIBRARY NAMES rpma librpma) set_and_check(LIBRPMA_INCLUDE "@PACKAGE_CMAKE_INSTALL_INCLUDEDIR@") set(LIBRPMA_LIBRARIES ${LIBRPMA_LIBRARY}) set(LIBRPMA_INCLUDE_DIRS ${LIBRPMA_INCLUDE_DIR}) rpma-1.3.0/cmake/librpma.pc.in000066400000000000000000000005371443364775400161370ustar00rootroot00000000000000prefix=@CMAKE_INSTALL_PREFIX@ includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ Name: librpma Description: librpma - library to simplify accessing persistent memory on remote hosts over RDMA Version: @VERSION@ URL: https://github.com/pmem/rpma 
Cflags: -I${includedir} Libs: -L${libdir} -lrpma -libverbs -lrdmacm rpma-1.3.0/cmake/packages.cmake000066400000000000000000000056621443364775400163440ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2020, Intel Corporation # # # packages.cmake - CPack configuration for rpm and deb generation # string(TOUPPER "${CPACK_GENERATOR}" CPACK_GENERATOR) if(NOT ("${CPACK_GENERATOR}" STREQUAL "DEB" OR "${CPACK_GENERATOR}" STREQUAL "RPM")) message(FATAL_ERROR "Wrong CPACK_GENERATOR value, valid generators are: DEB, RPM") endif() if("${CPACK_GENERATOR}" STREQUAL "RPM") find_program(RPMBUILD NAMES rpmbuild) if(NOT RPMBUILD) message(FATAL_ERROR "rpmbuild not found - RPM packages will not be generated") endif() endif() if("${CPACK_GENERATOR}" STREQUAL "DEB") find_program(DEBBUILD NAMES dpkg-buildpackage) if(NOT DEBBUILD) message(FATAL_ERROR "dpkg-buildpackage not found - DEB packages will not be generated") endif() endif() set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") set(CMAKE_INSTALL_TMPDIR /tmp CACHE PATH "Output dir for tmp") set(CPACK_COMPONENTS_ALL_IN_ONE) # Filter out some of directories from %dir section, which are expected # to exist in filesystem. Leaving them might lead to conflicts with other # packages (for example with 'filesystem' package on fedora which specify # /usr, /usr/local, etc.) set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CPACK_PACKAGING_INSTALL_PREFIX} ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR} ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_LIBDIR}/pkgconfig ${CPACK_PACKAGING_INSTALL_PREFIX}/${CMAKE_INSTALL_INCDIR} ${CPACK_PACKAGING_INSTALL_PREFIX}/share ${CPACK_PACKAGING_INSTALL_PREFIX}/share/doc) set(CPACK_PACKAGE_NAME "librpma") set(CPACK_PACKAGE_VERSION ${VERSION}) set(CPACK_PACKAGE_VERSION_MAJOR ${VERSION_MAJOR}) set(CPACK_PACKAGE_VERSION_MINOR ${VERSION_MINOR}) set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "library to simplify accessing persistent memory on remote hosts over RDMA") set(CPACK_PACKAGE_VENDOR "Intel") set(CPACK_RPM_PACKAGE_NAME "librpma-devel") set(CPACK_RPM_PACKAGE_GROUP "Development/Libraries") set(CPACK_RPM_PACKAGE_LICENSE "BSD") set(CPACK_RPM_PACKAGE_ARCHITECTURE x86_64) set(CPACK_RPM_PACKAGE_REQUIRES "libibverbs, librdmacm") #set(CPACK_RPM_CHANGELOG_FILE ${CMAKE_SOURCE_DIR}/ChangeLog) set(CPACK_DEBIAN_PACKAGE_NAME "librpma-dev") set(CPACK_DEBIAN_PACKAGE_VERSION ${CPACK_PACKAGE_VERSION}) set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE amd64) set(CPACK_DEBIAN_PACKAGE_DEPENDS "libibverbs-dev, librdmacm-dev") set(CPACK_DEBIAN_PACKAGE_MAINTAINER "tomasz.gromadzki@intel.com") if("${CPACK_GENERATOR}" STREQUAL "RPM") set(CPACK_PACKAGE_FILE_NAME ${CPACK_RPM_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}.${CPACK_RPM_PACKAGE_ARCHITECTURE}) elseif("${CPACK_GENERATOR}" STREQUAL "DEB") # We are using "gnutar" to avoid this bug: # https://gitlab.kitware.com/cmake/cmake/issues/14332 set(CPACK_DEBIAN_ARCHIVE_TYPE "gnutar") set(CPACK_PACKAGE_FILE_NAME ${CPACK_DEBIAN_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}_${CPACK_DEBIAN_PACKAGE_ARCHITECTURE}) endif() set(targetDestDir ${CMAKE_INSTALL_TMPDIR}) include(CPack) rpma-1.3.0/doc/000077500000000000000000000000001443364775400132405ustar00rootroot00000000000000rpma-1.3.0/doc/CMakeLists.txt000066400000000000000000000040161443364775400160010ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2021, Intel Corporation # find_program(SRC2MAN NAMES src2man) find_program(GROFF NAMES groff) find_program(FIND NAMES find) find_program(DIFF NAMES diff) 
find_program(GAWK NAMES gawk) # required by src2man find_program(PANDOC NAMES pandoc) if(NOT PANDOC) message(WARNING "pandoc not found - Markdown documentation will not be generated") endif() if(SRC2MAN AND GROFF AND FIND AND DIFF AND GAWK) # create man pages from sources set(man3_list ${CMAKE_SOURCE_DIR}/doc/manuals_3.txt) set(man7_list ${CMAKE_SOURCE_DIR}/doc/manuals_7.txt) add_custom_target(doc ALL COMMAND ${CMAKE_SOURCE_DIR}/utils/src2mans.sh ${CMAKE_SOURCE_DIR}/src ${man3_list} ${man7_list} ${CMAKE_SOURCE_DIR}/utils/mans_header.md) add_custom_target(doc-fix COMMAND ${CMAKE_SOURCE_DIR}/utils/src2mans.sh ${CMAKE_SOURCE_DIR}/src ${man3_list} ${man7_list} ${CMAKE_SOURCE_DIR}/utils/mans_header.md fix) file(STRINGS ${man3_list} man3) file(STRINGS ${man7_list} man7) # # It is just: # list(TRANSFORM man3 PREPEND "${CMAKE_CURRENT_BINARY_DIR}/") # but 'list(TRANSFORM' requires CMake>=v3.12 # set(new_man3 "") foreach(item IN LISTS man3) set(new_man3 "${CMAKE_CURRENT_BINARY_DIR}/${item};${new_man3}") endforeach() set(new_man7 "") foreach(item IN LISTS man7) set(new_man7 "${CMAKE_CURRENT_BINARY_DIR}/${item};${new_man7}") endforeach() # install manpages install(FILES ${new_man3} DESTINATION ${CMAKE_INSTALL_MANDIR}/man3) install(FILES ${new_man7} DESTINATION ${CMAKE_INSTALL_MANDIR}/man7) else() if(NOT SRC2MAN) message(WARNING "src2man not found - man pages will not be generated") endif() if(NOT GROFF) message(WARNING "groff not found - man pages will not be generated") endif() if(NOT FIND) message(WARNING "find not found - man pages will not be generated") endif() if(NOT DIFF) message(WARNING "diff not found - man pages will not be generated") endif() if(NOT GAWK) message(WARNING "gawk not found - man pages will not be generated") endif() endif() rpma-1.3.0/doc/manuals_3.txt000066400000000000000000000037061443364775400156710ustar00rootroot00000000000000rpma_atomic_write.3 rpma_conn_apply_remote_peer_cfg.3 rpma_conn_cfg_delete.3 rpma_conn_cfg_get_compl_channel.3 rpma_conn_cfg_get_cq_size.3 rpma_conn_cfg_get_rcq_size.3 rpma_conn_cfg_get_rq_size.3 rpma_conn_cfg_get_sq_size.3 rpma_conn_cfg_get_srq.3 rpma_conn_cfg_get_timeout.3 rpma_conn_cfg_new.3 rpma_conn_cfg_set_compl_channel.3 rpma_conn_cfg_set_cq_size.3 rpma_conn_cfg_set_rcq_size.3 rpma_conn_cfg_set_rq_size.3 rpma_conn_cfg_set_sq_size.3 rpma_conn_cfg_set_srq.3 rpma_conn_cfg_set_timeout.3 rpma_conn_delete.3 rpma_conn_disconnect.3 rpma_conn_get_compl_fd.3 rpma_conn_get_cq.3 rpma_conn_get_event_fd.3 rpma_conn_get_private_data.3 rpma_conn_get_qp_num.3 rpma_conn_get_rcq.3 rpma_conn_next_event.3 rpma_conn_req_connect.3 rpma_conn_req_delete.3 rpma_conn_req_get_private_data.3 rpma_conn_req_new.3 rpma_conn_req_recv.3 rpma_conn_wait.3 rpma_cq_get_fd.3 rpma_cq_get_wc.3 rpma_cq_wait.3 rpma_ep_get_fd.3 rpma_ep_listen.3 rpma_ep_next_conn_req.3 rpma_ep_shutdown.3 rpma_err_2str.3 rpma_flush.3 rpma_log_get_threshold.3 rpma_log_set_function.3 rpma_log_set_threshold.3 rpma_mr_advise.3 rpma_mr_dereg.3 rpma_mr_get_descriptor.3 rpma_mr_get_descriptor_size.3 rpma_mr_get_ptr.3 rpma_mr_get_size.3 rpma_mr_reg.3 rpma_mr_remote_delete.3 rpma_mr_remote_from_descriptor.3 rpma_mr_remote_get_flush_type.3 rpma_mr_remote_get_size.3 rpma_peer_cfg_delete.3 rpma_peer_cfg_from_descriptor.3 rpma_peer_cfg_get_descriptor.3 rpma_peer_cfg_get_descriptor_size.3 rpma_peer_cfg_get_direct_write_to_pmem.3 rpma_peer_cfg_new.3 rpma_peer_cfg_set_direct_write_to_pmem.3 rpma_peer_delete.3 rpma_peer_new.3 rpma_read.3 rpma_recv.3 rpma_send.3 rpma_send_with_imm.3 rpma_srq_cfg_delete.3 
rpma_srq_cfg_get_rcq_size.3 rpma_srq_cfg_get_rq_size.3 rpma_srq_cfg_new.3 rpma_srq_cfg_set_rcq_size.3 rpma_srq_cfg_set_rq_size.3 rpma_srq_delete.3 rpma_srq_get_rcq.3 rpma_srq_new.3 rpma_srq_recv.3 rpma_utils_conn_event_2str.3 rpma_utils_get_ibv_context.3 rpma_utils_ibv_context_is_odp_capable.3 rpma_write.3 rpma_write_with_imm.3 rpma-1.3.0/doc/manuals_7.txt000066400000000000000000000000121443364775400156600ustar00rootroot00000000000000librpma.7 rpma-1.3.0/examples/000077500000000000000000000000001443364775400143115ustar00rootroot00000000000000rpma-1.3.0/examples/01-connection/000077500000000000000000000000001443364775400166665ustar00rootroot00000000000000rpma-1.3.0/examples/01-connection/CMakeLists.txt000066400000000000000000000016541443364775400214340ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(connection-example C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PUBLIC ${LIBRPMA_INCLUDE_DIRS}) target_link_libraries(${name} rpma ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c) add_example(client client.c) rpma-1.3.0/examples/01-connection/README.md000066400000000000000000000011031443364775400201400ustar00rootroot00000000000000Example of establishing an RPMA connection === The connection example implements two parts of the connection establishing process: - a server which will be waiting for incoming connections using single RPMA endpoint - a client which will initiate establishing the connection to the server **Note**: This example, after establishing the connection, just disconnects. For examples of how to make real use of the connection, please see other examples. 
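The following sketch (not part of the example sources; the helper name `wait_for_event` is illustrative) condenses the connection-event handling that both the client and the server below rely on:

```c
#include <librpma.h>

/* both sides expect RPMA_CONN_ESTABLISHED first and RPMA_CONN_CLOSED last */
static int wait_for_event(struct rpma_conn *conn, enum rpma_conn_event expected)
{
	enum rpma_conn_event ev = RPMA_CONN_UNDEFINED;

	/* rpma_conn_next_event() blocks until the next connection event */
	int ret = rpma_conn_next_event(conn, &ev);
	if (ret)
		return ret;

	return (ev == expected) ? 0 : -1;
}
```

In addition, the client below retries the whole connect sequence a few times when the request is rejected, which covers the case of the server not listening yet.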
## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port ``` rpma-1.3.0/examples/01-connection/client.c000066400000000000000000000066671443364775400203270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * client.c -- a client of the connection example */ #include #include #include #include #include #include #ifdef TEST_MOCK_MAIN #define main client_main #endif #define MAX_RETRY 10 #define RETRY_DELAY 5 int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "usage: %s \n", argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* parameters */ char *addr = argv[1]; char *port = argv[2]; /* resources */ struct ibv_context *ibv_ctx = NULL; struct rpma_peer *peer = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret = 0; /* obtain an IBV context for a remote IP address */ ret = rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) return ret; /* create a new peer object */ ret = rpma_peer_new(ibv_ctx, &peer); if (ret) return ret; /* prepare a connection's private data */ const char *msg = "Hello server!"; struct rpma_conn_private_data pdata; pdata.ptr = (void *)msg; pdata.len = (strlen(msg) + 1) * sizeof(char); for (int retry = 0; retry < MAX_RETRY; retry++) { /* create a connection request */ ret = rpma_conn_req_new(peer, addr, port, NULL, &req); if (ret) goto err_peer_delete; ret = rpma_conn_req_connect(&req, &pdata, &conn); if (ret) goto err_peer_delete; /* wait for the connection to establish */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) { goto err_conn_disconnect; } else if (conn_event == RPMA_CONN_ESTABLISHED) { break; } else if (conn_event == RPMA_CONN_REJECTED) { (void) rpma_conn_disconnect(conn); (void) rpma_conn_delete(&conn); if (retry < MAX_RETRY - 1) { /* Wait for the server */ fprintf(stderr, "Retrying...\n"); sleep(RETRY_DELAY); } else { fprintf(stderr, "The retry number exceeded. 
Closing.\n"); } } else { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_disconnect; } } if (conn == NULL) goto err_peer_delete; /* here you can use the newly established connection */ (void) rpma_conn_get_private_data(conn, &pdata); if (pdata.ptr) { char *msg = pdata.ptr; fprintf(stdout, "Received a message: %s\n", msg); } else { fprintf(stdout, "No message received\n"); } /* wait for the connection to being closed */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) { goto err_conn_disconnect; } else if (conn_event != RPMA_CONN_CLOSED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_disconnect; } /* disconnect the connection */ ret = rpma_conn_disconnect(conn); if (ret) goto err_conn_delete; /* delete the connection object */ ret = rpma_conn_delete(&conn); if (ret) goto err_peer_delete; /* delete the peer object */ ret = rpma_peer_delete(&peer); if (ret) goto err_exit; return 0; err_conn_disconnect: (void) rpma_conn_disconnect(conn); err_conn_delete: (void) rpma_conn_delete(&conn); err_peer_delete: (void) rpma_peer_delete(&peer); err_exit: return ret; } rpma-1.3.0/examples/01-connection/server.c000066400000000000000000000062601443364775400203440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server.c -- a server of the connection example */ #include #include #include #include #include #ifdef TEST_MOCK_MAIN #define main server_main #endif int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "usage: %s \n", argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* parameters */ char *addr = argv[1]; char *port = argv[2]; /* resources */ struct ibv_context *ibv_ctx = NULL; struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret = 0; /* obtain an IBV context for a local IP address */ ret = rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) return ret; /* create a new peer object */ ret = rpma_peer_new(ibv_ctx, &peer); if (ret) return ret; /* create a new endpoint object */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* obtain an incoming connection request */ ret = rpma_ep_next_conn_req(ep, NULL, &req); if (ret) goto err_ep_shutdown; /* * connect / accept the connection request and obtain the connection object */ const char *msg = "Hello client!"; struct rpma_conn_private_data pdata; pdata.ptr = (void *)msg; pdata.len = (strlen(msg) + 1) * sizeof(char); ret = rpma_conn_req_connect(&req, &pdata, &conn); if (ret) goto err_ep_shutdown; /* wait for the connection to being establish */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) { goto err_conn_delete; } else if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_delete; } /* here you can use the newly established connection */ (void) rpma_conn_get_private_data(conn, &pdata); if (pdata.ptr) { char *msg = pdata.ptr; fprintf(stdout, "Received a message: %s\n", msg); } else { fprintf(stdout, "No message received\n"); } /* 
disconnect the connection */ ret = rpma_conn_disconnect(conn); if (ret) goto err_conn_delete; /* wait for the connection to being closed */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) { goto err_conn_delete; } else if (conn_event != RPMA_CONN_CLOSED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_delete; } /* delete the connection object */ ret = rpma_conn_delete(&conn); if (ret) goto err_ep_shutdown; /* shutdown the endpoint */ ret = rpma_ep_shutdown(&ep); if (ret) goto err_peer_delete; /* delete the peer object */ ret = rpma_peer_delete(&peer); if (ret) goto err_exit; return 0; err_conn_delete: (void) rpma_conn_delete(&conn); err_ep_shutdown: (void) rpma_ep_shutdown(&ep); err_peer_delete: (void) rpma_peer_delete(&peer); err_exit: return ret; } rpma-1.3.0/examples/02-read-to-volatile/000077500000000000000000000000001443364775400177005ustar00rootroot00000000000000rpma-1.3.0/examples/02-read-to-volatile/CMakeLists.txt000066400000000000000000000021341443364775400224400ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(read-example C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PUBLIC ${LIBRPMA_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c) add_example(client client.c ../common/common-conn.c) rpma-1.3.0/examples/02-read-to-volatile/README.md000066400000000000000000000012151443364775400211560ustar00rootroot00000000000000Example of performing an RPMA read to a volatile memory region === The read example implements two parts of the read process: - a server which will register a volatile memory region as a read source - a client which will register a volatile memory region as a read destination, post a read request and wait for its completion **Note**: For the sake of this example, the memory region being read from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port ``` rpma-1.3.0/examples/02-read-to-volatile/client.c000066400000000000000000000102651443364775400213260ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the read-to-volatile example * * The client in this example reads data from the remote memory to a local volatile one. 
*/ #include #include #include #include #include "common-conn.h" #ifdef TEST_USE_CMOCKA #include "cmocka_headers.h" #include "cmocka_alloc.h" #endif #ifdef TEST_MOCK_MAIN #define main client_main #endif int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "usage: %s \n", argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* parameters */ char *addr = argv[1]; char *port = argv[2]; /* resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; struct ibv_wc wc; /* * resources - memory regions: * - src_* - a remote one which is a source for the read * - dst_* - a local, volatile one which is a destination for the read */ void *dst_ptr = NULL; struct rpma_mr_local *dst_mr = NULL; struct rpma_mr_remote *src_mr = NULL; size_t src_size = 0; /* * lookup an ibv_context via the address and create a new peer using it */ int ret = client_peer_via_address(addr, &peer); if (ret) return ret; /* allocate a memory */ dst_ptr = malloc_aligned(KILOBYTE); if (dst_ptr == NULL) { ret = -1; goto err_peer_delete; } /* register the memory */ ret = rpma_mr_reg(peer, dst_ptr, KILOBYTE, RPMA_MR_USAGE_READ_DST, &dst_mr); if (ret) goto err_mr_free; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* receive a memory info from the server */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret) { goto err_conn_disconnect; } else if (pdata.ptr == NULL) { fprintf(stderr, "The server has not provided a remote memory region. (the connection's private data is empty): %s", strerror(ret)); goto err_conn_disconnect; } /* * Create a remote memory registration structure from the received descriptor. 
*/ struct common_data *dst_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &src_mr); if (ret) goto err_conn_disconnect; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(src_mr, &src_size); if (ret) { goto err_mr_remote_delete; } else if (src_size > KILOBYTE) { fprintf(stderr, "Remote memory region size too big to reading to the sink buffer of the assumed size (%zu > %d)\n", src_size, KILOBYTE); goto err_mr_remote_delete; } /* post an RDMA read operation */ ret = rpma_read(conn, dst_mr, 0, src_mr, 0, src_size, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; /* wait for a completion of the RDMA read */ ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_read() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_RDMA_READ) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value (%d != %d)\n", wc.opcode, IBV_WC_RDMA_READ); goto err_mr_remote_delete; } (void) fprintf(stdout, "Read a message: %s\n", (char *)dst_ptr); err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&src_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&dst_mr); err_mr_free: /* free the memory */ free(dst_ptr); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); return ret; } rpma-1.3.0/examples/02-read-to-volatile/server.c000066400000000000000000000057701443364775400213630ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server.c -- a server of the read-to-volatile example * * The server in this example exposes its local memory to a client and allows * him reading its contents. */ #include #include #include #include #include #include "common-conn.h" #define HELLO_STR "Hello client!" 
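/*
 * Data flow of this server (added commentary, not part of the original
 * sources): the HELLO_STR buffer is registered with RPMA_MR_USAGE_READ_SRC,
 * its descriptor is packed into struct common_data and handed to the client
 * as the connection's private data on accept; the client then performs the
 * RDMA read on its own, while this process only waits for RPMA_CONN_CLOSED
 * before deregistering and freeing the buffer.
 */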
#ifdef TEST_USE_CMOCKA #include "cmocka_headers.h" #include "cmocka_alloc.h" #endif #ifdef TEST_MOCK_MAIN #define main server_main #endif int main(int argc, char *argv[]) { if (argc < 3) { fprintf(stderr, "usage: %s \n", argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* parameters */ char *addr = argv[1]; char *port = argv[2]; /* resources - general */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; /* resources - memory region */ void *mr_ptr = NULL; size_t mr_size = 0; struct rpma_mr_local *mr = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ int ret = server_peer_via_address(addr, &peer); if (ret) return ret; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* allocate a memory */ mr_size = strlen(HELLO_STR) + 1; mr_ptr = malloc_aligned(mr_size); if (mr_ptr == NULL) { ret = -1; goto err_ep_shutdown; } /* fill the memory with a content */ memcpy(mr_ptr, HELLO_STR, mr_size); /* register the memory */ ret = rpma_mr_reg(peer, mr_ptr, mr_size, RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_mr_free; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; struct common_data data = {0}; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* * Wait for an incoming connection request, accept it and wait for its establishment. */ ret = server_accept_connection(ep, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; /* * Between the connection being established and the connection being closed * the client will perform the RDMA read. */ /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection structure. 
*/ (void) common_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_mr_free: /* free the memory */ free(mr_ptr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); return ret; } rpma-1.3.0/examples/03-read-to-persistent/000077500000000000000000000000001443364775400202625ustar00rootroot00000000000000rpma-1.3.0/examples/03-read-to-persistent/CMakeLists.txt000066400000000000000000000023431443364775400230240ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(read-to-persistent C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server SRCS server.c) add_example_with_pmem(NAME client SRCS client.c ../common/common-utils.c) rpma-1.3.0/examples/03-read-to-persistent/README.md000066400000000000000000000027531443364775400215500ustar00rootroot00000000000000Example of performing an RPMA read to a persistent memory region === The read example implements two parts of the read process: - The server, if provided (and capable of), prepares a local persistent memory and registers it as a reading destination. After the connection is established the server receives the client's memory region registered as a reading source. The servers performs a read from the remote memory region to a local memory region. - The client, if provided (and capable of), prepares a local persistent memory (including its contents), registers it as a reading source, and exposes the memory description along with other parameters required to perform a RDMA read. After the connection is established, the client just waits for the server to disconnect. **Note**: If either server or client does not have a pmem path (or it is not capable to use pmem at all) it uses DRAM instead. **Note**: For the sake of this example, the memory region being read from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port [ []] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example) and `` is an offset inside the above mentioned PMem device where the user data begins from. 
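For readers who do not have the `common-pmem_map_file.c` helper at hand, below is a standalone sketch (assuming libpmem >= 1.6, as listed in INSTALL.md) of how a file on a filesystem DAX mount can be mapped and registered. It is not the example's actual helper, which additionally performs a signature check and tracks a data offset inside the mapped file; the function name `map_and_register` is illustrative only.

```c
#include <stdio.h>
#include <sys/stat.h>
#include <libpmem.h>
#include <librpma.h>

/*
 * Maps a file on FsDAX (creating it if needed) and registers the mapped
 * range, e.g. as the read source used by this example's client.
 * For Device DAX, libpmem requires len == 0 and no PMEM_FILE_CREATE flag.
 */
static int map_and_register(struct rpma_peer *peer, const char *path,
		size_t len, void **ptr_out, struct rpma_mr_local **mr_ptr)
{
	size_t mapped_len = 0;
	int is_pmem = 0;

	void *ptr = pmem_map_file(path, len, PMEM_FILE_CREATE, 0600,
			&mapped_len, &is_pmem);
	if (ptr == NULL)
		return -1;

	if (!is_pmem)
		(void) fprintf(stderr, "%s is not persistent memory\n", path);

	int ret = rpma_mr_reg(peer, ptr, mapped_len, RPMA_MR_USAGE_READ_SRC,
			mr_ptr);
	if (ret) {
		(void) pmem_unmap(ptr, mapped_len);
		return ret;
	}

	*ptr_out = ptr;

	return 0;
}
```

Writes made to such a mapping (like the client's translation of the greeting) have to be flushed, e.g. with `pmem_persist()`, to become durable; that is what the example's `mem.persist()` callback is expected to do on PMem (via libpmem or libpmem2).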
rpma-1.3.0/examples/03-read-to-persistent/client.c000066400000000000000000000073461443364775400217160ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * client.c -- a client of the read-to-persistent example * * Please see README.md for a detailed description of this example. */ #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "common-utils.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s [ []]\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; struct hello_t *hello = NULL; #ifdef USE_PMEM if (argc >= 4) { char *path = argv[3]; if (argc >= 5) mem.offset = strtoul_noerror(argv[4]); ret = common_pmem_map_file_with_signature_check(path, HELLO_T_SIZE, &mem, init_hello); if (ret) goto err_free; hello = (struct hello_t *)((uintptr_t)mem.mr_ptr + mem.data_offset); } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_T_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_T_SIZE; hello = (struct hello_t *)mem.mr_ptr; /* write an initial value */ write_hello_str(hello, en); } (void) printf("Next value: %s\n", hello->str); /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_peer_delete; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the server read */ struct common_data data = {0}; data.data_offset = mem.data_offset + HELLO_STR_OFFSET; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; /* establish a new connection to a server listening at addr:port */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); ret = client_connect(peer, addr, port, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; /* * Between the connection being established and the connection being * closed the server will perform the RDMA read. */ (void) common_wait_for_conn_close_and_disconnect(&conn); /* * Translate the message so the next time the greeting will be * surprising. 
*/ translate(hello); #ifdef USE_PMEM if (mem.is_pmem) { mem.persist(hello, HELLO_T_SIZE); } #endif /* USE_PMEM */ (void) printf("Translation: %s\n", hello->str); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) { free(mem.mr_ptr); } return ret; } rpma-1.3.0/examples/03-read-to-persistent/server.c000066400000000000000000000117721443364775400217440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server.c -- a server of the read-to-persistent example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s []\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *dst_mr = NULL; struct rpma_mr_remote *src_mr = NULL; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; ret = common_pmem_map_file_with_signature_check(pmem_path, HELLO_STR_SIZE, &mem, NULL); if (ret) goto err_free; } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_STR_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_STR_SIZE; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; struct ibv_wc wc; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_READ_DST, &dst_mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(dst_mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM */ /* * Wait for an incoming connection request, accept it and wait for its * establishment. 
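 *
 * server_accept_connection() is one of the examples' shared helpers (see
 * common-conn.c); it is expected to obtain the request with
 * rpma_ep_next_conn_req(), accept it with rpma_conn_req_connect() and then
 * wait for the RPMA_CONN_ESTABLISHED event. The multiple-connections example
 * later in this tree performs the same steps explicitly.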
*/ ret = server_accept_connection(ep, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* obtain the remote memory description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_disconnect; struct common_data *src_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&src_data->descriptors[0], src_data->mr_desc_size, &src_mr); if (ret) goto err_disconnect; /* if the string content is not empty */ if (((char *)mem.mr_ptr + mem.data_offset)[0] != '\0') { (void) printf("Old value: %s\n", (char *)mem.mr_ptr + mem.data_offset); } ret = rpma_read(conn, dst_mr, mem.data_offset, src_mr, src_data->data_offset, HELLO_STR_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_read() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_RDMA_READ) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value (%d != %d)\n", wc.opcode, IBV_WC_RDMA_READ); goto err_mr_remote_delete; } #ifdef USE_PMEM if (mem.is_pmem) { mem.persist((char *)mem.mr_ptr + mem.data_offset, HELLO_STR_SIZE); } #endif /* USE_PMEM */ (void) printf("New value: %s\n", (char *)mem.mr_ptr + mem.data_offset); err_mr_remote_delete: (void) rpma_mr_remote_delete(&src_mr); err_disconnect: /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection * structure. */ ret |= common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&dst_mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret ? 
-1 : 0; } rpma-1.3.0/examples/04-write-to-persistent/000077500000000000000000000000001443364775400205025ustar00rootroot00000000000000rpma-1.3.0/examples/04-write-to-persistent/CMakeLists.txt000066400000000000000000000023441443364775400232450ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(write-to-persistent C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server SRCS server.c) add_example_with_pmem(NAME client SRCS client.c ../common/common-utils.c) rpma-1.3.0/examples/04-write-to-persistent/README.md000066400000000000000000000031011443364775400217540ustar00rootroot00000000000000Example of performing an RPMA write to a persistent memory region === The write example implements two parts of the write process: - The server, if provided (and capable of), prepares a local persistent memory and exposes the memory description along with other parameters required to perform a RDMA write. After the connection is established the server waits for the client to disconnect. - The client, if provided (and capable of), prepares a local persistent memory (including its contents) and registers it as a writing source. After the connection is established the client receives the server's memory regions registered as a writing destination. The client performs a write from the local memory region to a remote memory region. For flushing the posted write, it reads eight bytes from the same remote buffer to a local read-after-write DRAM buffer. **Note**: If either server or client does not have a pmem path (or it is not capable to use pmem at all) it uses DRAM instead. **Note**: For the sake of this example, the memory region being write from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port [ []] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example) and `` is an offset inside the above mentioned PMem device where the user data begins from. rpma-1.3.0/examples/04-write-to-persistent/client.c000066400000000000000000000136511443364775400221320ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the write-to-persistent example * * Please see README.md for a detailed description of this example. 
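 *
 * The distinctive part of this client is how the write gets "flushed": the
 * write itself asks for a completion only on error and a small read from the
 * same remote region is the operation actually waited for. The helper below
 * (its name is hypothetical) is a condensed sketch of that sequence; the
 * registration, connection setup and cleanup are left to main() further down.
 */

#include <infiniband/verbs.h>
#include <librpma.h>

static int
write_then_read_after_write(struct rpma_conn *conn,
		struct rpma_mr_remote *dst_mr, size_t dst_offset,
		struct rpma_mr_local *src_mr, size_t src_offset, size_t len,
		struct rpma_mr_local *raw_mr, size_t raw_len, struct ibv_wc *wc)
{
	/* post the write; generate a completion only if it fails */
	int ret = rpma_write(conn, dst_mr, dst_offset, src_mr, src_offset, len,
			RPMA_F_COMPLETION_ON_ERROR, NULL);
	if (ret)
		return ret;

	/* the read from the just-written region serves as the flushing primitive */
	ret = rpma_read(conn, raw_mr, 0, dst_mr, 0, raw_len,
			RPMA_F_COMPLETION_ALWAYS, NULL);
	if (ret)
		return ret;

	/* wait for the read's completion on the connection's main CQ */
	struct rpma_cq *cq = NULL;
	ret = rpma_conn_get_cq(conn, &cq);
	if (ret)
		return ret;
	ret = rpma_cq_wait(cq);
	if (ret)
		return ret;

	/* the caller checks wc->status and wc->opcode (IBV_WC_RDMA_READ) */
	return rpma_cq_get_wc(cq, 1, wc, NULL);
}

/*
 * Pairing RPMA_F_COMPLETION_ON_ERROR on the write with a single
 * RPMA_F_COMPLETION_ALWAYS read keeps completion-queue traffic low: on
 * success only the last operation in the sequence is reported and waited for.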
*/ #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "common-utils.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s [ []]\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #ifdef TEST_USE_CMOCKA #include "cmocka_headers.h" #include "cmocka_alloc.h" #endif #ifdef TEST_MOCK_MAIN #define main client_main #endif /* read-after-write buffer size */ #define RAW_BUFFER_SIZE 8 int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_remote *dst_mr = NULL; size_t dst_size = 0; size_t dst_offset = 0; struct rpma_mr_local *src_mr = NULL; struct ibv_wc wc; /* read-after-write memory region */ void *raw = NULL; struct rpma_mr_local *raw_mr = NULL; struct hello_t *hello = NULL; #ifdef USE_PMEM if (argc >= 4) { char *path = argv[3]; if (argc >= 5) mem.offset = strtoul_noerror(argv[4]); ret = common_pmem_map_file_with_signature_check(path, HELLO_T_SIZE, &mem, init_hello); if (ret) goto err_free; hello = (struct hello_t *)((uintptr_t)mem.mr_ptr + mem.data_offset); } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_T_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_T_SIZE; hello = (struct hello_t *)mem.mr_ptr; /* write an initial value */ write_hello_str(hello, en); } /* alloc memory for the read-after-write buffer (RAW) */ raw = malloc_aligned(RAW_BUFFER_SIZE); if (raw == NULL) { ret = -1; goto err_free; } (void) printf("Next value: %s\n", hello->str); /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_peer_delete; /* register the memory RDMA write */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_SRC, &src_mr); if (ret) goto err_conn_disconnect; /* register the RAW buffer */ ret = rpma_mr_reg(peer, raw, RAW_BUFFER_SIZE, RPMA_MR_USAGE_READ_DST, &raw_mr); if (ret) goto err_mr_dereg; /* obtain the remote memory description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_mr_dereg; /* * Create a remote memory registration structure from the received * descriptor. 
*/ struct common_data *dst_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &dst_mr); if (ret) goto err_mr_dereg; dst_offset = dst_data->data_offset; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(dst_mr, &dst_size); if (ret) { goto err_mr_remote_delete; } else if (dst_size - dst_offset < HELLO_STR_SIZE) { ret = -1; fprintf(stderr, "Remote memory region size too small for writing the data of the assumed size (%zu < %d)\n", dst_size - dst_offset, HELLO_STR_SIZE); goto err_mr_remote_delete; } ret = rpma_write(conn, dst_mr, dst_offset, src_mr, (mem.data_offset + HELLO_STR_OFFSET), HELLO_STR_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) goto err_mr_remote_delete; /* the read serves here as flushing primitive */ ret = rpma_read(conn, raw_mr, 0, dst_mr, 0, RAW_BUFFER_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_read() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_RDMA_READ) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value (%d != %d)\n", wc.opcode, IBV_WC_RDMA_READ); goto err_mr_remote_delete; } /* * Translate the message so the next time the greeting will be * surprising. */ translate(hello); #ifdef USE_PMEM if (mem.is_pmem) { mem.persist(hello, HELLO_T_SIZE); } #endif /* USE_PMEM */ (void) printf("Translation: %s\n", hello->str); err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&dst_mr); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&src_mr); (void) rpma_mr_dereg(&raw_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); if (raw != NULL) free(raw); return ret; } rpma-1.3.0/examples/04-write-to-persistent/server.c000066400000000000000000000105111443364775400221520ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server.c -- a server of the write-to-persistent example * * Please see README.md for a detailed description of this example. 
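 *
 * Note on memory registration: because the client both writes to the exposed
 * region and then reads a few bytes back from it (the read-after-write used
 * as a flushing primitive), this server registers its memory with
 * RPMA_MR_USAGE_WRITE_DST | RPMA_MR_USAGE_READ_SRC, as can be seen in the
 * rpma_mr_reg() call below.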
*/ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s []\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #ifdef TEST_USE_CMOCKA #include "cmocka_headers.h" #include "cmocka_alloc.h" #endif #ifdef TEST_MOCK_MAIN #define main server_main #endif int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; /* * All of the space under the offset is intended for * the string contents. Space is assumed to be at least 1 KiB. */ ret = common_pmem_map_file_with_signature_check(pmem_path, HELLO_STR_SIZE, &mem, NULL); if (ret) goto err_free; } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_STR_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_STR_SIZE; mem.data_offset = 0; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; /* if the string content is not empty */ if (((char *)mem.mr_ptr + mem.data_offset)[0] != '\0') { (void) printf("Old value: %s\n", (char *)mem.mr_ptr + mem.data_offset); } /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_DST | RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM */ /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the client write */ struct common_data data = {0}; data.data_offset = mem.data_offset; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; /* * Wait for an incoming connection request, accept it and wait for its * establishment. */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); ret = server_accept_connection(ep, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection * structure. 
*/ ret = common_wait_for_conn_close_and_disconnect(&conn); if (ret) goto err_mr_dereg; (void) printf("New value: %s\n", (char *)mem.mr_ptr + mem.data_offset); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret; } rpma-1.3.0/examples/05-flush-to-persistent/000077500000000000000000000000001443364775400204725ustar00rootroot00000000000000rpma-1.3.0/examples/05-flush-to-persistent/CMakeLists.txt000066400000000000000000000023441443364775400232350ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(flush-to-persistent C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server SRCS server.c) add_example_with_pmem(NAME client SRCS client.c ../common/common-utils.c) rpma-1.3.0/examples/05-flush-to-persistent/README.md000066400000000000000000000035131443364775400217530ustar00rootroot00000000000000Example of performing an RPMA write and flushing it to persistence === The write example implements two parts of the write process: - The server, if provided (and capable of), prepares a local persistent memory and exposes the memory description along with other parameters required to perform a RDMA write and RDMA flush. After the connection is established the server waits for the client to disconnect. - The client, if provided (and capable of), prepares a local persistent memory (including its contents) and registers it as a writing source. After the connection is established the client receives the server's memory regions registered as a writing destination. The client performs a write from the local memory region to a remote memory region followed by RPMA flush. **Note**: For a server with PMem support it is allowed to configure assumed direct PMem write support which indicates whether RPMA_FLUSH_TYPE_PERSISTENT is supported. The client has to apply this configuration to be aware of what types of flush it can perform on the server's memory. **Note**: If either server or client does not have a pmem path (or it is not capable to use pmem at all) it uses DRAM instead. **Note**: For the sake of this example, the memory region being written to and the server's peer configuration are transferred via the connection's private data. 
In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] [] ``` ```bash [user@client]$ ./client $server_address $port [ []] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example) and `` is an offset inside the above mentioned PMem device where the user data begins from. rpma-1.3.0/examples/05-flush-to-persistent/client.c000066400000000000000000000144331443364775400221210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the flush-to-persistent example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "common-utils.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s [ []]\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #define FLUSH_ID (void *)0xF01D /* a random identifier */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_remote *dst_mr = NULL; size_t dst_size = 0; size_t dst_offset = 0; struct rpma_mr_local *src_mr = NULL; struct ibv_wc wc; struct hello_t *hello = NULL; #ifdef USE_PMEM if (argc >= 4) { char *path = argv[3]; if (argc >= 5) mem.offset = strtoul_noerror(argv[4]); ret = common_pmem_map_file_with_signature_check(path, HELLO_T_SIZE, &mem, init_hello); if (ret) goto err_free; hello = (struct hello_t *)((uintptr_t)mem.mr_ptr + mem.data_offset); } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_T_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_T_SIZE; hello = (struct hello_t *)mem.mr_ptr; /* write an initial value */ write_hello_str(hello, en); } (void) printf("Next value: %s\n", hello->str); /* RPMA resources */ struct rpma_peer_cfg *pcfg = NULL; struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; bool direct_write_to_pmem = false; enum rpma_flush_type flush_type; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_peer_delete; /* register the memory RDMA write */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_SRC, &src_mr); if (ret) goto err_conn_disconnect; /* obtain the remote side resources description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_mr_dereg; /* * Create a remote peer configuration structure from the received * descriptor and apply it to the current connection. 
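 *
 * The received private data carries two descriptors packed back to back in
 * common_data.descriptors[]: the memory region's descriptor starts at index 0
 * and the peer configuration's descriptor follows it at index mr_desc_size
 * (the server packs them in exactly this order). The applied peer
 * configuration tells this client whether the remote peer supports direct
 * write to PMem, which decides below between RPMA_FLUSH_TYPE_PERSISTENT and
 * RPMA_FLUSH_TYPE_VISIBILITY.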
*/ struct common_data *dst_data = pdata.ptr; ret = rpma_peer_cfg_from_descriptor( &dst_data->descriptors[dst_data->mr_desc_size], dst_data->pcfg_desc_size, &pcfg); if (ret) goto err_mr_dereg; ret = rpma_peer_cfg_get_direct_write_to_pmem(pcfg, &direct_write_to_pmem); ret |= rpma_conn_apply_remote_peer_cfg(conn, pcfg); (void) rpma_peer_cfg_delete(&pcfg); /* either get or apply failed */ if (ret) goto err_mr_dereg; /* * Create a remote memory registration structure from the received * descriptor. */ ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &dst_mr); if (ret) goto err_mr_dereg; dst_offset = dst_data->data_offset; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(dst_mr, &dst_size); if (ret) { goto err_mr_remote_delete; } else if (dst_size - dst_offset < HELLO_STR_SIZE) { fprintf(stderr, "Remote memory region size too small for writing the data of the assumed size (%zu < %d)\n", dst_size - dst_offset, HELLO_STR_SIZE); goto err_mr_remote_delete; } ret = rpma_write(conn, dst_mr, dst_offset, src_mr, (mem.data_offset + HELLO_STR_OFFSET), HELLO_STR_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) goto err_mr_remote_delete; /* determine the flush type */ if (direct_write_to_pmem) { printf("RPMA_FLUSH_TYPE_PERSISTENT is supported\n"); flush_type = RPMA_FLUSH_TYPE_PERSISTENT; } else { printf( "RPMA_FLUSH_TYPE_PERSISTENT is NOT supported, RPMA_FLUSH_TYPE_VISIBILITY is used instead\n"); flush_type = RPMA_FLUSH_TYPE_VISIBILITY; } ret = rpma_flush(conn, dst_mr, dst_offset, HELLO_STR_SIZE, flush_type, RPMA_F_COMPLETION_ALWAYS, FLUSH_ID); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.wr_id != (uintptr_t)FLUSH_ID) { ret = -1; (void) fprintf(stderr, "unexpected wc.wr_id value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", wc.wr_id, (uintptr_t)FLUSH_ID); goto err_mr_remote_delete; } if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_flush() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } /* * Translate the message so the next time the greeting will be * surprising. */ translate(hello); #ifdef USE_PMEM if (mem.is_pmem) { mem.persist(hello, HELLO_T_SIZE); } else #endif /* USE_PMEM */ (void) printf("Translation: %s\n", hello->str); err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&dst_mr); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&src_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) { free(mem.mr_ptr); } return ret; } rpma-1.3.0/examples/05-flush-to-persistent/server.c000066400000000000000000000122771443364775400221550ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server.c -- a server of the flush-to-persistent example * * Please see README.md for a detailed description of this example. 
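 *
 * Compared to the previous example, this server additionally advertises its
 * peer configuration (the direct-write-to-PMem capability). Both descriptors
 * end up in one struct common_data sent as the connection's private data; the
 * helper below (its name is hypothetical) is a sketch of that packing step,
 * mirroring what main() does further down.
 */

#include <stddef.h>
#include <librpma.h>
#include "common-conn.h" /* the examples' struct common_data */

static int
pack_mr_and_pcfg_descriptors(struct rpma_mr_local *mr,
		struct rpma_peer_cfg *pcfg, size_t data_offset,
		struct common_data *data)
{
	size_t mr_desc_size, pcfg_desc_size;
	int ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size);
	if (ret)
		return ret;
	ret = rpma_peer_cfg_get_descriptor_size(pcfg, &pcfg_desc_size);
	if (ret)
		return ret;

	data->data_offset = data_offset;
	data->mr_desc_size = mr_desc_size;
	data->pcfg_desc_size = pcfg_desc_size;

	/* the memory region's descriptor goes first ... */
	ret = rpma_mr_get_descriptor(mr, &data->descriptors[0]);
	if (ret)
		return ret;

	/* ... and the peer configuration's descriptor right after it */
	return rpma_peer_cfg_get_descriptor(pcfg,
			&data->descriptors[mr_desc_size]);
}

/*
 * Keeping both descriptors inside a single struct common_data means the
 * client can unpack them knowing only mr_desc_size.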
*/ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #ifdef USE_PMEM #define USAGE_STR \ "usage: %s [] [direct-pmem-write]\n"\ PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #ifdef USE_PMEM #define ON_STR "on" #endif /* USE_PMEM */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; ret = common_pmem_map_file_with_signature_check(pmem_path, HELLO_STR_SIZE, &mem, NULL); if (ret) goto err_free; } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_STR_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_STR_SIZE; mem.data_offset = 0; } /* RPMA resources */ struct rpma_peer_cfg *pcfg = NULL; struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; /* if the string content is not empty */ if (((char *)mem.mr_ptr + mem.data_offset)[0] != '\0') { (void) printf("Old value: %s\n", (char *)mem.mr_ptr + mem.data_offset); } /* create a peer configuration structure */ ret = rpma_peer_cfg_new(&pcfg); if (ret) goto err_free; #ifdef USE_PMEM /* configure peer's direct write to pmem support */ if (argc >= 5) { ret = rpma_peer_cfg_set_direct_write_to_pmem(pcfg, (strcmp(argv[4], ON_STR) == 0)); if (ret) { (void) rpma_peer_cfg_delete(&pcfg); goto err_free; } } #endif /* USE_PMEM */ /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_pcfg_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_DST | (mem.is_pmem ? 
(RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT | RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY) : RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY), &mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM */ /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* get size of the peer config descriptor */ size_t pcfg_desc_size; ret = rpma_peer_cfg_get_descriptor_size(pcfg, &pcfg_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the client write */ struct common_data data = {0}; data.data_offset = mem.data_offset; data.mr_desc_size = mr_desc_size; data.pcfg_desc_size = pcfg_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; /* * Get the peer's configuration descriptor. * The pcfg_desc descriptor is saved in the `descriptors[]` array * just after the mr_desc descriptor. */ ret = rpma_peer_cfg_get_descriptor(pcfg, &data.descriptors[mr_desc_size]); if (ret) goto err_mr_dereg; /* * Wait for an incoming connection request, accept it and wait for its * establishment. */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); ret = server_accept_connection(ep, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection * structure. */ ret = common_wait_for_conn_close_and_disconnect(&conn); if (ret) goto err_mr_dereg; (void) printf("New value: %s\n", (char *)mem.mr_ptr + mem.data_offset); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_pcfg_delete: (void) rpma_peer_cfg_delete(&pcfg); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret; } rpma-1.3.0/examples/06-multiple-connections/000077500000000000000000000000001443364775400207075ustar00rootroot00000000000000rpma-1.3.0/examples/06-multiple-connections/CMakeLists.txt000066400000000000000000000031121443364775400234440ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(multiple-connections C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() 
link_directories(${LIBRPMA_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) if(LIBPMEM_FOUND) target_include_directories(${name} PRIVATE ${LIBPMEM_INCLUDE_DIRS}) target_link_libraries(${name} ${LIBPMEM_LIBRARIES}) target_compile_definitions(${name} PRIVATE USE_LIBPMEM) endif() endfunction() add_example(server server.c ../common/common-conn.c ../common/common-epoll.c) add_example(client client.c ../common/common-conn.c) rpma-1.3.0/examples/06-multiple-connections/README.md000066400000000000000000000027671443364775400222020ustar00rootroot00000000000000Example of handling multiple connections === The multiple connections example implements two parts of the connection process: - The server starts a listening endpoint and waits for incoming connections using epoll (a type of scalable I/O technique). When a new connection request appears it is accepted, if a free client slot is available, or rejected otherwise. The new connection's file descriptors are added to the same epoll (both an event file descriptor and a completion file descriptor) so the server can use a single epoll to wait for incoming connections, connection-related events and completions. When a specific client's connection is established (for what the server waits asynchronously using epoll) the server performs a read from the client's remote memory region to a local memory region prepared for the client's slot. It should generate an IBV_WC_RDMA_READ completion (which is also notified via epoll) after which the server displays the read data (the client's name) and disconnects the client. - The client picks randomly its name and copies it into a memory region that is exposed to the server. When the connection is established it waits for the server to disconnect. **Note**: For the sake of this example, the memory region being write from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $seed ``` rpma-1.3.0/examples/06-multiple-connections/client.c000066400000000000000000000057741443364775400223460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * client.c -- a client of the multiple-connections example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include #include #include #include "common-conn.h" #include "multiple-connections-common.h" static char *Names[] = { "Andy", "Chet", "Derek", "Janek", "Kacper", "Lukasz", "Oksana", "Pawel", "Piotr", "Tomasz", "Xiang", "Xiaoran", "Xiaoyan" }; #define NAMES_NUM (sizeof(Names) / sizeof(Names[0])) #define USAGE_STR "usage: %s \n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 4) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; long unsigned seed = strtoul(argv[3], NULL, 10); if (seed == ULONG_MAX && errno == ERANGE) { (void) fprintf(stderr, "strtoul(seed) overflowed\n"); return -1; } int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* resources - memory region */ void *mr_ptr = NULL; struct rpma_mr_local *mr = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) return ret; /* allocate a memory */ mr_ptr = malloc_aligned(MAX_NAME_SIZE); if (mr_ptr == NULL) { ret = -1; goto err_peer_delete; } /* pick a name */ srand(seed % UINT_MAX); const char *name = Names[(long unsigned int)rand() % NAMES_NUM]; (void) strncpy((char *)mr_ptr, name, (MAX_NAME_SIZE - 1)); ((char *)mr_ptr)[MAX_NAME_SIZE - 1] = '\0'; printf("My names is: %s\n", (char *)mr_ptr); /* register the memory */ ret = rpma_mr_reg(peer, mr_ptr, MAX_NAME_SIZE, RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_mr_free; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; struct common_data data = {0}; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; ret = common_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_mr_free: /* free the memory */ free(mr_ptr); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); return ret; } rpma-1.3.0/examples/06-multiple-connections/multiple-connections-common.h000066400000000000000000000005211443364775400265170ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * multiple-connections-common.h -- a common declarations for the 06 example */ #ifndef EXAMPLES_MULTIPLE_CONNECTIONS_COMMON #define EXAMPLES_MULTIPLE_CONNECTIONS_COMMON #define MAX_NAME_SIZE 32 #endif /* EXAMPLES_MULTIPLE_CONNECTIONS_COMMON */ rpma-1.3.0/examples/06-multiple-connections/server.c000066400000000000000000000265001443364775400223640ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server.c -- a server of the multiple-connections example * * Please see README.md for a detailed description of this example. 
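 *
 * Design note: a single epoll instance watches three kinds of file
 * descriptors at once: the endpoint's fd (incoming connection requests), each
 * connection's event fd (connection state changes) and each connection's CQ
 * fd (completions). Every fd is registered together with a struct
 * custom_event whose func member is the callback invoked from the main
 * epoll_wait() loop.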
*/ #include #include #include #include #include #include #define USAGE_STR "usage: %s \n" #include "common-conn.h" #include "common-epoll.h" #include "multiple-connections-common.h" #define CLIENT_MAX 10 struct client_res { /* RPMA resources */ struct rpma_conn *conn; struct rpma_cq *cq; /* resources - memory regions */ size_t offset; /* events */ struct custom_event ev_conn_event; struct custom_event ev_conn_cmpl; /* parent and identifier */ struct server_res *svr; int client_id; }; struct server_res { /* RPMA resources */ struct rpma_ep *ep; /* resources - memory region */ void *dst_ptr; struct rpma_mr_local *dst_mr; /* epoll and event */ int epoll; struct custom_event ev_incoming; /* client's resources */ struct client_res clients[CLIENT_MAX]; }; /* * server_init -- initialize server's resources */ int server_init(struct server_res *svr, struct rpma_peer *peer) { int ret = 0; svr->epoll = epoll_create1(EPOLL_CLOEXEC); if (svr->epoll == -1) return errno; /* allocate a memory */ size_t dst_size = MAX_NAME_SIZE * CLIENT_MAX; svr->dst_ptr = malloc_aligned(dst_size); if (svr->dst_ptr == NULL) { close(svr->epoll); return -1; } /* register the memory */ ret = rpma_mr_reg(peer, svr->dst_ptr, dst_size, RPMA_MR_USAGE_READ_DST, &svr->dst_mr); if (ret) { free(svr->dst_ptr); close(svr->epoll); } return ret; } /* * server_fini -- release server's resources */ int server_fini(struct server_res *svr) { /* deregister the memory region */ int ret = rpma_mr_dereg(&svr->dst_mr); /* free the memory */ free(svr->dst_ptr); /* close the epoll */ if (close(svr->epoll)) { if (!ret) ret = errno; } return ret; } /* * client_new -- find a slot for the incoming client */ struct client_res * client_new(struct server_res *svr) { /* find the first free slot */ struct client_res *clnt = NULL; for (int i = 0; i < CLIENT_MAX; ++i) { clnt = &svr->clients[i]; if (clnt->conn != NULL) continue; clnt->client_id = i; clnt->svr = svr; clnt->offset = (size_t)MAX_NAME_SIZE * (size_t)i; clnt->ev_conn_cmpl.fd = -1; clnt->ev_conn_event.fd = -1; break; } return clnt; } void client_handle_completion(struct custom_event *ce); void client_handle_connection_event(struct custom_event *ce); /* * client_add_to_epoll -- add all client's file descriptors to epoll */ int client_add_to_epoll(struct client_res *clnt, int epoll) { /* get the connection's event fd and add it to epoll */ int fd; int ret = rpma_conn_get_event_fd(clnt->conn, &fd); if (ret) return ret; ret = epoll_add(epoll, fd, clnt, client_handle_connection_event, &clnt->ev_conn_event); if (ret) return ret; /* get the connection's completion fd and add it to epoll */ ret = rpma_cq_get_fd(clnt->cq, &fd); if (ret) { epoll_delete(epoll, &clnt->ev_conn_event); return ret; } ret = epoll_add(epoll, fd, clnt, client_handle_completion, &clnt->ev_conn_cmpl); if (ret) epoll_delete(epoll, &clnt->ev_conn_event); return ret; } /* * client_delete -- release client's resources */ void client_delete(struct client_res *clnt) { struct server_res *svr = clnt->svr; if (clnt->ev_conn_cmpl.fd != -1) epoll_delete(svr->epoll, &clnt->ev_conn_cmpl); if (clnt->ev_conn_event.fd != -1) epoll_delete(svr->epoll, &clnt->ev_conn_event); /* delete the connection and set conn to NULL */ (void) rpma_conn_delete(&clnt->conn); } /* * client_handle_name -- print client's name */ void client_handle_name(struct client_res *clnt, void *ptr) { /* print received name of the client */ char *name = (char *)ptr + clnt->offset; printf("- %s\n", name); } /* * client_handle_completion -- callback on completion is ready * * The only 
expected completion in this example is a success of * the IBV_WC_RDMA_READ after which the read client's name is printed * to the output. * No matter what, the disconnection process will be initiated. */ void client_handle_completion(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; const struct server_res *svr = clnt->svr; /* wait for the completion to be ready */ int ret = rpma_cq_wait(clnt->cq); if (ret) { /* no completion is ready - continue */ if (ret == RPMA_E_NO_COMPLETION) return; /* another error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } /* get next completion */ struct ibv_wc wc; ret = rpma_cq_get_wc(clnt->cq, 1, &wc, NULL); if (ret) { /* no completion is ready - continue */ if (ret == RPMA_E_NO_COMPLETION) return; /* another error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } /* validate received completion */ if (wc.status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "[%d] rpma_read() failed: %s\n", clnt->client_id, ibv_wc_status_str(wc.status)); (void) rpma_conn_disconnect(clnt->conn); return; } if (wc.opcode != IBV_WC_RDMA_READ) { (void) fprintf(stderr, "[%d] received unexpected wc.opcode value (%d != %d)\n", clnt->client_id, wc.opcode, IBV_WC_RDMA_READ); (void) rpma_conn_disconnect(clnt->conn); return; } /* print received name of the client */ client_handle_name(clnt, svr->dst_ptr); /* initiate disconnection process */ (void) rpma_conn_disconnect(clnt->conn); } /* * client_fetch_name -- read the client's name from the remote memory region */ int client_fetch_name(struct client_res *clnt, struct rpma_mr_local *dst) { /* get connection's private data */ struct rpma_conn_private_data pdata; int ret = rpma_conn_get_private_data(clnt->conn, &pdata); if (ret) { (void) fprintf(stderr, "rpma_conn_get_private_data() failed\n"); return -1; } if (pdata.len < sizeof(struct common_data)) { (void) fprintf(stderr, "[%d] received connection's private data is too small (%d < %zu)\n", clnt->client_id, pdata.len, sizeof(struct common_data)); return -1; } /* prepare a remote memory region */ struct common_data *dst_data = pdata.ptr; struct rpma_mr_remote *src_mr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &src_mr); if (ret) return ret; /* read client's name from the remote memory region */ ret = rpma_read(clnt->conn, dst, clnt->offset, src_mr, 0, MAX_NAME_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) { (void) rpma_mr_remote_delete(&src_mr); return ret; } /* delete the remote memory region's object */ (void) rpma_mr_remote_delete(&src_mr); return 0; } /* * client_handle_is_ready -- callback on connection is established * * The client should send via the private data the remote memory regions * descriptor which allows reading client's name. * If any of the required steps fail the client will be disconnected. 
*/ void client_handle_is_ready(struct client_res *clnt) { const struct server_res *svr = clnt->svr; if (client_fetch_name(clnt, svr->dst_mr)) (void) rpma_conn_disconnect(clnt->conn); } /* * client_handle_connection_event -- callback on connection's next event */ void client_handle_connection_event(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; /* get next connection's event */ enum rpma_conn_event event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(clnt->conn, &event); if (ret) { if (ret == RPMA_E_NO_EVENT) return; (void) rpma_conn_disconnect(clnt->conn); return; } /* proceed to the callback specific to the received event */ switch (event) { case RPMA_CONN_ESTABLISHED: client_handle_is_ready(clnt); break; case RPMA_CONN_CLOSED: default: client_delete(clnt); break; } } /* * server_handle_incoming_client -- callback on endpoint's next incoming * connection * * Get the connection request. If there is not free slots reject it. Otherwise, * accept the incoming connection, get the event and completion file * descriptors, set O_NONBLOCK flag for both of them and add events to * the epoll. * If error will occur at any of the required steps the client is disconnected. */ void server_handle_incoming_client(struct custom_event *ce) { struct server_res *svr = (struct server_res *)ce->arg; /* receive an incoming connection request */ struct rpma_conn_req *req = NULL; if (rpma_ep_next_conn_req(svr->ep, NULL, &req)) return; /* if no free slot is available */ struct client_res *clnt = NULL; if ((clnt = client_new(svr)) == NULL) { rpma_conn_req_delete(&req); return; } /* accept the connection request and obtain the connection object */ if (rpma_conn_req_connect(&req, NULL, &clnt->conn)) /* * When rpma_conn_req_connect() fails the connection pointer * remains unchanged (in this case it is NULL) so the server * would choose the same client slot if another client will * come. No additional cleanup needed. 
*/ return; /* get the connection's main CQ */ if (rpma_conn_get_cq(clnt->conn, &clnt->cq)) { /* an error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } if (client_add_to_epoll(clnt, svr->epoll)) (void) rpma_conn_disconnect(clnt->conn); } int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret, ret2; /* RPMA resources - general */ struct rpma_peer *peer = NULL; /* server resource */ struct server_res svr = {0}; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) return ret; /* initialize the server's structure */ ret = server_init(&svr, peer); if (ret) goto err_peer_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &svr.ep); if (ret) goto err_server_fini; /* get the endpoint's event file descriptor and add it to epoll */ int ep_fd; ret = rpma_ep_get_fd(svr.ep, &ep_fd); if (ret) goto err_ep_shutdown; ret = epoll_add(svr.epoll, ep_fd, &svr, server_handle_incoming_client, &svr.ev_incoming); if (ret) goto err_ep_shutdown; (void) printf("Today I have got in touch with:\n"); /* process epoll's events */ struct epoll_event event = {0}; struct custom_event *ce; while ((ret = epoll_wait(svr.epoll, &event, 1 /* # of events */, TIMEOUT_15S)) == 1) { ce = (struct custom_event *)event.data.ptr; ce->func(ce); } /* disconnect all remaining client's */ for (int i = 0; i < CLIENT_MAX; ++i) { if (svr.clients[i].conn == NULL) continue; (void) rpma_conn_disconnect(svr.clients[i].conn); (void) rpma_conn_delete(&svr.clients[i].conn); } if (ret == 0) (void) fprintf(stderr, "Server timed out.\n"); err_ep_shutdown: /* shutdown the endpoint */ ret2 = rpma_ep_shutdown(&svr.ep); if (!ret) ret = ret2; err_server_fini: /* release the server's resources */ ret2 = server_fini(&svr); if (!ret) ret = ret2; err_peer_delete: /* delete the peer object */ ret2 = rpma_peer_delete(&peer); if (!ret) ret = ret2; return ret; } rpma-1.3.0/examples/06scch-multiple-connections/000077500000000000000000000000001443364775400215505ustar00rootroot00000000000000rpma-1.3.0/examples/06scch-multiple-connections/CMakeLists.txt000066400000000000000000000031121443364775400243050ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(multiple-connections C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() 
link_directories(${LIBRPMA_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) if(LIBPMEM_FOUND) target_include_directories(${name} PRIVATE ${LIBPMEM_INCLUDE_DIRS}) target_link_libraries(${name} ${LIBPMEM_LIBRARIES}) target_compile_definitions(${name} PRIVATE USE_LIBPMEM) endif() endfunction() add_example(server server.c ../common/common-conn.c ../common/common-epoll.c) add_example(client client.c ../common/common-conn.c) rpma-1.3.0/examples/06scch-multiple-connections/README.md000066400000000000000000000033031443364775400230260ustar00rootroot00000000000000Example of handling multiple connections with shared completion channel === The multiple connections example implements two parts of the connection process: - The server starts a listening endpoint and waits for incoming connections using epoll (a type of scalable I/O technique). When a new connection request appears it is accepted, if a free client slot is available, or rejected otherwise. The new connection's file descriptors are added to the same epoll (both an event file descriptor and a completion file descriptor) so the server can use a single epoll to wait for incoming connections, connection-related events and completions. When a specific client's connection is established (for what the server waits asynchronously using epoll) the server performs a read from the client's remote memory region to a local memory region prepared for the client's slot. It should generate an IBV_WC_RDMA_READ completion (which is also notified via epoll) after which the server displays the read data (the client's name) and disconnects the client. - The client picks randomly its name and copies it into a memory region that is exposed to the server. When the connection is established it waits for the server to disconnect. **Note**: For the sake of this example, the memory region being write from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. **Note**: In this example, we use the shared completion event channel for CQ for completions of send. We use the rpma_conn_wait() function to wait for completions' events. ## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $seed ``` rpma-1.3.0/examples/06scch-multiple-connections/client.c000066400000000000000000000057741443364775400232070ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * client.c -- a client of the multiple-connections example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include #include #include #include "common-conn.h" #include "multiple-connections-common.h" static char *Names[] = { "Andy", "Chet", "Derek", "Janek", "Kacper", "Lukasz", "Oksana", "Pawel", "Piotr", "Tomasz", "Xiang", "Xiaoran", "Xiaoyan" }; #define NAMES_NUM (sizeof(Names) / sizeof(Names[0])) #define USAGE_STR "usage: %s \n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 4) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; long unsigned seed = strtoul(argv[3], NULL, 10); if (seed == ULONG_MAX && errno == ERANGE) { (void) fprintf(stderr, "strtoul(seed) overflowed\n"); return -1; } int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* resources - memory region */ void *mr_ptr = NULL; struct rpma_mr_local *mr = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) return ret; /* allocate a memory */ mr_ptr = malloc_aligned(MAX_NAME_SIZE); if (mr_ptr == NULL) { ret = -1; goto err_peer_delete; } /* pick a name */ srand(seed % UINT_MAX); const char *name = Names[(long unsigned int)rand() % NAMES_NUM]; (void) strncpy((char *)mr_ptr, name, (MAX_NAME_SIZE - 1)); ((char *)mr_ptr)[MAX_NAME_SIZE - 1] = '\0'; printf("My names is: %s\n", (char *)mr_ptr); /* register the memory */ ret = rpma_mr_reg(peer, mr_ptr, MAX_NAME_SIZE, RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_mr_free; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; struct common_data data = {0}; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; ret = common_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_mr_free: /* free the memory */ free(mr_ptr); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); return ret; } rpma-1.3.0/examples/06scch-multiple-connections/multiple-connections-common.h000066400000000000000000000005211443364775400273600ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * multiple-connections-common.h -- a common declarations for the 06 example */ #ifndef EXAMPLES_MULTIPLE_CONNECTIONS_COMMON #define EXAMPLES_MULTIPLE_CONNECTIONS_COMMON #define MAX_NAME_SIZE 32 #endif /* EXAMPLES_MULTIPLE_CONNECTIONS_COMMON */ rpma-1.3.0/examples/06scch-multiple-connections/server.c000066400000000000000000000275011443364775400232270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server.c -- a server of the multiple-connections with shared completion channel example * * Please see README.md for a detailed description of this example. 
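 *
 * In short, the server keeps all file descriptors (the listening endpoint's fd plus
 * each connection's event fd and completion fd) in a single epoll instance. For every
 * established connection it reads the client's name with rpma_read() into the slot
 * reserved for that client, prints the name once the read completes and then
 * initiates the disconnect.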
*/ #include #include #include #include #include #include #define USAGE_STR "usage: %s \n" #include "common-conn.h" #include "common-epoll.h" #include "multiple-connections-common.h" #define CLIENT_MAX 10 struct client_res { /* RPMA resources */ struct rpma_conn *conn; struct rpma_cq *cq; /* resources - memory regions */ size_t offset; /* events */ struct custom_event ev_conn_event; struct custom_event ev_conn_cmpl; /* parent and identifier */ struct server_res *svr; int client_id; }; struct server_res { /* RPMA resources */ struct rpma_ep *ep; /* resources - memory region */ void *dst_ptr; struct rpma_mr_local *dst_mr; /* epoll and event */ int epoll; struct custom_event ev_incoming; /* client's resources */ struct client_res clients[CLIENT_MAX]; /* connection configuration */ struct rpma_conn_cfg *cfg; }; /* * server_init -- initialize server's resources */ int server_init(struct server_res *svr, struct rpma_peer *peer) { int ret = 0; svr->epoll = epoll_create1(EPOLL_CLOEXEC); if (svr->epoll == -1) return errno; /* allocate a memory */ size_t dst_size = MAX_NAME_SIZE * CLIENT_MAX; svr->dst_ptr = malloc_aligned(dst_size); if (svr->dst_ptr == NULL) { close(svr->epoll); return -1; } /* register the memory */ ret = rpma_mr_reg(peer, svr->dst_ptr, dst_size, RPMA_MR_USAGE_READ_DST, &svr->dst_mr); if (ret) goto err_mr_free; /* create connection configuration */ svr->cfg = NULL; ret = rpma_conn_cfg_new(&svr->cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_compl_channel(svr->cfg, true); if (ret) goto err_cfg_delete; return ret; err_cfg_delete: rpma_conn_cfg_delete(&svr->cfg); err_mr_dereg: rpma_mr_dereg(&svr->dst_mr); err_mr_free: free(svr->dst_ptr); close(svr->epoll); return ret; } /* * server_fini -- release server's resources */ int server_fini(struct server_res *svr) { /* delete connection configuration */ int ret = rpma_conn_cfg_delete(&svr->cfg); /* deregister the memory region */ int ret2 = rpma_mr_dereg(&svr->dst_mr); if (!ret) ret = ret2; /* free the memory */ free(svr->dst_ptr); /* close the epoll */ if (close(svr->epoll)) { if (!ret) ret = errno; } return ret; } /* * client_new -- find a slot for the incoming client */ struct client_res * client_new(struct server_res *svr) { /* find the first free slot */ struct client_res *clnt = NULL; for (int i = 0; i < CLIENT_MAX; ++i) { clnt = &svr->clients[i]; if (clnt->conn != NULL) continue; clnt->client_id = i; clnt->svr = svr; clnt->offset = (size_t)MAX_NAME_SIZE * (size_t)i; clnt->ev_conn_cmpl.fd = -1; clnt->ev_conn_event.fd = -1; break; } return clnt; } void client_handle_completion(struct custom_event *ce); void client_handle_connection_event(struct custom_event *ce); /* * client_add_to_epoll -- add all client's file descriptors to epoll */ int client_add_to_epoll(struct client_res *clnt, int epoll) { /* get the connection's event fd and add it to epoll */ int fd; int ret = rpma_conn_get_event_fd(clnt->conn, &fd); if (ret) return ret; ret = epoll_add(epoll, fd, clnt, client_handle_connection_event, &clnt->ev_conn_event); if (ret) return ret; /* get the connection's completion fd and add it to epoll */ ret = rpma_cq_get_fd(clnt->cq, &fd); if (ret) { epoll_delete(epoll, &clnt->ev_conn_event); return ret; } ret = epoll_add(epoll, fd, clnt, client_handle_completion, &clnt->ev_conn_cmpl); if (ret) epoll_delete(epoll, &clnt->ev_conn_event); return ret; } /* * client_delete -- release client's resources */ void client_delete(struct client_res *clnt) { struct server_res *svr = clnt->svr; if (clnt->ev_conn_cmpl.fd != -1) 
epoll_delete(svr->epoll, &clnt->ev_conn_cmpl); if (clnt->ev_conn_event.fd != -1) epoll_delete(svr->epoll, &clnt->ev_conn_event); /* delete the connection and set conn to NULL */ (void) rpma_conn_delete(&clnt->conn); } /* * client_handle_name -- print client's name */ void client_handle_name(struct client_res *clnt, void *ptr) { /* print received name of the client */ char *name = (char *)ptr + clnt->offset; printf("- %s\n", name); } /* * client_handle_completion -- callback on completion is ready * * The only expected completion in this example is a success of * the IBV_WC_RDMA_READ after which the read client's name is printed * to the output. * No matter what, the disconnection process will be initiated. */ void client_handle_completion(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; const struct server_res *svr = clnt->svr; struct rpma_cq *cq = NULL; /* wait for the completion to be ready */ int ret = rpma_conn_wait(clnt->conn, 0, &cq, NULL); if (ret) { /* no completion is ready - continue */ if (ret == RPMA_E_NO_COMPLETION) return; /* another error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } /* get next completion */ struct ibv_wc wc; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) { /* no completion is ready - continue */ if (ret == RPMA_E_NO_COMPLETION) return; /* another error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } /* validate received completion */ if (wc.status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "[%d] rpma_read() failed: %s\n", clnt->client_id, ibv_wc_status_str(wc.status)); (void) rpma_conn_disconnect(clnt->conn); return; } if (wc.opcode != IBV_WC_RDMA_READ) { (void) fprintf(stderr, "[%d] received unexpected wc.opcode value (%d != %d)\n", clnt->client_id, wc.opcode, IBV_WC_RDMA_READ); (void) rpma_conn_disconnect(clnt->conn); return; } /* print received name of the client */ client_handle_name(clnt, svr->dst_ptr); /* initiate disconnection process */ (void) rpma_conn_disconnect(clnt->conn); } /* * client_fetch_name -- read the client's name from the remote memory region */ int client_fetch_name(struct client_res *clnt, struct rpma_mr_local *dst) { /* get connection's private data */ struct rpma_conn_private_data pdata; int ret = rpma_conn_get_private_data(clnt->conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) { (void) fprintf(stderr, "[%d] received connection's private data is too small (%d < %zu)\n", clnt->client_id, pdata.len, sizeof(struct common_data)); return -1; } /* prepare a remote memory region */ struct common_data *dst_data = pdata.ptr; struct rpma_mr_remote *src_mr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &src_mr); if (ret) return ret; /* read client's name from the remote memory region */ ret = rpma_read(clnt->conn, dst, clnt->offset, src_mr, 0, MAX_NAME_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) { (void) rpma_mr_remote_delete(&src_mr); return ret; } /* delete the remote memory region's object */ (void) rpma_mr_remote_delete(&src_mr); return 0; } /* * client_handle_is_ready -- callback on connection is established * * The client should send via the private data the remote memory regions * descriptor which allows reading client's name. * If any of the required steps fail the client will be disconnected. 
*/ void client_handle_is_ready(struct client_res *clnt) { const struct server_res *svr = clnt->svr; if (client_fetch_name(clnt, svr->dst_mr)) (void) rpma_conn_disconnect(clnt->conn); } /* * client_handle_connection_event -- callback on connection's next event */ void client_handle_connection_event(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; /* get next connection's event */ enum rpma_conn_event event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(clnt->conn, &event); if (ret) { if (ret == RPMA_E_NO_EVENT) return; (void) rpma_conn_disconnect(clnt->conn); return; } /* proceed to the callback specific to the received event */ switch (event) { case RPMA_CONN_ESTABLISHED: client_handle_is_ready(clnt); break; case RPMA_CONN_CLOSED: default: client_delete(clnt); break; } } /* * server_handle_incoming_client -- callback on endpoint's next incoming * connection * * Get the connection request. If there is not free slots reject it. Otherwise, * accept the incoming connection, get the event and completion file * descriptors, set O_NONBLOCK flag for both of them and add events to * the epoll. * If error will occur at any of the required steps the client is disconnected. */ void server_handle_incoming_client(struct custom_event *ce) { struct server_res *svr = (struct server_res *)ce->arg; /* receive an incoming connection request */ struct rpma_conn_req *req = NULL; if (rpma_ep_next_conn_req(svr->ep, svr->cfg, &req)) return; /* if no free slot is available */ struct client_res *clnt = NULL; if ((clnt = client_new(svr)) == NULL) { rpma_conn_req_delete(&req); return; } /* accept the connection request and obtain the connection object */ if (rpma_conn_req_connect(&req, NULL, &clnt->conn)) /* * When rpma_conn_req_connect() fails the connection pointer * remains unchanged (in this case it is NULL) so the server * would choose the same client slot if another client will * come. No additional cleanup needed. 
*/ return; /* get the connection's main CQ */ if (rpma_conn_get_cq(clnt->conn, &clnt->cq)) { /* an error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } if (client_add_to_epoll(clnt, svr->epoll)) (void) rpma_conn_disconnect(clnt->conn); } int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret, ret2; /* RPMA resources - general */ struct rpma_peer *peer = NULL; /* server resource */ struct server_res svr = {0}; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) return ret; /* initialize the server's structure */ ret = server_init(&svr, peer); if (ret) goto err_peer_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &svr.ep); if (ret) goto err_server_fini; /* get the endpoint's event file descriptor and add it to epoll */ int ep_fd; ret = rpma_ep_get_fd(svr.ep, &ep_fd); if (ret) goto err_ep_shutdown; ret = epoll_add(svr.epoll, ep_fd, &svr, server_handle_incoming_client, &svr.ev_incoming); if (ret) goto err_ep_shutdown; (void) printf("Today I have got in touch with:\n"); /* process epoll's events */ struct epoll_event event = {0}; struct custom_event *ce; while ((ret = epoll_wait(svr.epoll, &event, 1 /* # of events */, TIMEOUT_15S)) == 1) { ce = (struct custom_event *)event.data.ptr; ce->func(ce); } /* disconnect all remaining client's */ for (int i = 0; i < CLIENT_MAX; ++i) { if (svr.clients[i].conn == NULL) continue; (void) rpma_conn_disconnect(svr.clients[i].conn); (void) rpma_conn_delete(&svr.clients[i].conn); } if (ret == 0) (void) fprintf(stderr, "Server timed out.\n"); err_ep_shutdown: /* shutdown the endpoint */ ret2 = rpma_ep_shutdown(&svr.ep); if (!ret) ret = ret2; err_server_fini: /* release the server's resources */ ret2 = server_fini(&svr); if (!ret) ret = ret2; err_peer_delete: /* delete the peer object */ ret2 = rpma_peer_delete(&peer); if (!ret) ret = ret2; return ret; } rpma-1.3.0/examples/07-atomic-write/000077500000000000000000000000001443364775400171415ustar00rootroot00000000000000rpma-1.3.0/examples/07-atomic-write/CMakeLists.txt000066400000000000000000000023661443364775400217100ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(atomic-write C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS 
REQUIRED libibverbs) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server SRCS server.c) add_example_with_pmem(NAME client SRCS client.c) rpma-1.3.0/examples/07-atomic-write/README.md000066400000000000000000000037721443364775400204310ustar00rootroot00000000000000Example of performing an RPMA atomic write to a persistent memory region === The atomic write example shows a simple remote log manipulation where two parts of the process are implemented: - The server, if provided (and capable of), prepares a local persistent memory. The memory is filled with an initial log state. The simplest log structure looks as follow: ``` struct log { char signature[LOG_SIGNATURE_SIZE]; /* last written data (note the field is aligned to RPMA_ATOMIC_WRITE_ALIGNMENT) */ uint64_t used; char data[LOG_DATA_SIZE]; }; ``` The client along with the exposed memory descriptor receives an offset of the used field. Which is enough to read the current state of the log and perform an append. After the connection is established the server waits for the client to disconnect. - The client, after the connection is established, receives the server's memory descriptor and the used offset. The client reads the used value. Having that, the client writes and flushes the new data at the end of the log. Without waiting for both operations completions the client can perform an atomic write of the new used value and flush it. The client has to wait for the last flush completion before appending another entry to the log. When the client is done it simply disconnects from the server. **Note**: For the sake of simplicity, the client assumes unilaterally that the server has the persistent flush support. **Note**: If server does not have a pmem path (or it is not capable to use pmem at all) it uses DRAM instead. **Note**: For the sake of this example, the memory region being write from is transferred via connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port $word1 [] [<...>] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example). rpma-1.3.0/examples/07-atomic-write/client.c000066400000000000000000000140241443364775400205640ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the atomic-write example * * Please see README.md for a detailed description of this example. 
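 *
 * In short, the client first reads the log's 'used' offset from the server, then for
 * every word given on the command line it writes and flushes the data, atomically
 * updates 'used' with rpma_atomic_write() and waits only for the completion of the
 * last flush in the round before appending the next word.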
*/ #include #include #include #include #include "common-conn.h" #define USAGE_STR "usage: %s [] [..]\n" #define FLUSH_ID (void *)0xF01D /* a random identifier */ #define MAX_WORD_LENGTH (KILOBYTE - 1) int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 4) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ void *mr_ptr; size_t mr_size; struct rpma_mr_remote *remote_mr = NULL; size_t remote_size = 0; size_t dst_used_offset = 0; struct rpma_mr_local *local_mr = NULL; struct ibv_wc wc; union { uint64_t uint64; char buf[8]; /* atomic write requires exactly 8-bytes buffer */ } used; /* prepare memory */ mr_size = KILOBYTE; mr_ptr = malloc_aligned(mr_size); if (mr_ptr == NULL) return -1; /* RPMA resources */ struct rpma_peer_cfg *pcfg = NULL; struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_peer_delete; /* * Create a remote peer's configuration structure, enable persistent flush support * and apply it to the current connection. (unilaterally) */ ret = rpma_peer_cfg_new(&pcfg); if (ret) goto err_conn_disconnect; ret = rpma_peer_cfg_set_direct_write_to_pmem(pcfg, true); if (ret) goto err_peer_cfg_delete; ret = rpma_conn_apply_remote_peer_cfg(conn, pcfg); if (ret) goto err_peer_cfg_delete; /* register the memory for the remote log manipulation */ ret = rpma_mr_reg(peer, mr_ptr, mr_size, RPMA_MR_USAGE_WRITE_SRC | RPMA_MR_USAGE_READ_DST, &local_mr); if (ret) goto err_peer_cfg_delete; /* obtain the remote memory description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_mr_dereg; /* * Create a remote memory registration structure from the received descriptor. 
*/ struct common_data *dst_data = pdata.ptr; dst_used_offset = dst_data->data_offset; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &remote_mr); if (ret) goto err_mr_dereg; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(remote_mr, &remote_size); if (ret) goto err_mr_remote_delete; /* read the used value */ ret = rpma_read(conn, local_mr, 0, remote_mr, dst_used_offset, sizeof(uint64_t), RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "rpma_read() failed: %s\n", ibv_wc_status_str(wc.status)); ret = -1; goto err_mr_remote_delete; } memcpy(&used, mr_ptr, sizeof(used)); printf("used value: %lu\n", used.uint64); if (remote_size <= used.uint64) { fprintf(stderr, "Log size exhausted.\n"); ret = -1; goto err_mr_remote_delete; } enum rpma_flush_type flush_type; int remote_flush_type; ret = rpma_mr_remote_get_flush_type(remote_mr, &remote_flush_type); if (ret) { fprintf(stderr, "rpma_mr_remote_get_flush_type() failed\n"); goto err_mr_remote_delete; } if (remote_flush_type & RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT) flush_type = RPMA_FLUSH_TYPE_PERSISTENT; else flush_type = RPMA_FLUSH_TYPE_VISIBILITY; for (int i = 3; i < argc; ++i) { char *word = mr_ptr; strncpy(word, argv[i], MAX_WORD_LENGTH); /* make sure the word is always null-terminated */ word[MAX_WORD_LENGTH] = 0; size_t word_size = strlen(word) + 1; ret = rpma_write(conn, remote_mr, used.uint64, local_mr, 0, word_size, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) break; ret = rpma_flush(conn, remote_mr, used.uint64, word_size, flush_type, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) break; used.uint64 += word_size; ret = rpma_atomic_write(conn, remote_mr, dst_used_offset, used.buf, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) break; ret = rpma_flush(conn, remote_mr, dst_used_offset, sizeof(uint64_t), flush_type, RPMA_F_COMPLETION_ALWAYS, FLUSH_ID); if (ret) break; /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) break; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) break; if (wc.wr_id != (uintptr_t)FLUSH_ID) { (void) fprintf(stderr, "unexpected wc.wr_id value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", wc.wr_id, (uintptr_t)FLUSH_ID); ret = -1; break; } if (wc.status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "rpma_flush() failed: %s\n", ibv_wc_status_str(wc.status)); ret = -1; break; } } err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&remote_mr); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&local_mr); err_peer_cfg_delete: (void) rpma_peer_cfg_delete(&pcfg); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: free(mr_ptr); return ret ? -2 : 0; } rpma-1.3.0/examples/07-atomic-write/server.c000066400000000000000000000117221443364775400206160ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server.c -- a server of the atomic-write example * * Please see README.md for a detailed description of this example. 
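 *
 * In short, the server exposes a log structure (a signature, an 8-byte 'used' field
 * aligned for atomic writes and a data area), passes the offset of 'used' to the
 * client via the connection's private data and, after the client disconnects, prints
 * the entries accumulated in the log.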
*/ #include #include #include #include #include #include "common-conn.h" #include "common-pmem_map_file.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s []\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #ifdef USE_PMEM #define LOG_HDR_SIGNATURE "LOG" #endif /* USE_PMEM */ #define LOG_SIGNATURE_SIZE 8 #define LOG_DATA_SIZE 1024 #define LOG_SIZE (sizeof(struct log)) /* defined log structure */ struct log { char signature[LOG_SIGNATURE_SIZE]; /* last written data (aligned to RPMA_ATOMIC_WRITE_ALIGNMENT) */ uint64_t used; char data[LOG_DATA_SIZE]; }; int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; struct log *log = NULL; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; ret = common_pmem_map_file(pmem_path, LOG_SIZE, &mem); if (ret) goto err_free; log = (struct log *)mem.mr_ptr; /* * If the signature is not in place the persistent content has to be initialized * and persisted. */ if (strncmp(mem.mr_ptr, LOG_HDR_SIGNATURE, LOG_SIGNATURE_SIZE)) { /* initialize used value and persist it */ log->used = offsetof(struct log, data); mem.persist(&log->used, sizeof(uint64_t)); /* write the signature to mark the content as valid */ strncpy(mem.mr_ptr, LOG_HDR_SIGNATURE, LOG_SIGNATURE_SIZE); mem.persist(mem.mr_ptr, LOG_SIGNATURE_SIZE); } } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(LOG_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = LOG_SIZE; log = (struct log *)mem.mr_ptr; log->used = offsetof(struct log, data); } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_DST | RPMA_MR_USAGE_READ_SRC | (mem.is_pmem ? 
RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT : RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY), &mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM */ /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the client write */ struct common_data data = {0}; data.data_offset = offsetof(struct log, used); data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; /* * Wait for an incoming connection request, accept it and wait for its establishment. */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); ret = server_accept_connection(ep, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection structure. */ ret = common_wait_for_conn_close_and_disconnect(&conn); if (ret) goto err_mr_dereg; /* print the saved data */ char *ptr = log->data; char *log_data_tail = (char *)log + log->used; while (ptr < log_data_tail) { if (*ptr) putc(*ptr, stdout); else putc('\n', stdout); ptr++; } err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_free: #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) { free(mem.mr_ptr); } return ret ? 
-2 : 0; } rpma-1.3.0/examples/08-messages-ping-pong/000077500000000000000000000000001443364775400202415ustar00rootroot00000000000000rpma-1.3.0/examples/08-messages-ping-pong/CMakeLists.txt000066400000000000000000000024361443364775400230060ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(messages-ping-pong C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c ../common/common-messages-ping-pong.c) add_example(client client.c ../common/common-conn.c ../common/common-messages-ping-pong.c ../common/common-utils.c) rpma-1.3.0/examples/08-messages-ping-pong/README.md000066400000000000000000000015751443364775400215300ustar00rootroot00000000000000Example of using messaging === The messages ping pong example implements two parts of the messaging process: - The server starts a listening endpoint and waits for an incoming connection. When a new connection request appears it is accepted. The client sends to the server its current counter value. When the server receives the message from the client, its content is incremented and send back to the client. When the server receives the I_M_DONE message it waits for disconnection. - The client connects to the server and sends to it its current counter value. When the client gets the new value from the server it repeats the process for the configured number of rounds. When it is quitting, it send the I_M_DONE message and disconnects. ## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $seed $rounds [$sleep] ``` rpma-1.3.0/examples/08-messages-ping-pong/client.c000066400000000000000000000067001443364775400216660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the messages-ping-pong example * * Please see README.md for a detailed description of this example. 
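 *
 * In short, in every round the client posts a receive for the server's response,
 * sends its current counter value, waits for both completions and adopts the value
 * received back. After the configured number of rounds it sends the I_M_DONE
 * message and disconnects.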
*/ #include #include #include #include #include "common-conn.h" #include "common-messages-ping-pong.h" #include "common-utils.h" #define USAGE_STR "usage: %s []\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 5) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t cntr = strtoul_noerror(argv[3]); uint64_t rounds = strtoul_noerror(argv[4]); uint64_t sleep_usec = 0; if (argc >= 6) sleep_usec = strtoul_noerror(argv[5]); int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_peer_delete; } /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; while (--rounds) { /* prepare a receive for the server's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send a message to the server */ (void) printf("Value sent: %" PRIu64 "\n", cntr); *send = cntr; /* * XXX when using RPMA_F_COMPLETION_ON_ERROR * after few rounds rpma_send() returns ENOMEM. */ ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; int send_cmpl = 0; int recv_cmpl = 0; /* get completions and process them */ ret = wait_and_process_completions(cq, recv, &send_cmpl, &recv_cmpl); if (ret) break; /* copy the new value of the counter and print it out */ cntr = *recv; printf("Value received: %" PRIu64 "\n", cntr); /* sleep if required */ if (sleep_usec > 0) (void) usleep(sleep_usec); } /* send the I_M_DONE message */ *send = I_M_DONE; ret |= rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); err_conn_disconnect: ret |= common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/08-messages-ping-pong/server.c000066400000000000000000000101771443364775400217210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server.c -- a server of the messages-ping-pong example * * Please see README.md for a detailed description of this example. 
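 *
 * In short, the server posts an initial receive before accepting the connection and
 * then, for every value received, posts the next receive, sends back the value
 * incremented by one and loops until the I_M_DONE message arrives.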
*/ #include #include #include #include #include #define USAGE_STR "usage: %s \n" #include "common-conn.h" #include "common-messages-ping-pong.h" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_ep_shutdown; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_ep_shutdown; } /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, NULL, &req); if (ret) goto err_mr_dereg; /* * Put an initial receive to be prepared for the first message of the client's ping-pong. */ ret = rpma_conn_req_recv(req, recv_mr, 0, MSG_SIZE, recv); if (ret) { (void) rpma_conn_req_delete(&req); goto err_mr_dereg; } /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, NULL, &conn); if (ret) goto err_mr_dereg; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) goto err_conn_disconnect; if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_disconnect; } /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* IBV_WC_SEND completion in the first round is not present */ int send_cmpl = 1; int recv_cmpl = 0; while (1) { /* get completions and process them */ ret = wait_and_process_completions(cq, recv, &send_cmpl, &recv_cmpl); if (ret) break; if (*recv == I_M_DONE) break; /* print the received old value of the client's counter */ (void) printf("Value received: %" PRIu64 "\n", *recv); /* calculate a new counter's value */ *send = *recv + 1; /* prepare a receive for the client's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send the new value to the client */ (void) printf("Value sent: %" PRIu64 "\n", *send); /* * XXX when using RPMA_F_COMPLETION_ON_ERROR * after few rounds rpma_send() returns ENOMEM. 
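 * (Most likely the send queue fills up because unsignaled sends keep occupying
 * send-queue slots until a signaled completion is polled.)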
*/ ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; /* reset */ send_cmpl = 0; recv_cmpl = 0; } err_conn_disconnect: ret |= common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_ep_shutdown: ret |= rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_free: free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/08srq-simple-messages-ping-pong-with-srq/000077500000000000000000000000001443364775400240325ustar00rootroot00000000000000rpma-1.3.0/examples/08srq-simple-messages-ping-pong-with-srq/CMakeLists.txt000066400000000000000000000024361443364775400265770ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(messages-ping-pong C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c ../common/common-messages-ping-pong.c) add_example(client client.c ../common/common-conn.c ../common/common-messages-ping-pong.c ../common/common-utils.c) rpma-1.3.0/examples/08srq-simple-messages-ping-pong-with-srq/README.md000066400000000000000000000024231443364775400253120ustar00rootroot00000000000000Example of using messaging with shared RQ and a single connection === The single-connection messages ping pong example with shared RQ implements two parts of the messaging process: - The server starts a listening endpoint and waits for an incoming connection. When a new connection request appears it is accepted. The client sends to the server its current counter value. When the server receives the message from the client, its content is incremented and send back to the client. When the server receives the I_M_DONE message it disconnects. - The client connects to the server and sends to it its current counter value. When the client gets the new value from the server it repeats the process for the configured number of rounds. When it is quitting, it send the I_M_DONE message and waits for disconnection. 
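The heart of the server side described above is the shared RQ: receives are posted to the SRQ itself rather than to a particular connection, and all of them complete on a single receive CQ. The condensed sketch below (based on `server.c` in this directory) shows only that pattern; accepting connections with the prepared configuration, error handling and cleanup are omitted, and the helper name `serve_one_message_with_srq` and its parameters are illustrative only - it assumes `peer`, `recv_mr` and `recv` were already prepared as in the example.

```c
#include <librpma.h>
#include <infiniband/verbs.h>

static int
serve_one_message_with_srq(struct rpma_peer *peer, struct rpma_mr_local *recv_mr,
		void *recv, size_t msg_size)
{
	struct rpma_srq_cfg *srq_cfg = NULL;
	struct rpma_srq *srq = NULL;
	struct rpma_conn_cfg *conn_cfg = NULL;
	struct rpma_cq *rcq = NULL;
	struct ibv_wc wc;
	int num_got = 0;

	/* create the shared RQ (the default configuration keeps its own receive CQ) */
	(void) rpma_srq_cfg_new(&srq_cfg);
	(void) rpma_srq_new(peer, srq_cfg, &srq);

	/* every connection accepted with this configuration will share the RQ */
	(void) rpma_conn_cfg_new(&conn_cfg);
	(void) rpma_conn_cfg_set_srq(conn_cfg, srq);

	/* post a receive to the shared RQ, not to any particular connection */
	(void) rpma_srq_recv(srq, recv_mr, 0, msg_size, recv);

	/* wait for a receive completion on the shared RQ's receive CQ */
	(void) rpma_srq_get_rcq(srq, &rcq);
	(void) rpma_cq_wait(rcq);
	(void) rpma_cq_get_wc(rcq, 1, &wc, &num_got);

	/* wc.qp_num identifies the connection the message arrived on */
	return (num_got == 1 && wc.status == IBV_WC_SUCCESS) ? 0 : -1;
}
```

In the full example the same pattern is extended with `rpma_conn_get_qp_num()` to tell which connection a given completion belongs to, and with the `m`/`r` variants that route receive completions to the connection's main or separate receive CQ instead.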
## Usage ```bash [user@server]$ ./server $server_address $port [m|r] ``` **Note** The third parameter can be one of the following values: - unspecified - get receive completions by the receive CQ of the shared RQ - m - get receive completions by the main CQ of the connection - r - get receive completions by the separate receive CQ of the connection ```bash [user@client]$ ./client $server_address $port $seed $rounds [$sleep] ``` **Note** seed is the initial value of the counter rpma-1.3.0/examples/08srq-simple-messages-ping-pong-with-srq/client.c000066400000000000000000000066651443364775400254710ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * client.c -- a client of the simple-messages-ping-pong-with-srq example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #include #include #include "common-conn.h" #include "common-messages-ping-pong.h" #include "common-utils.h" #define USAGE_STR "usage: %s []\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 5) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t cntr = strtoul_noerror(argv[3]); uint64_t rounds = strtoul_noerror(argv[4]); uint64_t sleep_usec = 0; if (argc >= 6) sleep_usec = strtoul_noerror(argv[5]); int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_peer_delete; } /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; do { /* prepare a receive for the server's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) goto err_conn_disconnect; /* send a message to the server */ (void) printf("Value sent: %" PRIu64 "\n", cntr); *send = cntr; ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_conn_disconnect; int send_cmpl = 0; int recv_cmpl = 0; /* get completions and process them */ ret = wait_and_process_completions(cq, recv, &send_cmpl, &recv_cmpl); if (ret) goto err_conn_disconnect; /* copy the new value of the counter and print it out */ cntr = *recv; printf("Value received: %" PRIu64 "\n", cntr); /* sleep if required */ if (sleep_usec > 0) (void) usleep(sleep_usec); } while (--rounds); /* send the M_DONE message */ *send = I_M_DONE; ret |= rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); err_conn_disconnect: ret |= 
common_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/08srq-simple-messages-ping-pong-with-srq/server.c000066400000000000000000000141031443364775400255030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright (c) 2022 Fujitsu Limited */ /* * server.c -- a server of the simple-messages-ping-pong-with-srq example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #include "common-conn.h" #include "common-messages-ping-pong.h" #define USAGE_STR "usage: %s [m|r]\n" #define RCQ_SIZE 10 int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; char rcq_flag; int ret; /* validate the third parameter */ if (argv[3]) { if (strcmp(argv[3], "m") == 0) { rcq_flag = 'm'; } else if (strcmp(argv[3], "r") == 0) { rcq_flag = 'r'; } else { (void) fprintf(stderr, "The third parameter should be one of m or r (%s given)\n", argv[3]); return -1; } } else { /* set rcq_flag to the default 's' when the third parameter is not specified. */ rcq_flag = 's'; } /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { ret = -1; goto err_free_recv; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_srq *srq = NULL; struct rpma_srq_cfg *srq_cfg = NULL; struct rpma_conn *conn = NULL; struct rpma_conn_cfg *conn_cfg = NULL; struct rpma_ep *ep = NULL; struct rpma_cq *rcq = NULL; int num_got = 0; uint32_t qp_num; struct ibv_wc wc; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free_send; ret = rpma_srq_cfg_new(&srq_cfg); if (ret) goto err_peer_delete; if (rcq_flag != 's') { ret = rpma_srq_cfg_set_rcq_size(srq_cfg, 0); if (ret) goto err_srq_cfg_delete; } /* create a shared RQ object with given configuration */ ret = rpma_srq_new(peer, srq_cfg, &srq); if (ret) goto err_srq_cfg_delete; /* create a new connection configuration */ ret = rpma_conn_cfg_new(&conn_cfg); if (ret) goto err_srq_delete; /* The default receive CQ size of the connection is 0 when rcq_flag is not 'r'. 
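 * In that case receive completions are collected either from the shared RQ's
 * receive CQ ('s') or from the connection's main CQ ('m'); see the switch below.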
*/ if (rcq_flag == 'r') { ret = rpma_conn_cfg_set_rcq_size(conn_cfg, RCQ_SIZE); if (ret) goto err_conn_cfg_delete; } /* set the shared RQ object for the connection configuration */ ret = rpma_conn_cfg_set_srq(conn_cfg, srq); if (ret) goto err_conn_cfg_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_conn_cfg_delete; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_ep_shutdown; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) goto err_recv_mr_dereg; /* * Put an initial receive for a connection to be prepared for the first message * of the client's ping-pong. */ ret = rpma_srq_recv(srq, recv_mr, 0, MSG_SIZE, recv); if (ret) goto err_conn_disconnect; /* * Wait for an incoming connection request, accept it and wait for its establishment. */ ret = server_accept_connection(ep, conn_cfg, NULL, &conn); if (ret) goto err_conn_disconnect; /* get the qp_num of the connection */ ret = rpma_conn_get_qp_num(conn, &qp_num); if (ret) goto err_conn_disconnect; switch (rcq_flag) { case 's': /* get the receive CQ of the shared RQ */ ret = rpma_srq_get_rcq(srq, &rcq); break; case 'm': /* get the main CQ of the connection */ ret = rpma_conn_get_cq(conn, &rcq); break; case 'r': /* get the separate receive CQ of the connection */ ret = rpma_conn_get_rcq(conn, &rcq); } if (ret) goto err_conn_disconnect; int recv_cmpl = 0; while (1) { do { /* wait for the receive completion to be ready */ ret = rpma_cq_wait(rcq); if (ret) goto err_conn_disconnect; /* reset num_got to 0 */ num_got = 0; /* get the next recv completion */ ret = rpma_cq_get_wc(rcq, 1, &wc, &num_got); if (ret) /* lack of completion is not an error */ if (ret != RPMA_E_NO_COMPLETION) goto err_conn_disconnect; if (num_got) { /* validate the received completion */ int send_cmpl; /* not used */ ret = validate_wc(&wc, recv, &send_cmpl, &recv_cmpl); if (ret) goto err_conn_disconnect; } } while (recv_cmpl == 0); if (*recv == I_M_DONE) break; if (qp_num != wc.qp_num) { ret = -1; (void) fprintf(stderr, "Error: number of QP (qp_num) in the received completion " "(%" PRIu32 ") differs from the one of the connection " "(%" PRIu32 ")\n", wc.qp_num, qp_num); break; } /* print the received old value of the client's counter */ (void) printf("Value received: %" PRIu64 "\n", *recv); *send = *recv + 1; /* prepare a receive for the client's request */ ret = rpma_srq_recv(srq, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send the new value to the client */ (void) printf("Value sent: %" PRIu64 "\n", *send); /* send a message to the client */ ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) break; } err_conn_disconnect: /* disconnect the client */ (void) rpma_conn_disconnect(conn); (void) rpma_conn_delete(&conn); ret |= rpma_mr_dereg(&send_mr); err_recv_mr_dereg: ret |= rpma_mr_dereg(&recv_mr); err_ep_shutdown: ret |= rpma_ep_shutdown(&ep); err_conn_cfg_delete: ret |= rpma_conn_cfg_delete(&conn_cfg); err_srq_delete: ret |= rpma_srq_delete(&srq); err_srq_cfg_delete: ret |= rpma_srq_cfg_delete(&srq_cfg); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_free_send: free(send); err_free_recv: free(recv); return ret ? 
-1 : 0; } rpma-1.3.0/examples/09-flush-to-persistent-GPSPM/000077500000000000000000000000001443364775400213625ustar00rootroot00000000000000rpma-1.3.0/examples/09-flush-to-persistent-GPSPM/CMakeLists.txt000066400000000000000000000027211443364775400241240ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(flush-to-persistent C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPROTOBUFC libprotobuf-c) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPROTOBUFC_FOUND) find_package(LIBPROTOBUFC REQUIRED libprotobuf-c) endif() if(NOT LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server USE_LIBPROTOBUFC SRCS server.c ../common/gpspm/GPSPM_flush.pb-c.c) add_example_with_pmem(NAME client USE_LIBPROTOBUFC SRCS client.c ../common/gpspm/GPSPM_flush.pb-c.c ../common/common-utils.c) rpma-1.3.0/examples/09-flush-to-persistent-GPSPM/README.md000066400000000000000000000043261443364775400226460ustar00rootroot00000000000000Example of performing an RPMA write and flushing it to persistence via GPSPM === The write example implements two parts of the write process: - The server, if provided (and capable of), prepares a local persistent memory and exposes the memory description along with other parameters required to perform an RDMA write. After the connection is established the server waits for a flush request to execute `pmem_persist(3)` according to provided flush parameters. When it is done the server sends back a flush response and waits for the client to disconnect. - The client, if provided (and capable of), prepares a local persistent memory (including its contents) and registers it as a writing source. After the connection is established the client receives the server's memory regions registered as a writing destination. The client performs a write from the local memory region to a remote memory region and sends to the server a flush request and waits for a flush response which indicates the flush operation's completion. **Note**: This example does not require RPMA_FLUSH_TYPE_PERSISTENT support to provide remote PMem persistency. For the sake of simplicity, the client does not need to know server's peer configuration. For an example of peer's configuration exchange please see the 05 example. **Note**: The flush request and response are sent and received via the RPMA's messaging API (`rpma_send()` and `rpma_recv()`). Both types of messages are serialized and deserialized using the protobuf-c library. **Note**: If either server or client does not have a PMem path (or it is not capable of using PMem at all) it uses DRAM instead. 
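To illustrate the messaging described above: the client packs a `GPSPMFlushRequest` with protobuf-c into its registered send buffer and posts it with `rpma_send()`. The sketch below mirrors `client.c` from this directory but is illustrative only - the helper name `send_flush_request` and its parameters are made up here, and the buffer layout, completion handling and error handling are omitted.

```c
#include <librpma.h>
#include "gpspm/GPSPM_flush.pb-c.h"

/*
 * send_flush_request -- pack a GPSPM flush request and post it
 * ('send_ptr' is assumed to point at offset 'send_offset' of 'msg_mr')
 */
static int
send_flush_request(struct rpma_conn *conn, struct rpma_mr_local *msg_mr, void *send_ptr,
		size_t send_offset, size_t max_size, uint64_t write_offset,
		uint64_t write_length, uint64_t op_context)
{
	GPSPMFlushRequest flush_req = GPSPM_FLUSH_REQUEST__INIT;
	flush_req.offset = write_offset;	/* where the written data starts */
	flush_req.length = write_length;	/* how many bytes to persist */
	flush_req.op_context = op_context;	/* echoed back in the flush response */

	/* serialize the request with protobuf-c into the registered send buffer */
	size_t size = gpspm_flush_request__get_packed_size(&flush_req);
	if (size > max_size)
		return -1;
	(void) gpspm_flush_request__pack(&flush_req, send_ptr);

	/* post the message; the server is expected to answer with a GPSPMFlushResponse */
	return rpma_send(conn, msg_mr, send_offset, size, RPMA_F_COMPLETION_ALWAYS, NULL);
}
```

The client unpacks the server's answer the same way with `gpspm_flush_response__unpack()` (see `client.c`).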
**Note**: For the sake of this example, the memory region being written to is transferred via the connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port [ []] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example) and `` is an offset inside the above mentioned PMem device where the user data begins from. rpma-1.3.0/examples/09-flush-to-persistent-GPSPM/client.c000066400000000000000000000205641443364775400230130ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the flush-to-persistent-GPSPM example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "gpspm/flush-to-persistent-GPSPM.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "common-utils.h" /* Generated by the protocol buffer compiler from: GPSPM_flush.proto */ #include "gpspm/GPSPM_flush.pb-c.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s [ []]\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #define FLUSH_ID (void *)0xF01D /* a random identifier */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_remote *dst_mr = NULL; size_t dst_size = 0; size_t dst_offset = 0; struct rpma_mr_local *src_mr = NULL; struct ibv_wc wc; /* messaging resources */ void *msg_ptr = NULL; void *send_ptr = NULL; void *recv_ptr = NULL; struct rpma_mr_local *msg_mr = NULL; GPSPMFlushRequest flush_req = GPSPM_FLUSH_REQUEST__INIT; size_t flush_req_size = 0; GPSPMFlushResponse *flush_resp = NULL; struct hello_t *hello = NULL; #ifdef USE_PMEM if (argc >= 4) { char *path = argv[3]; if (argc >= 5) mem.offset = strtoul_noerror(argv[4]); ret = common_pmem_map_file_with_signature_check(path, HELLO_T_SIZE, &mem, init_hello); if (ret) goto err_free; hello = (struct hello_t *)((uintptr_t)mem.mr_ptr + mem.data_offset); } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_T_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_T_SIZE; hello = (struct hello_t *)mem.mr_ptr; /* write an initial value */ write_hello_str(hello, en); } (void) printf("Next value: %s\n", hello->str); /* allocate messaging buffer */ msg_ptr = malloc_aligned(HELLO_STR_SIZE); if (msg_ptr == NULL) { ret = -1; goto err_free; } send_ptr = (char *)msg_ptr + SEND_OFFSET; recv_ptr = (char *)msg_ptr + RECV_OFFSET; /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; struct rpma_conn_cfg *cfg = NULL; ret = 
rpma_conn_cfg_new(&cfg); if (ret) goto err_peer_delete; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, cfg, NULL, &conn); if (ret) goto err_cfg_delete; /* register the memory RDMA write */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_SRC, &src_mr); if (ret) goto err_conn_disconnect; /* register the messaging memory */ ret = rpma_mr_reg(peer, msg_ptr, HELLO_STR_SIZE, RPMA_MR_USAGE_SEND | RPMA_MR_USAGE_RECV, &msg_mr); if (ret) { (void) rpma_mr_dereg(&src_mr); goto err_conn_disconnect; } /* obtain the remote side resources description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_mr_dereg; /* * Create a remote memory registration structure from the received descriptor. */ struct common_data *dst_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &dst_mr); if (ret) goto err_mr_dereg; dst_offset = dst_data->data_offset; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(dst_mr, &dst_size); if (ret) { goto err_mr_remote_delete; } else if (dst_size - dst_offset < HELLO_STR_SIZE) { fprintf(stderr, "Size of the remote memory region is too small for writing the data of the assumed size (%zu < %d)\n", dst_size - dst_offset, HELLO_STR_SIZE); goto err_mr_remote_delete; } ret = rpma_write(conn, dst_mr, dst_offset, src_mr, (mem.data_offset + HELLO_STR_OFFSET), HELLO_STR_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) goto err_mr_remote_delete; /* prepare a response buffer */ ret = rpma_recv(conn, msg_mr, RECV_OFFSET, MSG_SIZE_MAX, NULL); if (ret) goto err_mr_remote_delete; /* prepare a flush message and pack it to a send buffer */ flush_req.offset = dst_offset; flush_req.length = HELLO_STR_SIZE; flush_req.op_context = (uint64_t)FLUSH_ID; flush_req_size = gpspm_flush_request__get_packed_size(&flush_req); if (flush_req_size > MSG_SIZE_MAX) { fprintf(stderr, "Packed flush request size is bigger than available send buffer space (%" PRIu64 " > %d\n", flush_req_size, MSG_SIZE_MAX); goto err_mr_remote_delete; } (void) gpspm_flush_request__pack(&flush_req, send_ptr); /* send the flush message */ ret = rpma_send(conn, msg_mr, SEND_OFFSET, flush_req_size, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* wait for the send completion to be ready */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; /* validate the send completion */ if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_send() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_SEND) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc.opcode, (uintptr_t)IBV_WC_SEND); goto err_mr_remote_delete; } /* wait for the receive completion to be ready */ struct rpma_cq *rcq = NULL; ret = rpma_conn_get_rcq(conn, &rcq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_wait(rcq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(rcq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; /* validate the receive completion */ if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) 
fprintf(stderr, "rpma_recv() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_RECV) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc.opcode, (uintptr_t)IBV_WC_RECV); goto err_mr_remote_delete; } /* unpack a response from the received buffer */ flush_resp = gpspm_flush_response__unpack(NULL, wc.byte_len, recv_ptr); if (flush_resp == NULL) { fprintf(stderr, "Cannot unpack the flush response buffer\n"); goto err_mr_remote_delete; } if (flush_resp->op_context != (uint64_t)FLUSH_ID) { (void) fprintf(stderr, "unexpected flush_resp->op_context value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)flush_resp->op_context, (uintptr_t)FLUSH_ID); goto err_mr_remote_delete; } gpspm_flush_response__free_unpacked(flush_resp, NULL); /* * Translate the message so the next time the greeting will be surprising. */ translate(hello); #ifdef USE_PMEM if (mem.is_pmem) { mem.persist(hello, HELLO_T_SIZE); } else #endif /* USE_PMEM */ (void) printf("Translation: %s\n", hello->str); err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&dst_mr); err_mr_dereg: (void) rpma_mr_dereg(&msg_mr); (void) rpma_mr_dereg(&src_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: (void) rpma_conn_cfg_delete(&cfg); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: free(msg_ptr); #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) { free(mem.mr_ptr); } return ret; } rpma-1.3.0/examples/09-flush-to-persistent-GPSPM/server.c000066400000000000000000000216511443364775400230410ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server.c -- a server of the flush-to-persistent-GPSPM example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "gpspm/flush-to-persistent-GPSPM.h" /* Generated by the protocol buffer compiler from: GPSPM_flush.proto */ #include "gpspm/GPSPM_flush.pb-c.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s []\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; /* messaging resources */ void *msg_ptr = NULL; void *send_ptr = NULL; void *recv_ptr = NULL; struct rpma_mr_local *msg_mr = NULL; GPSPMFlushRequest *flush_req; GPSPMFlushResponse flush_resp = GPSPM_FLUSH_RESPONSE__INIT; size_t flush_resp_size = 0; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; ret = common_pmem_map_file_with_signature_check(pmem_path, HELLO_STR_SIZE, &mem, NULL); if (ret) goto err_free; } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_STR_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_STR_SIZE; mem.data_offset = 0; } /* allocate messaging buffer */ msg_ptr = malloc_aligned(HELLO_STR_SIZE); if (msg_ptr == NULL) { ret = -1; goto err_free; } send_ptr = (char *)msg_ptr + SEND_OFFSET; recv_ptr = (char *)msg_ptr + RECV_OFFSET; /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct ibv_wc wc; /* if the string content is not empty */ if (((char *)mem.mr_ptr + mem.data_offset)[0] != '\0') { (void) printf("Old value: %s\n", (char *)mem.mr_ptr + mem.data_offset); } /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_DST | (mem.is_pmem ? 
RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT : RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY), &mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM && IBV_ADVISE_MR_FLAGS_SUPPORTED */ /* register the messaging memory */ ret = rpma_mr_reg(peer, msg_ptr, HELLO_STR_SIZE, RPMA_MR_USAGE_SEND | RPMA_MR_USAGE_RECV | RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY, &msg_mr); if (ret) { (void) rpma_mr_dereg(&mr); goto err_ep_shutdown; } /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the server read */ struct common_data data = {0}; data.data_offset = mem.data_offset; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; /* * Wait for an incoming connection request, accept it and wait for its establishment. */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, cfg, &req); if (ret) goto err_req_delete; /* prepare buffer for a flush request */ ret = rpma_conn_req_recv(req, msg_mr, RECV_OFFSET, MSG_SIZE_MAX, NULL); if (ret) goto err_req_delete; /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, &pdata, &conn); if (ret) goto err_cfg_delete; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (!ret && conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; } if (ret) goto err_conn_delete; /* wait for the receive completion to be ready */ struct rpma_cq *rcq = NULL; ret = rpma_conn_get_rcq(conn, &rcq); if (ret) goto err_conn_delete; ret = rpma_cq_wait(rcq); if (ret) goto err_conn_delete; ret = rpma_cq_get_wc(rcq, 1, &wc, NULL); if (ret) goto err_conn_delete; /* validate the receive completion */ if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_recv() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_conn_delete; } if (wc.opcode != IBV_WC_RECV) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc.opcode, (uintptr_t)IBV_WC_RECV); goto err_conn_delete; } /* unpack a flush request from the received buffer */ flush_req = gpspm_flush_request__unpack(NULL, wc.byte_len, recv_ptr); if (flush_req == NULL) { fprintf(stderr, "Cannot unpack the flush request buffer\n"); goto err_conn_delete; } (void) printf("Flush request received: {offset: 0x%" PRIXPTR ", length: 0x%" PRIXPTR ", op_context: 0x%" PRIXPTR "}\n", flush_req->offset, flush_req->length, flush_req->op_context); #ifdef USE_PMEM if (mem.is_pmem) { void *op_ptr = (char *)mem.mr_ptr + flush_req->offset; mem.persist(op_ptr, flush_req->length); } #else (void) printf( "At this point, persist function should be called if persistent memory will be in use\n"); #endif 
/* USE_PMEM */ /* prepare a flush response and pack it to a send buffer */ flush_resp.op_context = flush_req->op_context; flush_resp_size = gpspm_flush_response__get_packed_size(&flush_resp); if (flush_resp_size > MSG_SIZE_MAX) { fprintf(stderr, "Size of the packed flush response is bigger than the available space of the send buffer (%" PRIu64 " > %u\n", flush_resp_size, MSG_SIZE_MAX); goto err_conn_delete; } (void) gpspm_flush_response__pack(&flush_resp, send_ptr); gpspm_flush_request__free_unpacked(flush_req, NULL); /* send the flush response */ ret = rpma_send(conn, msg_mr, SEND_OFFSET, flush_resp_size, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_conn_delete; /* wait for the send completion to be ready */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_delete; ret = rpma_cq_wait(cq); if (ret) goto err_conn_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_conn_delete; /* validate the send completion */ if (wc.status != IBV_WC_SUCCESS) { ret = -1; (void) fprintf(stderr, "rpma_send() failed: %s\n", ibv_wc_status_str(wc.status)); goto err_conn_delete; } if (wc.opcode != IBV_WC_SEND) { ret = -1; (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc.opcode, (uintptr_t)IBV_WC_SEND); goto err_conn_delete; } /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection structure. */ ret = common_wait_for_conn_close_and_disconnect(&conn); if (ret) goto err_conn_delete; (void) printf("New value: %s\n", (char *)mem.mr_ptr + mem.data_offset); err_conn_delete: (void) rpma_conn_delete(&conn); goto err_cfg_delete; err_req_delete: (void) rpma_conn_req_delete(&req); err_cfg_delete: (void) rpma_conn_cfg_delete(&cfg); err_mr_dereg: (void) rpma_mr_dereg(&msg_mr); (void) rpma_mr_dereg(&mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_free: free(msg_ptr); #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret; } rpma-1.3.0/examples/09scch-flush-to-persistent-GPSPM/000077500000000000000000000000001443364775400222235ustar00rootroot00000000000000rpma-1.3.0/examples/09scch-flush-to-persistent-GPSPM/CMakeLists.txt000066400000000000000000000027211443364775400247650ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # cmake_minimum_required(VERSION 3.3) project(flush-to-persistent C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../cmake/common.cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # check if all required IBV_ADVISE_MR* flags are supported are_ibv_advise_flags_supported(IBV_ADVISE_MR_FLAGS_SUPPORTED) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(LIBPMEM_REQUIRED_VERSION 1.6) find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) pkg_check_modules(LIBPROTOBUFC libprotobuf-c) pkg_check_modules(LIBPMEM QUIET libpmem>=${LIBPMEM_REQUIRED_VERSION}) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBPROTOBUFC_FOUND) find_package(LIBPROTOBUFC REQUIRED libprotobuf-c) endif() if(NOT 
LIBPMEM_FOUND) find_package(LIBPMEM ${LIBPMEM_REQUIRED_VERSION} QUIET) endif() link_directories(${LIBRPMA_LIBRARY_DIRS}) add_example_with_pmem(NAME server USE_LIBPROTOBUFC SRCS server.c ../common/gpspm/GPSPM_flush.pb-c.c) add_example_with_pmem(NAME client USE_LIBPROTOBUFC SRCS client.c ../common/gpspm/GPSPM_flush.pb-c.c ../common/common-utils.c) rpma-1.3.0/examples/09scch-flush-to-persistent-GPSPM/README.md000066400000000000000000000050611443364775400235040ustar00rootroot00000000000000Example of performing an RPMA write and flushing it to persistence via GPSPM with shared completion channel === The write example implements two parts of the write process: - The server, if provided (and capable of), prepares a local persistent memory and exposes the memory description along with other parameters required to perform an RDMA write. After the connection is established the server waits for a flush request to execute `pmem_persist(3)` according to provided flush parameters. When it is done the server sends back a flush response and waits for the client to disconnect. - The client, if provided (and capable of), prepares a local persistent memory (including its contents) and registers it as a writing source. After the connection is established the client receives the server's memory regions registered as a writing destination. The client performs a write from the local memory region to a remote memory region and sends to the server a flush request and waits for a flush response which indicates the flush operation's completion. **Note**: In this example, we use the shared completion event channel for CQ and RCQ. The most interesting part of this example is the mechanism for collecting and processing completions. It is implemented in the wait_and_validate_completion() function. We use the rpma_conn_wait() function to collect completions. **Note**: This example does not require RPMA_FLUSH_TYPE_PERSISTENT support to provide remote PMem persistency. For the sake of simplicity, the client does not need to know server's peer configuration. For an example of peer's configuration exchange please see the 05 example. **Note**: The flush request and response are sent and received via the RPMA's messaging API (`rpma_send()` and `rpma_recv()`). Both types of messages are serialized and deserialized using the protobuf-c library. **Note**: If either server or client does not have a PMem path (or it is not capable of using PMem at all) it uses DRAM instead. **Note**: For the sake of this example, the memory region being written to is transferred via the connection's private data. In general, it can be transferred via an out-of-band or the in-band channel. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port [ []] ``` where `` can be: - a Device DAX (`/dev/dax0.0` for example) or - a file on File System DAX (`/mnt/pmem/file` for example) and `` is an offset inside the above mentioned PMem device where the user data begins from. rpma-1.3.0/examples/09scch-flush-to-persistent-GPSPM/client.c000066400000000000000000000164751443364775400236620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * client.c -- a client of the flush-to-persistent-GPSPM example * * Please see README.md for a detailed description of this example. 
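 *
 * Unlike example 09, this client enables the completion event channel shared by the CQ and
 * the RCQ (rpma_conn_cfg_set_compl_channel()) and collects both the send and the receive
 * completion through wait_and_validate_completion().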
*/ #include #include #include #include #include "common-conn.h" #include "gpspm/flush-to-persistent-GPSPM.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "common-utils.h" /* Generated by the protocol buffer compiler from: GPSPM_flush.proto */ #include "gpspm/GPSPM_flush.pb-c.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s [ []]\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ #define FLUSH_ID (void *)0xF01D /* a random identifier */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_remote *dst_mr = NULL; size_t dst_size = 0; size_t dst_offset = 0; struct rpma_mr_local *src_mr = NULL; struct ibv_wc wc; /* messaging resources */ void *msg_ptr = NULL; void *send_ptr = NULL; void *recv_ptr = NULL; struct rpma_mr_local *msg_mr = NULL; GPSPMFlushRequest flush_req = GPSPM_FLUSH_REQUEST__INIT; size_t flush_req_size = 0; GPSPMFlushResponse *flush_resp = NULL; struct hello_t *hello = NULL; #ifdef USE_PMEM if (argc >= 4) { char *path = argv[3]; if (argc >= 5) mem.offset = strtoul_noerror(argv[4]); ret = common_pmem_map_file_with_signature_check(path, HELLO_T_SIZE, &mem, init_hello); if (ret) goto err_free; hello = (struct hello_t *)((uintptr_t)mem.mr_ptr + mem.data_offset); } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_T_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_T_SIZE; hello = (struct hello_t *)mem.mr_ptr; /* write an initial value */ write_hello_str(hello, en); } (void) printf("Next value: %s\n", hello->str); /* allocate messaging buffer */ msg_ptr = malloc_aligned(HELLO_STR_SIZE); if (msg_ptr == NULL) { ret = -1; goto err_free; } send_ptr = (char *)msg_ptr + SEND_OFFSET; recv_ptr = (char *)msg_ptr + RECV_OFFSET; /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_free; struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_peer_delete; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; ret = rpma_conn_cfg_set_compl_channel(cfg, true); if (ret) goto err_cfg_delete; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, cfg, NULL, &conn); if (ret) goto err_cfg_delete; /* register the memory RDMA write */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_SRC, &src_mr); if (ret) goto err_conn_disconnect; /* register the messaging memory */ ret = rpma_mr_reg(peer, msg_ptr, HELLO_STR_SIZE, RPMA_MR_USAGE_SEND | RPMA_MR_USAGE_RECV, &msg_mr); if (ret) { (void) rpma_mr_dereg(&src_mr); goto err_conn_disconnect; } /* obtain the remote side resources description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret != 0 || pdata.len < sizeof(struct common_data)) goto err_mr_dereg; /* * Create a remote 
memory registration structure from the received descriptor. */ struct common_data *dst_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &dst_mr); if (ret) goto err_mr_dereg; dst_offset = dst_data->data_offset; /* get the remote memory region size */ ret = rpma_mr_remote_get_size(dst_mr, &dst_size); if (ret) { goto err_mr_remote_delete; } else if (dst_size - dst_offset < HELLO_STR_SIZE) { fprintf(stderr, "Size of the remote memory region is too small for writing the data of the assumed size (%zu < %d)\n", dst_size - dst_offset, HELLO_STR_SIZE); goto err_mr_remote_delete; } ret = rpma_write(conn, dst_mr, dst_offset, src_mr, (mem.data_offset + HELLO_STR_OFFSET), HELLO_STR_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) goto err_mr_remote_delete; /* prepare a response buffer */ ret = rpma_recv(conn, msg_mr, RECV_OFFSET, MSG_SIZE_MAX, NULL); if (ret) goto err_mr_remote_delete; /* prepare a flush message and pack it to a send buffer */ flush_req.offset = dst_offset; flush_req.length = HELLO_STR_SIZE; flush_req.op_context = (uint64_t)FLUSH_ID; flush_req_size = gpspm_flush_request__get_packed_size(&flush_req); if (flush_req_size > MSG_SIZE_MAX) { fprintf(stderr, "Packed flush request size is bigger than available send buffer space (%" PRIu64 " > %d\n", flush_req_size, MSG_SIZE_MAX); goto err_mr_remote_delete; } (void) gpspm_flush_request__pack(&flush_req, send_ptr); /* send the flush message */ ret = rpma_send(conn, msg_mr, SEND_OFFSET, flush_req_size, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_mr_remote_delete; /* wait for the send completion to be ready */ ret = wait_and_validate_completion(conn, IBV_WC_SEND, &wc); if (ret) goto err_mr_remote_delete; /* wait for the receive completion to be ready */ ret = wait_and_validate_completion(conn, IBV_WC_RECV, &wc); if (ret) goto err_mr_remote_delete; /* unpack a response from the received buffer */ flush_resp = gpspm_flush_response__unpack(NULL, wc.byte_len, recv_ptr); if (flush_resp == NULL) { fprintf(stderr, "Cannot unpack the flush response buffer\n"); goto err_mr_remote_delete; } if (flush_resp->op_context != (uint64_t)FLUSH_ID) { (void) fprintf(stderr, "unexpected flush_resp->op_context value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)flush_resp->op_context, (uintptr_t)FLUSH_ID); goto err_mr_remote_delete; } gpspm_flush_response__free_unpacked(flush_resp, NULL); /* * Translate the message so the next time the greeting will be surprising. 
*/ translate(hello); #ifdef USE_PMEM if (mem.is_pmem) { mem.persist(hello, HELLO_T_SIZE); } else #endif /* USE_PMEM */ (void) printf("Translation: %s\n", hello->str); err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&dst_mr); err_mr_dereg: (void) rpma_mr_dereg(&msg_mr); (void) rpma_mr_dereg(&src_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: (void) rpma_conn_cfg_delete(&cfg); err_peer_delete: /* delete the peer */ (void) rpma_peer_delete(&peer); err_free: free(msg_ptr); #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret; } rpma-1.3.0/examples/09scch-flush-to-persistent-GPSPM/server.c000066400000000000000000000175661443364775400237140ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * server.c -- a server of the flush-to-persistent-GPSPM example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "common-hello.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" #include "gpspm/flush-to-persistent-GPSPM.h" /* Generated by the protocol buffer compiler from: GPSPM_flush.proto */ #include "gpspm/GPSPM_flush.pb-c.h" #ifdef USE_PMEM #define USAGE_STR "usage: %s []\n"PMEM_USAGE #else #define USAGE_STR "usage: %s \n" #endif /* USE_PMEM */ int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* resources - memory region */ struct common_mem mem; memset(&mem, 0, sizeof(mem)); struct rpma_mr_local *mr = NULL; /* messaging resources */ void *msg_ptr = NULL; void *send_ptr = NULL; void *recv_ptr = NULL; struct rpma_mr_local *msg_mr = NULL; GPSPMFlushRequest *flush_req; GPSPMFlushResponse flush_resp = GPSPM_FLUSH_RESPONSE__INIT; size_t flush_resp_size = 0; #ifdef USE_PMEM char *pmem_path = NULL; if (argc >= 4) { pmem_path = argv[3]; ret = common_pmem_map_file_with_signature_check(pmem_path, HELLO_STR_SIZE, &mem, NULL); if (ret) goto err_free; } #endif /* USE_PMEM */ /* if no pmem support or it is not provided */ if (mem.mr_ptr == NULL) { (void) fprintf(stderr, NO_PMEM_MSG); mem.mr_ptr = malloc_aligned(HELLO_STR_SIZE); if (mem.mr_ptr == NULL) return -1; mem.mr_size = HELLO_STR_SIZE; mem.data_offset = 0; } /* allocate messaging buffer */ msg_ptr = malloc_aligned(HELLO_STR_SIZE); if (msg_ptr == NULL) { ret = -1; goto err_free; } send_ptr = (char *)msg_ptr + SEND_OFFSET; recv_ptr = (char *)msg_ptr + RECV_OFFSET; /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct ibv_wc wc; /* if the string content is not empty */ if (((char *)mem.mr_ptr + mem.data_offset)[0] != '\0') { (void) printf("Old value: %s\n", (char *)mem.mr_ptr + mem.data_offset); } /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = 
rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, mem.mr_ptr, mem.mr_size, RPMA_MR_USAGE_WRITE_DST | (mem.is_pmem ? RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT : RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY), &mr); if (ret) goto err_ep_shutdown; #if defined USE_PMEM && defined IBV_ADVISE_MR_FLAGS_SUPPORTED /* rpma_mr_advise() should be called only in case of FsDAX */ if (mem.is_pmem && strstr(pmem_path, "/dev/dax") == NULL) { ret = rpma_mr_advise(mr, 0, mem.mr_size, IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE, IBV_ADVISE_MR_FLAG_FLUSH); if (ret) goto err_mr_dereg; } #endif /* USE_PMEM && IBV_ADVISE_MR_FLAGS_SUPPORTED */ /* register the messaging memory */ ret = rpma_mr_reg(peer, msg_ptr, HELLO_STR_SIZE, RPMA_MR_USAGE_SEND | RPMA_MR_USAGE_RECV | RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY, &msg_mr); if (ret) { (void) rpma_mr_dereg(&mr); goto err_ep_shutdown; } /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the server read */ struct common_data data = {0}; data.data_offset = mem.data_offset; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; ret = rpma_conn_cfg_set_compl_channel(cfg, true); if (ret) goto err_cfg_delete; /* * Wait for an incoming connection request, accept it and wait for its establishment. */ struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, cfg, &req); if (ret) goto err_req_delete; /* prepare buffer for a flush request */ ret = rpma_conn_req_recv(req, msg_mr, RECV_OFFSET, MSG_SIZE_MAX, NULL); if (ret) goto err_req_delete; /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, &pdata, &conn); if (ret) goto err_cfg_delete; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (!ret && conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; } if (ret) goto err_conn_delete; /* wait for the receive completion to be ready */ ret = wait_and_validate_completion(conn, IBV_WC_RECV, &wc); if (ret) goto err_conn_delete; /* unpack a flush request from the received buffer */ flush_req = gpspm_flush_request__unpack(NULL, wc.byte_len, recv_ptr); if (flush_req == NULL) { fprintf(stderr, "Cannot unpack the flush request buffer\n"); goto err_conn_delete; } (void) printf("Flush request received: {offset: 0x%" PRIXPTR ", length: 0x%" PRIXPTR ", op_context: 0x%" PRIXPTR "}\n", flush_req->offset, flush_req->length, flush_req->op_context); #ifdef USE_PMEM if (mem.is_pmem) { void *op_ptr = (char *)mem.mr_ptr + flush_req->offset; mem.persist(op_ptr, flush_req->length); } #else (void) printf( "At this point, persistent function should be called if persistent memory will be in use\n"); #endif /* USE_PMEM */ /* prepare a flush response and pack it to a send buffer */ flush_resp.op_context = flush_req->op_context; flush_resp_size = gpspm_flush_response__get_packed_size(&flush_resp); if (flush_resp_size > MSG_SIZE_MAX) { fprintf(stderr, "Size of the 
packed flush response is bigger than the available space of the send buffer (%" PRIu64 " > %u\n", flush_resp_size, MSG_SIZE_MAX); goto err_conn_delete; } (void) gpspm_flush_response__pack(&flush_resp, send_ptr); gpspm_flush_request__free_unpacked(flush_req, NULL); /* send the flush response */ ret = rpma_send(conn, msg_mr, SEND_OFFSET, flush_resp_size, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) goto err_conn_delete; /* wait for the send completion to be ready */ ret = wait_and_validate_completion(conn, IBV_WC_SEND, &wc); if (ret) goto err_conn_delete; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection structure. */ ret = common_wait_for_conn_close_and_disconnect(&conn); if (ret) goto err_conn_delete; (void) printf("New value: %s\n", (char *)mem.mr_ptr + mem.data_offset); err_conn_delete: (void) rpma_conn_delete(&conn); goto err_cfg_delete; err_req_delete: (void) rpma_conn_req_delete(&req); err_cfg_delete: (void) rpma_conn_cfg_delete(&cfg); err_mr_dereg: (void) rpma_mr_dereg(&msg_mr); (void) rpma_mr_dereg(&mr); err_ep_shutdown: /* shutdown the endpoint */ (void) rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_free: free(msg_ptr); #ifdef USE_PMEM if (mem.is_pmem) { common_pmem_unmap_file(&mem); } else #endif /* USE_PMEM */ if (mem.mr_ptr != NULL) free(mem.mr_ptr); return ret; } rpma-1.3.0/examples/10-send-with-imm/000077500000000000000000000000001443364775400172115ustar00rootroot00000000000000rpma-1.3.0/examples/10-send-with-imm/CMakeLists.txt000066400000000000000000000022321443364775400217500ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2020 Fujitsu # Copyright 2021, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(send-with-imm C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c) add_example(client client.c ../common/common-conn.c) rpma-1.3.0/examples/10-send-with-imm/README.md000066400000000000000000000012141443364775400204660ustar00rootroot00000000000000Example of rpma send with immediate data === The rpma send with immediate data implements two parts of the process: - The client connects to the server and sends a message with immediate data to the server. - The server receives a message with immediate data from client. The immediate data is compared with the expected immediate data sent by the client as the private data during establishing the connection. **Note**: The client sends 0B message with immediate data if the 'word' argument is not specified. 
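The heart of the exchange can be reduced to the two fragments sketched below (a simplified view of the client and server sources in this directory; `conn`, `send_mr` and the already posted receive are assumed to exist and error handling is omitted):

```c
#include <arpa/inet.h>
#include <infiniband/verbs.h>
#include <librpma.h>
#include <stdint.h>

/* client side: the immediate data travels in the completion, next to the (possibly 0B) payload */
static int
send_value_with_imm(struct rpma_conn *conn, struct rpma_mr_local *send_mr, size_t len,
		uint32_t imm)
{
	return rpma_send_with_imm(conn, send_mr, 0, len, RPMA_F_COMPLETION_ALWAYS, imm, NULL);
}

/* server side: extract the immediate data from the receive completion */
static int
read_imm_from_wc(const struct ibv_wc *wc, uint32_t *imm)
{
	if (wc->opcode != IBV_WC_RECV || !(wc->wc_flags & IBV_WC_WITH_IMM))
		return -1; /* not a receive carrying immediate data */

	/* immediate data is delivered in network byte order */
	*imm = ntohl(wc->imm_data);
	return 0;
}
```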
## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $imm [word] ``` rpma-1.3.0/examples/10-send-with-imm/client.c000066400000000000000000000070041443364775400206340ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2020-2021 Fujitsu */ /* Copyright 2021-2022, Intel Corporation */ /* * client.c -- a client of the send-with-imm example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #include "common-conn.h" #define USAGE_STR "usage: %s [word]\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 4) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; char *word = NULL; uint64_t imm = strtoul(argv[3], NULL, 10); if (imm == ULONG_MAX && errno == ERANGE) { fprintf(stderr, "strtoul() overflow\n"); return -1; } if (imm > UINT32_MAX) { fprintf(stderr, "The provided immediate data is too big (%lu > %u)\n", imm, UINT32_MAX); return -1; } if (argc >= 5) word = argv[4]; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; struct rpma_mr_local *send_mr = NULL; struct ibv_wc wc; uint64_t len = 0; char *send = NULL; int ret; if (word) { /* prepare memory */ len = KILOBYTE; send = malloc_aligned(len); if (!send) return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; if (word) { /* register the memory */ ret = rpma_mr_reg(peer, send, KILOBYTE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) goto err_peer_delete; } /* * establish a new connection to a server and send an immediate data for validation * on the server side */ struct rpma_conn_private_data pdata; pdata.ptr = (uint32_t *)&imm; pdata.len = sizeof(uint32_t); ret = client_connect(peer, addr, port, NULL, &pdata, &conn); if (ret) goto err_mr_dereg; if (word) { /* send a message with immediate data to the server */ fprintf(stdout, "Sending value '%s' with immediate data '%u'\n", word, (uint32_t)imm); strncpy(send, word, KILOBYTE - 1); } else { /* send a 0B message with immediate data to the server */ fprintf(stdout, "Sending immediate data '%u'\n", (uint32_t)imm); } ret = rpma_send_with_imm(conn, send_mr, 0, len, RPMA_F_COMPLETION_ALWAYS, (uint32_t)imm, NULL); if (ret) goto err_conn_disconnect; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* prepare completions, get one and validate it */ ret = rpma_cq_wait(cq); if (ret) goto err_conn_disconnect; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_conn_disconnect; if (wc.status != IBV_WC_SUCCESS) { fprintf(stderr, "Received unexpected completion: %s\n", ibv_wc_status_str(wc.status)); ret = -1; goto err_conn_disconnect; } if (wc.opcode != IBV_WC_SEND) { fprintf(stderr, "Received unexpected type of operation (%d != %d)\n", wc.opcode, IBV_WC_SEND); ret = -1; } err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory regions */ (void) rpma_mr_dereg(&send_mr); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_mr_free: /* free the memory */ 
free(send); return ret; } rpma-1.3.0/examples/10-send-with-imm/server.c000066400000000000000000000104431443364775400206650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2020-2021 Fujitsu */ /* Copyright 2021-2022, Intel Corporation */ /* * server.c -- a server of the send-with-imm example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #include "common-conn.h" #define USAGE_STR "usage: %s \n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; /* prepare memory */ char *recv = malloc_aligned(KILOBYTE); if (!recv) return -1; /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_mr_local *recv_mr = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct ibv_wc wc; int ret; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, KILOBYTE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_mr_dereg; /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, NULL, &req); if (ret) goto err_ep_shutdown; /* prepare to receive a message with immediate data from the client */ ret = rpma_conn_req_recv(req, recv_mr, 0, KILOBYTE, NULL); if (ret) { rpma_conn_req_delete(&req); goto err_ep_shutdown; } /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, NULL, &conn); if (ret) goto err_ep_shutdown; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) goto err_conn_disconnect; if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event() returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; goto err_conn_disconnect; } /* get the expected immediate data from the connection's private data */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret) goto err_conn_disconnect; if (pdata.len < sizeof(uint32_t)) { fprintf(stderr, "Received connection's private data is too small (%u < %zu)\n", pdata.len, sizeof(uint32_t)); ret = -1; goto err_conn_disconnect; } uint32_t *exp_imm = pdata.ptr; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* prepare completions, get one and validate it */ ret = rpma_cq_wait(cq); if (ret) goto err_conn_disconnect; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_conn_disconnect; if (wc.status != IBV_WC_SUCCESS) { fprintf(stderr, "Received unexpected completion: %s\n", ibv_wc_status_str(wc.status)); ret = -1; goto err_conn_disconnect; } if (wc.opcode != IBV_WC_RECV) { fprintf(stderr, "Received unexpected type of operation (%d != %d)\n", wc.opcode, IBV_WC_RECV); ret = -1; goto err_conn_disconnect; } if (!(wc.wc_flags & IBV_WC_WITH_IMM)) { fprintf(stderr, "Received unexpected 
completion flag (no IBV_WC_WITH_IMM)\n"); ret = -1; goto err_conn_disconnect; } uint32_t imm = ntohl(wc.imm_data); if (imm != *exp_imm) { fprintf(stderr, "Received unexpected immediate data (%u != %u)\n", imm, *exp_imm); ret = -1; } else { if (wc.byte_len == 0) recv[0] = '\0'; printf("Received value '%s' with immediate data '%u'\n", recv, imm); } err_conn_disconnect: (void) common_wait_for_conn_close_and_disconnect(&conn); err_ep_shutdown: (void) rpma_ep_shutdown(&ep); err_mr_dereg: /* deregister the memory regions */ (void) rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(recv); return ret; } rpma-1.3.0/examples/11-write-with-imm/000077500000000000000000000000001443364775400174135ustar00rootroot00000000000000rpma-1.3.0/examples/11-write-with-imm/CMakeLists.txt000066400000000000000000000021671443364775400221610ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2021 Fujitsu # cmake_minimum_required(VERSION 3.3) project(write-with-imm C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c) add_example(client client.c ../common/common-conn.c) rpma-1.3.0/examples/11-write-with-imm/README.md000066400000000000000000000010311443364775400206650ustar00rootroot00000000000000Example of rpma write with immediate data === The rpma write with immediate data implements two parts of the process: - The client connects to the server and writes a message with immediate data to the server. - The server receives an immediate data from the client. The immediate data has the same value as the message written by the client so the immediate data is compared with the expected message. ## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $imm ``` rpma-1.3.0/examples/11-write-with-imm/client.c000066400000000000000000000074721443364775400210470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2021 Fujitsu */ /* Copyright 2021-2022, Intel Corporation */ /* * client.c -- a client of the write-with-imm example * * Please see README.md for a detailed description of this example. 
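 *
 * The client copies the 32-bit value into the source buffer, writes it into the server's
 * memory with rpma_write_with_imm() and passes the very same value as immediate data, so the
 * server can cross-check both.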
*/ #include #include #include #include #include #include "common-conn.h" #define USAGE_STR "usage: %s \n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 4) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t imm = strtoul(argv[3], NULL, 10); if (imm == ULONG_MAX && errno == ERANGE) { fprintf(stderr, "strtoul() overflow\n"); return -1; } if (imm > UINT32_MAX) { fprintf(stderr, "The provided immediate data is too big (%lu > %u)\n", imm, UINT32_MAX); return -1; } /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_mr_local *src_mr = NULL; struct rpma_mr_remote *dst_mr = NULL; struct rpma_conn *conn = NULL; struct ibv_wc wc; int ret; /* prepare memory */ uint32_t *src = malloc_aligned(KILOBYTE); if (!src) return -1; /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, src, KILOBYTE, RPMA_MR_USAGE_WRITE_SRC, &src_mr); if (ret) goto err_peer_delete; /* establish a new connection to a server */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* obtain the remote memory description */ struct rpma_conn_private_data pdata; ret = rpma_conn_get_private_data(conn, &pdata); if (ret) goto err_conn_disconnect; if (pdata.len < sizeof(struct common_data)) { fprintf(stderr, "Received connection's private data is too small (%u < %zu)\n", pdata.len, sizeof(struct common_data)); ret = -1; goto err_conn_disconnect; } struct common_data *dst_data = pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &dst_mr); if (ret) goto err_conn_disconnect; /* write a message with immediate data to the server */ memcpy(src, (uint32_t *)&imm, sizeof(uint32_t)); fprintf(stdout, "Writing value '%u' with immediate data '%u'\n", *src, (uint32_t)imm); ret = rpma_write_with_imm(conn, dst_mr, dst_data->data_offset, src_mr, 0, KILOBYTE, RPMA_F_COMPLETION_ALWAYS, (uint32_t)imm, NULL); if (ret) goto err_mr_remote_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_mr_remote_delete; /* prepare completions, get one and validate it */ ret = rpma_cq_wait(cq); if (ret) goto err_mr_remote_delete; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_mr_remote_delete; if (wc.status != IBV_WC_SUCCESS) { fprintf(stderr, "Received unexpected completion: %s\n", ibv_wc_status_str(wc.status)); ret = -1; goto err_mr_remote_delete; } if (wc.opcode != IBV_WC_RDMA_WRITE) { fprintf(stderr, "Received unexpected type of operation (%d != %d)\n", wc.opcode, IBV_WC_RDMA_WRITE); ret = -1; } err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&dst_mr); err_conn_disconnect: (void) common_disconnect_and_wait_for_conn_close(&conn); err_mr_dereg: /* deregister the memory regions */ (void) rpma_mr_dereg(&src_mr); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(src); return ret; } rpma-1.3.0/examples/11-write-with-imm/server.c000066400000000000000000000113221443364775400210640ustar00rootroot00000000000000// SPDX-License-Identifier: 
BSD-3-Clause /* Copyright (c) 2021 Fujitsu */ /* Copyright 2021-2022, Intel Corporation */ /* * server.c -- a server of the write-with-imm example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #include "common-conn.h" #define USAGE_STR "usage: %s \n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; /* prepare memory */ uint32_t *dst = malloc_aligned(KILOBYTE); if (!dst) return -1; uint32_t *recv = malloc_aligned(1); if (!recv) { free(dst); return -1; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_mr_local *dst_mr = NULL; struct rpma_mr_local *recv_mr = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct ibv_wc wc; int ret; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, dst, KILOBYTE, RPMA_MR_USAGE_WRITE_DST, &dst_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, recv, 1, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) { rpma_mr_dereg(&dst_mr); goto err_peer_delete; } /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(dst_mr, &mr_desc_size); if (ret) goto err_mr_dereg; /* calculate data for the client write */ struct common_data dst_data = {0}; dst_data.data_offset = 0; dst_data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(dst_mr, &dst_data.descriptors[0]); if (ret) goto err_mr_dereg; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_mr_dereg; /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, NULL, &req); if (ret) goto err_ep_shutdown; /* prepare to receive an immediate data from the client */ ret = rpma_conn_req_recv(req, recv_mr, 0, 1, NULL); if (ret) { rpma_conn_req_delete(&req); goto err_ep_shutdown; } /* accept the connection request and obtain the connection object */ struct rpma_conn_private_data pdata; pdata.ptr = &dst_data; pdata.len = sizeof(struct common_data); ret = rpma_conn_req_connect(&req, &pdata, &conn); if (ret) goto err_ep_shutdown; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) goto err_conn_disconnect; if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; goto err_conn_disconnect; } /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* prepare completions, get one and validate it */ ret = rpma_cq_wait(cq); if (ret) goto err_conn_disconnect; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) goto err_conn_disconnect; if (wc.status != IBV_WC_SUCCESS) { fprintf(stderr, "Received unexpected completion: %s\n", ibv_wc_status_str(wc.status)); ret = -1; goto err_conn_disconnect; } if (wc.opcode != IBV_WC_RECV_RDMA_WITH_IMM) { fprintf(stderr, "Received unexpected 
type of operation (%d != %d)\n", wc.opcode, IBV_WC_RECV_RDMA_WITH_IMM); ret = -1; goto err_conn_disconnect; } if (!(wc.wc_flags & IBV_WC_WITH_IMM)) { fprintf(stderr, "Received an unexpected completion flag (no IBV_WC_WITH_IMM)\n"); ret = -1; goto err_conn_disconnect; } uint32_t *exp_imm = dst; uint32_t imm = ntohl(wc.imm_data); if (imm != *exp_imm) { fprintf(stderr, "Received unexpected immediate data (%u != %u)\n", imm, *exp_imm); ret = -1; } else { printf("The value '%u' was written together with immediate data '%u'\n", *exp_imm, imm); } err_conn_disconnect: (void) common_wait_for_conn_close_and_disconnect(&conn); err_ep_shutdown: (void) rpma_ep_shutdown(&ep); err_mr_dereg: /* deregister the memory regions */ (void) rpma_mr_dereg(&recv_mr); (void) rpma_mr_dereg(&dst_mr); err_peer_delete: /* delete the peer object */ (void) rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(dst); free(recv); return ret; } rpma-1.3.0/examples/12-receive-completion-queue/000077500000000000000000000000001443364775400214445ustar00rootroot00000000000000rpma-1.3.0/examples/12-receive-completion-queue/CMakeLists.txt000066400000000000000000000023451443364775400242100ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(receive-completion-queue C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c receive-completion-queue-common.c) add_example(client client.c ../common/common-conn.c receive-completion-queue-common.c ../common/common-utils.c) rpma-1.3.0/examples/12-receive-completion-queue/README.md000066400000000000000000000020251443364775400227220ustar00rootroot00000000000000Example of using receive completion queue (RCQ) === The RCQ example implements two parts of the messaging process: - The server starts a listening endpoint and waits for an incoming connection. When a new connection request appears it is accepted. The client sends to the server its current counter value. When the server receives the message from the client, its content is incremented and send back to the client. When the server receives the I_M_DONE message it waits for disconnection. - The client connects to the server and sends to it its current counter value. When the client gets the new value from the server it repeats the process for the configured number of rounds. When it is quitting, it send the I_M_DONE message and disconnects. **Note**: In this example, we use two separate completion queues: - CQ for completions of sends and - RCQ for completions of receives. 
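A single round of the ping-pong boils down to the sketch below (a simplified view of the client source in this directory; connection setup, memory registration and most error handling are left out):

```c
#include <infiniband/verbs.h>
#include <librpma.h>
#include <stdint.h>

/* one client round: post a receive, send the counter, reap one completion from each queue */
static int
ping_pong_round(struct rpma_conn *conn, struct rpma_mr_local *recv_mr,
		struct rpma_mr_local *send_mr, uint64_t *recv, uint64_t *send, uint64_t *counter)
{
	struct rpma_cq *cq, *rcq;
	struct ibv_wc wc;

	/* the receive has to be posted before the server is able to answer */
	if (rpma_recv(conn, recv_mr, 0, sizeof(*recv), recv))
		return -1;

	*send = *counter;
	if (rpma_send(conn, send_mr, 0, sizeof(*send), RPMA_F_COMPLETION_ALWAYS, NULL))
		return -1;

	/* the send completion shows up on the connection's main CQ ... */
	if (rpma_conn_get_cq(conn, &cq) || rpma_cq_wait(cq) ||
	    rpma_cq_get_wc(cq, 1, &wc, NULL) ||
	    wc.status != IBV_WC_SUCCESS || wc.opcode != IBV_WC_SEND)
		return -1;

	/* ... while the receive completion shows up on the separate RCQ */
	if (rpma_conn_get_rcq(conn, &rcq) || rpma_cq_wait(rcq) ||
	    rpma_cq_get_wc(rcq, 1, &wc, NULL) ||
	    wc.status != IBV_WC_SUCCESS || wc.opcode != IBV_WC_RECV)
		return -1;

	*counter = *recv; /* the server replied with counter + 1 */
	return 0;
}
```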
## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $start_value $rounds [] ``` rpma-1.3.0/examples/12-receive-completion-queue/client.c000066400000000000000000000075531443364775400231000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * client.c -- a client of the receive completion queue example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "receive-completion-queue-common.h" #include "common-utils.h" #define USAGE_STR "usage: %s []\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 5) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t counter = strtoul_noerror(argv[3]); uint64_t rounds = strtoul_noerror(argv[4]); uint64_t sleep_usec = 0; if (argc >= 6) sleep_usec = strtoul_noerror(argv[5]); int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_peer_delete; } /* create a new connection configuration and set RCQ size */ struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, cfg, NULL, &conn); if (ret) goto err_cfg_delete; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* get the connection's RCQ */ struct rpma_cq *rcq = NULL; ret = rpma_conn_get_rcq(conn, &rcq); if (ret) goto err_conn_disconnect; while (--rounds) { /* prepare a receive for the server's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send a message to the server */ (void) printf("CLIENT: Value sent: %" PRIu64 "\n", counter); *send = counter; ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; /* get one send completion and validate it */ ret = get_wc_and_validate(cq, IBV_WC_SEND, "rpma_send()"); if (ret) break; /* get one receive completion and validate it */ ret = get_wc_and_validate(rcq, IBV_WC_RECV, "rpma_recv()"); if (ret) break; /* copy the new value of the counter and print it out */ counter = *recv; printf("CLIENT: Value received: %" PRIu64 "\n", counter); /* sleep if required */ if (sleep_usec > 0) (void) usleep(sleep_usec); } /* send the I_M_DONE message */ *send = I_M_DONE; ret |= rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); 
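	/*
	 * From this point on, the error paths above fall through the same cleanup sequence:
	 * each label below releases one resource (in the reverse order of its creation) and
	 * any failure is OR-ed into ret so that it is not lost.
	 */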
err_conn_disconnect: ret |= common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: ret |= rpma_conn_cfg_delete(&cfg); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/12-receive-completion-queue/receive-completion-queue-common.c000066400000000000000000000015051443364775400300120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * receive-completion-queue-common.c -- a common declarations for the 12 example */ #include #include #include "receive-completion-queue-common.h" int get_wc_and_validate(struct rpma_cq *cq, enum ibv_wc_opcode opcode, char *func_name) { struct ibv_wc wc; int ret = rpma_cq_wait(cq); if (ret) return ret; ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) return ret; if (wc.status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "%s failed: %s\n", func_name, ibv_wc_status_str(wc.status)); return -1; } if (wc.opcode != opcode) { (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc.opcode, (uintptr_t)opcode); return -1; } return 0; } rpma-1.3.0/examples/12-receive-completion-queue/receive-completion-queue-common.h000066400000000000000000000011541443364775400300170ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * receive-completion-queue-common.h -- a common declarations for the 12 example */ #ifndef EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON #define EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON #define MSG_SIZE sizeof(uint64_t) /* Both buffers are allocated one after another. */ #define RECV_OFFSET 0 #define SEND_OFFSET MSG_SIZE #define RCQ_SIZE 10 #define I_M_DONE (uint64_t)UINT64_MAX int get_wc_and_validate(struct rpma_cq *cq, enum ibv_wc_opcode opcode, char *func_name); #endif /* EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON */ rpma-1.3.0/examples/12-receive-completion-queue/server.c000066400000000000000000000110261443364775400231160ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * server.c -- a server of the receive-completion-queue example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include #define USAGE_STR "usage: %s \n" #include "common-conn.h" #include "receive-completion-queue-common.h" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct rpma_conn *conn = NULL; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_ep_shutdown; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_ep_shutdown; } /* create a new connection configuration and set RCQ size */ struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, cfg, &req); if (ret) goto err_cfg_delete; /* * Post an initial receive to be prepared for the first message of the client's ping-pong. */ ret = rpma_conn_req_recv(req, recv_mr, 0, MSG_SIZE, recv); if (ret) { (void) rpma_conn_req_delete(&req); goto err_cfg_delete; } /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, NULL, &conn); if (ret) goto err_cfg_delete; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) goto err_conn_disconnect; if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_disconnect; } /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; /* get the connection's RCQ */ struct rpma_cq *rcq = NULL; ret = rpma_conn_get_rcq(conn, &rcq); if (ret) goto err_conn_disconnect; while (1) { /* get one receive completion and validate it */ ret = get_wc_and_validate(rcq, IBV_WC_RECV, "rpma_recv()"); if (ret) break; if (*recv == I_M_DONE) break; /* print the received old value of the client's counter */ (void) printf("SERVER: Value received: %" PRIu64 "\n", *recv); /* calculate a new counter's value */ *send = *recv + 1; /* prepare a receive for the client's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send the new value to the client */ (void) printf("SERVER: Value sent: %" PRIu64 "\n", *send); /* * XXX when using RPMA_F_COMPLETION_ON_ERROR * after few rounds rpma_send() returns ENOMEM. 
*/ ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; /* get one send completion and validate it */ ret = get_wc_and_validate(cq, IBV_WC_SEND, "rpma_send()"); if (ret) break; } err_conn_disconnect: ret |= common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: ret |= rpma_conn_cfg_delete(&cfg); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_ep_shutdown: ret |= rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_free: free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/12scch-receive-completion-queue/000077500000000000000000000000001443364775400223055ustar00rootroot00000000000000rpma-1.3.0/examples/12scch-receive-completion-queue/CMakeLists.txt000066400000000000000000000022411443364775400250440ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(receive-completion-queue C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c) add_example(client client.c ../common/common-conn.c ../common/common-utils.c) rpma-1.3.0/examples/12scch-receive-completion-queue/README.md000066400000000000000000000023741443364775400235720ustar00rootroot00000000000000Example of using receive completion queue (RCQ) with shared completion channel === The RCQ example implements two parts of the messaging process: - The server starts a listening endpoint and waits for an incoming connection. When a new connection request appears it is accepted. The client sends to the server its current counter value. When the server receives the message from the client, its content is incremented and send back to the client. When the server receives the I_M_DONE message it waits for disconnection. - The client connects to the server and sends to it its current counter value. When the client gets the new value from the server it repeats the process for the configured number of rounds. When it is quitting, it send the I_M_DONE message and disconnects. **Note**: In this example, we use the shared completion event channel for CQ and RCQ: - CQ for completions of sends and - RCQ for completions of receives. The mechanism for collecting and processing completions is implemented in the wait_and_validate_completion() function. We use the rpma_conn_wait() function to collect completions. 
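A minimal sketch of that mechanism (assuming `conn` is an already established connection with the shared completion channel enabled before connecting via `rpma_conn_cfg_set_compl_channel(cfg, true)`):

```c
struct rpma_cq *cq = NULL;	/* set to either the main CQ or the RCQ */
struct ibv_wc wc;

/* block until any of the connection's completion queues has something to report */
(void) rpma_conn_wait(conn, 0, &cq, NULL);
(void) rpma_cq_get_wc(cq, 1, &wc, NULL);

/* wc.opcode distinguishes send (IBV_WC_SEND) from receive (IBV_WC_RECV) completions */
```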
## Usage ```bash [user@server]$ ./server $server_address $port ``` ```bash [user@client]$ ./client $server_address $port $start_value $rounds [] ``` rpma-1.3.0/examples/12scch-receive-completion-queue/client.c000066400000000000000000000072521443364775400237350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * client.c -- a client of the receive completion queue example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include "common-conn.h" #include "common-utils.h" #include "receive-completion-queue-common.h" #define USAGE_STR "usage: %s []\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 5) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t counter = strtoul_noerror(argv[3]); uint64_t rounds = strtoul_noerror(argv[4]); uint64_t sleep_usec = 0; if (argc >= 6) sleep_usec = strtoul_noerror(argv[5]); int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; struct ibv_wc wc; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_peer_delete; } /* create a new connection configuration and set RCQ size */ struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; ret = rpma_conn_cfg_set_compl_channel(cfg, true); if (ret) goto err_cfg_delete; /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, cfg, NULL, &conn); if (ret) goto err_cfg_delete; while (--rounds) { /* prepare a receive for the server's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send a message to the server */ (void) printf("CLIENT: Value sent: %" PRIu64 "\n", counter); *send = counter; ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; /* get one send completion and validate it */ ret = wait_and_validate_completion(conn, IBV_WC_SEND, &wc); if (ret) break; /* get one receive completion and validate it */ ret = wait_and_validate_completion(conn, IBV_WC_RECV, &wc); if (ret) break; /* copy the new value of the counter and print it out */ counter = *recv; printf("CLIENT: Value received: %" PRIu64 "\n", counter); /* sleep if required */ if (sleep_usec > 0) (void) usleep(sleep_usec); } /* send the I_M_DONE message */ *send = I_M_DONE; ret |= rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); ret |= common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: ret |= rpma_conn_cfg_delete(&cfg); err_mr_dereg: /* deregister the memory regions */ ret |= 
rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/12scch-receive-completion-queue/receive-completion-queue-common.h000066400000000000000000000010161443364775400306550ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * receive-completion-queue-common.h -- a common declarations for the 12 example */ #ifndef EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON #define EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON #define MSG_SIZE sizeof(uint64_t) /* Both buffers are allocated one after another. */ #define RECV_OFFSET 0 #define SEND_OFFSET MSG_SIZE #define RCQ_SIZE 10 #define I_M_DONE (uint64_t)UINT64_MAX #endif /* EXAMPLES_RECEIVE_COMPLETION_QUEUE_COMMON */ rpma-1.3.0/examples/12scch-receive-completion-queue/server.c000066400000000000000000000105521443364775400237620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * server.c -- a server of the receive-completion-queue example * * Please see README.md for a detailed description of this example. */ #include #include #include #include #include #define USAGE_STR "usage: %s \n" #include "common-conn.h" #include "receive-completion-queue-common.h" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; struct rpma_conn *conn = NULL; struct ibv_wc wc; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_peer_delete; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_ep_shutdown; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_ep_shutdown; } /* create a new connection configuration and set RCQ size */ struct rpma_conn_cfg *cfg = NULL; ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_mr_dereg; ret = rpma_conn_cfg_set_rcq_size(cfg, RCQ_SIZE); if (ret) goto err_cfg_delete; ret = rpma_conn_cfg_set_compl_channel(cfg, true); if (ret) goto err_cfg_delete; /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, cfg, &req); if (ret) goto err_cfg_delete; /* * Post an initial receive to be prepared for the first message of the client's ping-pong. 
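	 * (the receive has to be posted before the client's first send can arrive)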
*/ ret = rpma_conn_req_recv(req, recv_mr, 0, MSG_SIZE, recv); if (ret) { (void) rpma_conn_req_delete(&req); goto err_cfg_delete; } /* accept the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, NULL, &conn); if (ret) goto err_cfg_delete; /* wait for the connection to be established */ ret = rpma_conn_next_event(conn, &conn_event); if (ret) goto err_conn_disconnect; if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); goto err_conn_disconnect; } while (1) { /* get one receive completion and validate it */ ret = wait_and_validate_completion(conn, IBV_WC_RECV, &wc); if (ret) break; if (*recv == I_M_DONE) break; /* print the received old value of the client's counter */ (void) printf("SERVER: Value received: %" PRIu64 "\n", *recv); /* calculate a new counter's value */ *send = *recv + 1; /* prepare a receive for the client's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send the new value to the client */ (void) printf("SERVER: Value sent: %" PRIu64 "\n", *send); /* * XXX when using RPMA_F_COMPLETION_ON_ERROR * after few rounds rpma_send() returns ENOMEM. */ ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; /* get one send completion and validate it */ ret = wait_and_validate_completion(conn, IBV_WC_SEND, &wc); if (ret) break; } err_conn_disconnect: ret |= common_disconnect_and_wait_for_conn_close(&conn); err_cfg_delete: ret |= rpma_conn_cfg_delete(&cfg); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_ep_shutdown: ret |= rpma_ep_shutdown(&ep); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_free: free(send); free(recv); return ret ? 
-1 : 0; } rpma-1.3.0/examples/13-messages-ping-pong-with-srq/000077500000000000000000000000001443364775400220115ustar00rootroot00000000000000rpma-1.3.0/examples/13-messages-ping-pong-with-srq/CMakeLists.txt000066400000000000000000000024571443364775400245610ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Fujitsu # Copyright 2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) project(messages-ping-pong-with-srq C) set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${CMAKE_SOURCE_DIR}/../cmake ${CMAKE_SOURCE_DIR}/../../cmake) include(${CMAKE_SOURCE_DIR}/../../cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=gnu99") find_package(PkgConfig QUIET) if(PKG_CONFIG_FOUND) pkg_check_modules(LIBRPMA librpma) pkg_check_modules(LIBIBVERBS libibverbs) endif() if(NOT LIBRPMA_FOUND) find_package(LIBRPMA REQUIRED librpma) endif() if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() link_directories(${LIBRPMA_LIBRARY_DIRS} ${LIBIBVERBS_LIBRARY_DIRS}) function(add_example name) set(srcs ${ARGN}) add_executable(${name} ${srcs}) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBIBVERBS_INCLUDE_DIRS} ../common) target_link_libraries(${name} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) endfunction() add_example(server server.c ../common/common-conn.c ../common/common-epoll.c) add_example(client client.c ../common/common-conn.c ../common/common-messages-ping-pong.c ../common/common-utils.c) rpma-1.3.0/examples/13-messages-ping-pong-with-srq/README.md000066400000000000000000000020671443364775400232750ustar00rootroot00000000000000Example of using messaging with shared RQ === The messages ping pong example with shared RQ implements two parts of the messaging process: - The server starts a listening endpoint and waits for multiple incoming connections. When a new connection request appears it is accepted. The client sends to the server its current counter value. When the server receives the message from the client, its content is incremented and send back to the client. When the server receives the I_M_DONE message it disconnects. - The client connects to the server and sends to it its current counter value. When the client gets the new value from the server it repeats the process for the configured number of rounds. When it is quitting, it sends the I_M_DONE message and waits for disconnection. **Note**: The server will end listening when no new connection request appears after the default (5s) or specified seconds. ## Usage ```bash [user@server]$ ./server $server_address $port [] ``` ```bash [user@client]$ ./client $server_address $port $seed $rounds [] ``` rpma-1.3.0/examples/13-messages-ping-pong-with-srq/client.c000066400000000000000000000065211443364775400234370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* Copyright 2022, Intel Corporation */ /* * client.c -- a client of the messages-ping-pong-srq example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include "common-conn.h" #include "common-messages-ping-pong.h" #include "common-utils.h" #define USAGE_STR "usage: %s []\n" int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 5) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; uint64_t cntr = strtoul_noerror(argv[3]); uint64_t rounds = strtoul_noerror(argv[4]); uint64_t sleep_usec = 0; if (argc >= 6) sleep_usec = strtoul_noerror(argv[5]); int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; uint64_t *recv = malloc_aligned(MSG_SIZE); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(MSG_SIZE); if (send == NULL) { free(recv); return -1; } /* * lookup an ibv_context via the address and create a new peer using it */ ret = client_peer_via_address(addr, &peer); if (ret) goto err_mr_free; /* register the memory */ ret = rpma_mr_reg(peer, recv, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_peer_delete; ret = rpma_mr_reg(peer, send, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr); if (ret) { (void) rpma_mr_dereg(&recv_mr); goto err_peer_delete; } /* establish a new connection to a server listening at addr:port */ ret = client_connect(peer, addr, port, NULL, NULL, &conn); if (ret) goto err_mr_dereg; /* get the connection's main CQ */ struct rpma_cq *cq = NULL; ret = rpma_conn_get_cq(conn, &cq); if (ret) goto err_conn_disconnect; pid_t pid = getpid(); do { /* prepare a receive for the server's response */ ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv); if (ret) break; /* send a message to the server */ (void) printf("[client #%d] value sent: %" PRIu64 "\n", pid, cntr); *send = cntr; ret = rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, NULL); if (ret) break; int send_cmpl = 0; int recv_cmpl = 0; /* get completions and process them */ ret = wait_and_process_completions(cq, recv, &send_cmpl, &recv_cmpl); if (ret) break; cntr = *recv; printf("[client #%d] value received: %" PRIu64 "\n", pid, cntr); /* sleep if required */ if (sleep_usec > 0) (void) usleep(sleep_usec); } while (--rounds); /* send the M_DONE message */ *send = I_M_DONE; ret |= rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); err_conn_disconnect: ret |= common_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory regions */ ret |= rpma_mr_dereg(&send_mr); ret |= rpma_mr_dereg(&recv_mr); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_mr_free: /* free the memory */ free(send); free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/13-messages-ping-pong-with-srq/server.c000066400000000000000000000177741443364775400235030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* Copyright 2022, Intel Corporation */ /* * server.c -- a server of the messages-ping-pong-with-srq example * * Please see README.md for a detailed description of this example. 
*/ #include #include #include #include #include #include #include "common-conn.h" #include "common-messages-ping-pong.h" #include "common-epoll.h" #define USAGE_STR "usage: %s []\n" #define CLIENT_MAX 10 #define TIMEOUT_5S 5000 struct connect_context { struct rpma_conn *conn; uint32_t qp_num; }; static struct connect_context conn_ctxs[CLIENT_MAX]; static int num_clients; static int find_qp(uint32_t qp_num) { int i; if (num_clients == 1) return 0; for (i = 0; i < num_clients; ++i) if (conn_ctxs[i].qp_num == qp_num) return i; (void) fprintf(stderr, "Unable to find the connection [qp#%d]\n", qp_num); return -1; } static int add_fd_to_epoll(int epoll_fd, int ep_fd) { struct epoll_event event; event.events = EPOLLIN; /* set O_NONBLOCK flag for the provided fd */ if (fd_set_nonblock(ep_fd)) return -1; if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, ep_fd, &event)) { perror("epoll_ctl(EPOLL_CTL_ADD)"); return -1; } return 0; } static int handle_incoming_connections(struct rpma_ep *ep, struct rpma_conn_cfg *cfg, struct rpma_mr_local *recv_mr, uint64_t *recv) { size_t i, offset; struct connect_context *conn_ctx = NULL; struct rpma_srq *srq = NULL; int ret; /* find the first free slot */ for (i = 0; i < CLIENT_MAX; ++i) { conn_ctx = &conn_ctxs[i]; if (conn_ctx->conn != NULL) continue; break; } /* conn_ctx->conn != NULL means that no free slot is found when i reaches CLIENT_MAX */ if (conn_ctx->conn != NULL) { (void) fprintf(stderr, "No free slot for a new connection request.\n"); return -1; } offset = i; /* get the shared RQ object from the connection configuration */ ret = rpma_conn_cfg_get_srq(cfg, &srq); if (ret) return ret; /* * Put an initial receive for a connection to be prepared for * the first message of the client's ping-pong. */ ret = rpma_srq_recv(srq, recv_mr, offset * MSG_SIZE, MSG_SIZE, recv + offset); if (ret) return ret; /* * Wait for an incoming connection request, * accept it and wait for its establishment. */ ret = server_accept_connection(ep, cfg, NULL, &conn_ctx->conn); if (ret) return ret; /* get the qp_num of each connection */ ret = rpma_conn_get_qp_num(conn_ctx->conn, &conn_ctx->qp_num); if (ret) return ret; return 0; } int main(int argc, char *argv[]) { /* validate parameters */ if (argc < 3) { fprintf(stderr, USAGE_STR, argv[0]); exit(-1); } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* read common parameters */ char *addr = argv[1]; char *port = argv[2]; int ret; int timeout = argv[3] ? 
atoi(argv[3]) * 1000 : TIMEOUT_5S; if (timeout <= 0) { (void) fprintf(stderr, " should be a positive number of seconds (%s given)\n", argv[3]); return -1; } /* prepare memory */ struct rpma_mr_local *recv_mr, *send_mr; size_t size = CLIENT_MAX * MSG_SIZE; uint64_t *recv = malloc_aligned(size); if (recv == NULL) return -1; uint64_t *send = malloc_aligned(size); if (send == NULL) { ret = -1; goto err_free_recv; } /* RPMA resources */ struct rpma_peer *peer = NULL; struct rpma_srq *srq = NULL; struct rpma_conn_cfg *cfg = NULL; struct rpma_ep *ep = NULL; struct rpma_cq *rcq = NULL; int i, offset, index, num_got, total_cnt = 0; int ep_fd, epoll_fd; struct epoll_event event; uint64_t *recv_ptr = NULL; struct ibv_wc wc[CLIENT_MAX]; /* * lookup an ibv_context via the address and create a new peer using it */ ret = server_peer_via_address(addr, &peer); if (ret) goto err_free_send; /* create a shared RQ object */ ret = rpma_srq_new(peer, NULL, &srq); if (ret) goto err_peer_delete; /* create a new connection configuration */ ret = rpma_conn_cfg_new(&cfg); if (ret) goto err_srq_delete; /* set the shared RQ object for the connection configuration */ ret = rpma_conn_cfg_set_srq(cfg, srq); if (ret) goto err_conn_cfg_delete; /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(peer, addr, port, &ep); if (ret) goto err_conn_cfg_delete; /* register the memory */ ret = rpma_mr_reg(peer, recv, size, RPMA_MR_USAGE_RECV, &recv_mr); if (ret) goto err_ep_shutdown; ret = rpma_mr_reg(peer, send, size, RPMA_MR_USAGE_SEND, &send_mr); if (ret) goto err_recv_mr_dereg; /* create an epoll file descriptor */ epoll_fd = epoll_create1(EPOLL_CLOEXEC); if (epoll_fd == -1) { perror("epoll_create1(EPOLL_CLOEXEC)"); ret = -1; goto err_send_mr_dereg; } /* get the endpoint's event file descriptor and add it to epoll */ ret = rpma_ep_get_fd(ep, &ep_fd); if (ret) goto err_close_epoll_fd; ret = add_fd_to_epoll(epoll_fd, ep_fd); if (ret) goto err_close_epoll_fd; /* process epoll's events */ while ((ret = epoll_wait(epoll_fd, &event, 1, timeout)) == 1) { ret = handle_incoming_connections(ep, cfg, recv_mr, recv); if (ret) goto err_conn_disconnect; num_clients++; (void) printf("The client #%i has been connected.\n", num_clients); } (void) printf("Server ended listening.\n"); /* get the receive CQ of rpma_srq object */ ret = rpma_srq_get_rcq(srq, &rcq); if (ret) goto err_conn_disconnect; while (!ret && total_cnt < num_clients) { /* wait for the completion to be ready */ ret = rpma_cq_wait(rcq); if (ret) break; ret = rpma_cq_get_wc(rcq, num_clients, wc, &num_got); if (ret) { /* lack of completion is not an error */ if (ret == RPMA_E_NO_COMPLETION) { ret = 0; continue; } break; } /* validate received completions */ for (i = 0; i < num_got; ++i) { if (wc[i].status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "rpma_srq_recv() failed: %s\n", ibv_wc_status_str(wc[i].status)); ret = -1; break; } if (wc[i].opcode != IBV_WC_RECV) { (void) fprintf(stderr, "received unexpected wc.opcode value (%d != %d)\n", wc[i].opcode, IBV_WC_RECV); ret = -1; break; } index = find_qp(wc[i].qp_num); if (index == -1) { ret = -1; break; } recv_ptr = (uint64_t *)wc[i].wr_id; offset = recv_ptr - recv; if (*recv_ptr == I_M_DONE) { total_cnt++; continue; } (void) printf("[server] value received: %" PRIu64 " from the connection [qp#%d]\n", *recv_ptr, conn_ctxs[index].qp_num); /* prepare a receive for the client's request */ ret = rpma_srq_recv(srq, recv_mr, (size_t)offset * MSG_SIZE, MSG_SIZE, recv_ptr); if (ret) break; *(send + offset) = *recv_ptr + 1; 
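			/* the reply goes back through the per-connection slot selected by 'offset' */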
(void) printf("[server] value sent: %" PRIu64 " from the connection [qp#%d]\n", *(send + offset), conn_ctxs[index].qp_num); /* send a message to the client */ ret = rpma_send(conn_ctxs[index].conn, send_mr, (size_t)offset * MSG_SIZE, MSG_SIZE, RPMA_F_COMPLETION_ON_ERROR, NULL); if (ret) break; } } if (total_cnt != num_clients) { (void) fprintf(stderr, "Unable to receive data from all connections\n"); ret = -1; } err_conn_disconnect: /* disconnect all remaining client's */ for (i = 0; i < CLIENT_MAX; ++i) { if (conn_ctxs[i].conn == NULL) continue; (void) rpma_conn_disconnect(conn_ctxs[i].conn); (void) rpma_conn_delete(&conn_ctxs[i].conn); } ret |= epoll_ctl(epoll_fd, EPOLL_CTL_DEL, ep_fd, NULL); err_close_epoll_fd: ret |= close(epoll_fd); epoll_fd = -1; err_send_mr_dereg: ret |= rpma_mr_dereg(&send_mr); err_recv_mr_dereg: ret |= rpma_mr_dereg(&recv_mr); err_ep_shutdown: ret |= rpma_ep_shutdown(&ep); err_conn_cfg_delete: ret |= rpma_conn_cfg_delete(&cfg); err_srq_delete: ret |= rpma_srq_delete(&srq); err_peer_delete: /* delete the peer object */ ret |= rpma_peer_delete(&peer); err_free_send: free(send); err_free_recv: free(recv); return ret ? -1 : 0; } rpma-1.3.0/examples/CMakeLists.txt000066400000000000000000000166011443364775400170550ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # Copyright 2021-2022, Fujitsu # add_flag(-Wall) add_flag(-Wpointer-arith) add_flag(-Wsign-compare) add_flag(-Wunreachable-code-return) add_flag(-Wmissing-variable-declarations) add_flag(-fno-common) add_flag(-Wunused-macros) add_flag(-Wsign-conversion) add_flag(-ggdb DEBUG) add_flag(-DDEBUG DEBUG) add_flag("-U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2" RELEASE) include(${CMAKE_SOURCE_DIR}/cmake/functions.cmake) # set LIBRT_LIBRARIES if linking with librt is required check_if_librt_is_required() add_custom_target(examples) include_directories(${LIBRPMA_INCLUDE_DIRS}) link_directories(${LIBRPMA_LIBRARY_DIRS}) file(GLOB src_files ${CMAKE_CURRENT_SOURCE_DIR}/*/*.[ch]) # Filter out protobuf-c generated files. 
# Starting from CMake v3.6 we could use: # list(FILTER src_files EXCLUDE REGEX ".*(pb-c).*") # but we require CMake v3.3, so we have to do it # in the following way: foreach(file IN LISTS src_files) if(NOT file MATCHES ".*(pb-c).*") set(rpma_src_files "${file};${rpma_src_files}") endif() endforeach() add_cstyle(examples-all ${rpma_src_files}) add_check_whitespace(examples-all ${rpma_src_files}) function(add_example) set(options USE_LIBPROTOBUFC) set(oneValueArgs NAME BIN) set(multiValueArgs SRCS) cmake_parse_arguments(EXAMPLE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(target example-${EXAMPLE_NAME}-${EXAMPLE_BIN}) if (EXAMPLE_USE_LIBPROTOBUFC AND NOT LIBPROTOBUFC_FOUND) message(STATUS "${target} skipped - no libprotobuf-c found") return() endif() prepend(srcs ${CMAKE_CURRENT_SOURCE_DIR} ${srcs}) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} common/common-conn.c) if (LIBPMEM_FOUND OR LIBPMEM2_FOUND) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} common/common-map_file_with_signature_check.c common/common-hello.c) endif() if (LIBPMEM2_FOUND) add_executable(${target} ${EXAMPLE_SRCS} common/common-pmem2_map_file.c) elseif (LIBPMEM_FOUND) add_executable(${target} ${EXAMPLE_SRCS} common/common-pmem_map_file.c) else() add_executable(${target} ${EXAMPLE_SRCS}) endif() add_dependencies(examples ${target}) set_target_properties(${target} PROPERTIES OUTPUT_NAME ${EXAMPLE_BIN} RUNTIME_OUTPUT_DIRECTORY ${EXAMPLE_NAME}) target_link_libraries(${target} ${LIBRPMA_LIBRARIES} ${LIBRT_LIBRARIES} ${LIBIBVERBS_LIBRARIES}) target_include_directories(${target} PRIVATE common ${LIBRPMA_SOURCE_DIR} ${LIBIBVERBS_INCLUDE_DIRS}) if(LIBPMEM2_FOUND) target_include_directories(${target} PRIVATE ${LIBPMEM2_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBPMEM2_LIBRARIES}) target_compile_definitions(${target} PRIVATE USE_LIBPMEM2) elseif(LIBPMEM_FOUND) target_include_directories(${target} PRIVATE ${LIBPMEM_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBPMEM_LIBRARIES}) target_compile_definitions(${target} PRIVATE USE_LIBPMEM) endif() if(EXAMPLE_USE_LIBPROTOBUFC) target_include_directories(${target} PRIVATE ${LIBPROTOBUFC_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBPROTOBUFC_LIBRARIES}) endif() if(IBV_ADVISE_MR_FLAGS_SUPPORTED) target_compile_definitions(${target} PRIVATE IBV_ADVISE_MR_FLAGS_SUPPORTED=1) endif() endfunction() add_example(NAME 01-connection BIN server SRCS 01-connection/server.c) add_example(NAME 01-connection BIN client SRCS 01-connection/client.c) add_example(NAME 02-read-to-volatile BIN server SRCS 02-read-to-volatile/server.c) add_example(NAME 02-read-to-volatile BIN client SRCS 02-read-to-volatile/client.c) add_example(NAME 03-read-to-persistent BIN server SRCS 03-read-to-persistent/server.c) add_example(NAME 03-read-to-persistent BIN client SRCS 03-read-to-persistent/client.c common/common-hello.c common/common-utils.c) add_example(NAME 04-write-to-persistent BIN server SRCS 04-write-to-persistent/server.c common/common-hello.c) add_example(NAME 04-write-to-persistent BIN client SRCS 04-write-to-persistent/client.c common/common-hello.c common/common-utils.c) add_example(NAME 05-flush-to-persistent BIN server SRCS 05-flush-to-persistent/server.c common/common-hello.c) add_example(NAME 05-flush-to-persistent BIN client SRCS 05-flush-to-persistent/client.c common/common-hello.c common/common-utils.c) add_example(NAME 06-multiple-connections BIN server SRCS 06-multiple-connections/server.c common/common-epoll.c) add_example(NAME 06-multiple-connections BIN client SRCS 
06-multiple-connections/client.c) add_example(NAME 06scch-multiple-connections BIN server SRCS 06scch-multiple-connections/server.c common/common-epoll.c) add_example(NAME 06scch-multiple-connections BIN client SRCS 06scch-multiple-connections/client.c) add_example(NAME 07-atomic-write BIN server SRCS 07-atomic-write/server.c) add_example(NAME 07-atomic-write BIN client SRCS 07-atomic-write/client.c) add_example(NAME 08-messages-ping-pong BIN server SRCS 08-messages-ping-pong/server.c common/common-messages-ping-pong.c) add_example(NAME 08-messages-ping-pong BIN client SRCS 08-messages-ping-pong/client.c common/common-messages-ping-pong.c common/common-utils.c) add_example(NAME 08srq-simple-messages-ping-pong-with-srq BIN server SRCS 08srq-simple-messages-ping-pong-with-srq/server.c common/common-messages-ping-pong.c) add_example(NAME 08srq-simple-messages-ping-pong-with-srq BIN client SRCS 08srq-simple-messages-ping-pong-with-srq/client.c common/common-messages-ping-pong.c common/common-utils.c) add_example(NAME 09-flush-to-persistent-GPSPM BIN server USE_LIBPROTOBUFC SRCS 09-flush-to-persistent-GPSPM/server.c common/gpspm/GPSPM_flush.pb-c.c common/common-hello.c) add_example(NAME 09-flush-to-persistent-GPSPM BIN client USE_LIBPROTOBUFC SRCS 09-flush-to-persistent-GPSPM/client.c common/gpspm/GPSPM_flush.pb-c.c common/common-hello.c common/common-utils.c) add_example(NAME 09scch-flush-to-persistent-GPSPM BIN server USE_LIBPROTOBUFC SRCS 09scch-flush-to-persistent-GPSPM/server.c common/gpspm/GPSPM_flush.pb-c.c common/common-hello.c) add_example(NAME 09scch-flush-to-persistent-GPSPM BIN client USE_LIBPROTOBUFC SRCS 09scch-flush-to-persistent-GPSPM/client.c common/gpspm/GPSPM_flush.pb-c.c common/common-hello.c common/common-utils.c) add_example(NAME 10-send-with-imm BIN server SRCS 10-send-with-imm/server.c) add_example(NAME 10-send-with-imm BIN client SRCS 10-send-with-imm/client.c) add_example(NAME 11-write-with-imm BIN server SRCS 11-write-with-imm/server.c) add_example(NAME 11-write-with-imm BIN client SRCS 11-write-with-imm/client.c) add_example(NAME 12-receive-completion-queue BIN server SRCS 12-receive-completion-queue/server.c 12-receive-completion-queue/receive-completion-queue-common.c) add_example(NAME 12-receive-completion-queue BIN client SRCS 12-receive-completion-queue/client.c 12-receive-completion-queue/receive-completion-queue-common.c common/common-utils.c) add_example(NAME 12scch-receive-completion-queue BIN server SRCS 12scch-receive-completion-queue/server.c) add_example(NAME 12scch-receive-completion-queue BIN client SRCS 12scch-receive-completion-queue/client.c common/common-utils.c) add_example(NAME 13-messages-ping-pong-with-srq BIN server SRCS 13-messages-ping-pong-with-srq/server.c common/common-epoll.c) add_example(NAME 13-messages-ping-pong-with-srq BIN client SRCS 13-messages-ping-pong-with-srq/client.c common/common-messages-ping-pong.c common/common-utils.c) add_example(NAME log BIN log SRCS log/log-example.c log/log-worker.c ${LIBRPMA_SOURCE_DIR}/log.c ${LIBRPMA_SOURCE_DIR}/log_default.c) rpma-1.3.0/examples/README.md000066400000000000000000000022211443364775400155650ustar00rootroot00000000000000Examples for librpma === This directory contains examples for librpma, the library to simplify accessing persistent memory on remote hosts over Remote Direct Memory Access (RDMA). If you're looking for documentation to get you started using RPMA, start here: https://pmem.io/rpma and follow the links to examples and man pages. 
## Requirements In order to build and run all examples you need to have installed additional packages: - libpmem-dev(el) >= 1.6 or libpmem2-dev(el) >= 1.11 for examples: 3, 4, 5, 7, 9, 9s - libprotobuf-c-dev(el) >= 1.0 for examples: 9, 9s **Note**: for more information please check out [this section](../INSTALL.md#for-some-examples-you-also-need). ## Running examples on a configured RDMA-capable network interface This directory contains also the 'run-all-examples.sh' script for running all examples on a configured RDMA-capable network interface (it can be either SoftRoCE or RDMA HW loopback). The examples can be run also from the CMake build directory using 'make run_all_examples' or 'make run_all_examples_under_valgrind' command. The '../tools/configure_softroce.sh' script can be used to configure and enable SoftRoCE. rpma-1.3.0/examples/cmake/000077500000000000000000000000001443364775400153715ustar00rootroot00000000000000rpma-1.3.0/examples/cmake/FindLIBRPMA.cmake000066400000000000000000000012661443364775400202670ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # if(NOT LIBIBVERBS_FOUND) find_package(LIBIBVERBS REQUIRED libibverbs) endif() if(NOT LIBRDMACM_FOUND) find_package(LIBRDMACM REQUIRED librdmacm) endif() message(STATUS "Checking for module 'librpma' w/o PkgConfig") find_library(LIBRPMA_LIBRARY NAMES librpma.so librpma rpma) set(LIBRPMA_LIBRARIES ${LIBRPMA_LIBRARY}) if(LIBRPMA_LIBRARY) message(STATUS " Found librpma w/o PkgConfig") else() set(MSG_NOT_FOUND "librpma NOT found (set CMAKE_PREFIX_PATH to point the location)") if(LIBRPMA_FIND_REQUIRED) message(FATAL_ERROR ${MSG_NOT_FOUND}) else() message(WARNING ${MSG_NOT_FOUND}) endif() endif() rpma-1.3.0/examples/cmake/common.cmake000066400000000000000000000036641443364775400176740ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # cmake_minimum_required(VERSION 3.3) function(add_example_with_pmem) set(options USE_LIBPROTOBUFC) set(oneValueArgs NAME) set(multiValueArgs SRCS) cmake_parse_arguments(EXAMPLE "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(target ${EXAMPLE_NAME}) if (EXAMPLE_USE_LIBPROTOBUFC AND NOT LIBPROTOBUFC_FOUND) message(STATUS "${target} skipped - no libprotobuf-c found") return() endif() prepend(srcs ${CMAKE_CURRENT_SOURCE_DIR} ${srcs}) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} ../common/common-conn.c ../common/common-hello.c) if (LIBPMEM_FOUND OR LIBPMEM2_FOUND) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} ../common/common-map_file_with_signature_check.c) endif() if (LIBPMEM2_FOUND) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} ../common/common-pmem2_map_file.c) elseif (LIBPMEM_FOUND) set(EXAMPLE_SRCS ${EXAMPLE_SRCS} ../common/common-pmem_map_file.c) endif() add_executable(${target} ${EXAMPLE_SRCS}) target_include_directories(${target} PRIVATE ../common ${LIBRPMA_INCLUDE_DIR} ${LIBIBVERBS_INCLUDE_DIRS}) target_link_libraries(${target} rpma ${LIBIBVERBS_LIBRARIES} ${LIBRT_LIBRARIES}) if(LIBPMEM2_FOUND) target_include_directories(${target} PRIVATE ${LIBPMEM2_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBPMEM2_LIBRARIES}) target_compile_definitions(${target} PRIVATE USE_LIBPMEM2) elseif(LIBPMEM_FOUND) target_include_directories(${target} PRIVATE ${LIBPMEM_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBPMEM_LIBRARIES}) target_compile_definitions(${target} PRIVATE USE_LIBPMEM) endif() if(EXAMPLE_USE_LIBPROTOBUFC) target_include_directories(${target} PRIVATE ${LIBPROTOBUFC_INCLUDE_DIRS}) 
target_link_libraries(${target} ${LIBPROTOBUFC_LIBRARIES}) endif() if(IBV_ADVISE_MR_FLAGS_SUPPORTED) target_compile_definitions(${target} PRIVATE IBV_ADVISE_MR_FLAGS_SUPPORTED=1) endif() endfunction() rpma-1.3.0/examples/common/000077500000000000000000000000001443364775400156015ustar00rootroot00000000000000rpma-1.3.0/examples/common/common-conn.c000066400000000000000000000124371443364775400201770ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * common-conn.c -- a common connection functions used by examples */ #include #include #include #include #include #include "common-conn.h" /* * malloc_aligned -- allocate an aligned chunk of memory */ void * malloc_aligned(size_t size) { long pagesize = sysconf(_SC_PAGESIZE); if (pagesize < 0) { perror("sysconf"); return NULL; } /* allocate a page size aligned local memory pool */ void *mem; int ret = posix_memalign(&mem, (size_t)pagesize, size); if (ret) { (void) fprintf(stderr, "posix_memalign: %s\n", strerror(ret)); return NULL; } /* zero the allocated memory */ memset(mem, 0, size); return mem; } /* * common_peer_via_address -- create a new RPMA peer based on ibv_context * received by the provided address */ int common_peer_via_address(const char *addr, enum rpma_util_ibv_context_type type, struct rpma_peer **peer_ptr) { struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(addr, type, &ibv_ctx); if (ret) return ret; /* create a new peer object */ return rpma_peer_new(ibv_ctx, peer_ptr); } /* * client_connect -- establish a new connection to a server listening at * addr:port */ int client_connect(struct rpma_peer *peer, const char *addr, const char *port, struct rpma_conn_cfg *cfg, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr) { struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; /* create a connection request */ int ret = rpma_conn_req_new(peer, addr, port, cfg, &req); if (ret) return ret; /* connect the connection request and obtain the connection object */ ret = rpma_conn_req_connect(&req, pdata, conn_ptr); if (ret) return ret; /* wait for the connection to establish */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (ret) { goto err_conn_delete; } else if (conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; goto err_conn_delete; } return 0; err_conn_delete: (void) rpma_conn_delete(conn_ptr); return ret; } /* * server_accept_connection -- wait for an incoming connection request, * accept it and wait for its establishment */ int server_accept_connection(struct rpma_ep *ep, struct rpma_conn_cfg *cfg, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr) { struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; /* receive an incoming connection request */ int ret = rpma_ep_next_conn_req(ep, cfg, &req); if (ret) return ret; /* * connect / accept the connection request and obtain the connection * object */ ret = rpma_conn_req_connect(&req, pdata, conn_ptr); if (ret) return ret; /* wait for the connection to be established */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (!ret && conn_event != RPMA_CONN_ESTABLISHED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); ret = -1; } if (ret) (void) rpma_conn_delete(conn_ptr); return ret; } /* * 
common_wait_for_conn_close_verbose -- wait for RPMA_CONN_CLOSED and print * an error message on error */ static inline int common_wait_for_conn_close_verbose(struct rpma_conn *conn) { enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; /* wait for the connection to be closed */ int ret = rpma_conn_next_event(conn, &conn_event); if (!ret && conn_event != RPMA_CONN_CLOSED) { fprintf(stderr, "rpma_conn_next_event returned an unexpected event: %s\n", rpma_utils_conn_event_2str(conn_event)); } return ret; } /* * common_wait_for_conn_close_and_disconnect -- wait for RPMA_CONN_CLOSED, * disconnect and delete the connection structure */ int common_wait_for_conn_close_and_disconnect(struct rpma_conn **conn_ptr) { int ret = 0; ret |= common_wait_for_conn_close_verbose(*conn_ptr); ret |= rpma_conn_disconnect(*conn_ptr); ret |= rpma_conn_delete(conn_ptr); return ret; } /* * common_disconnect_and_wait_for_conn_close -- disconnect, wait for * RPMA_CONN_CLOSED and delete the connection structure */ int common_disconnect_and_wait_for_conn_close(struct rpma_conn **conn_ptr) { int ret = 0; ret |= rpma_conn_disconnect(*conn_ptr); if (ret == 0) ret |= common_wait_for_conn_close_verbose(*conn_ptr); ret |= rpma_conn_delete(conn_ptr); return ret; } /* * wait_and_validate_completion -- wait for the completion to be ready * and validate it */ int wait_and_validate_completion(struct rpma_conn *conn, enum ibv_wc_opcode expected_opcode, struct ibv_wc *wc) { struct rpma_cq *cq = NULL; int ret = rpma_conn_wait(conn, 0, &cq, NULL); if (ret) return ret; ret = rpma_cq_get_wc(cq, 1, wc, NULL); if (ret) return ret; char *func_name = (expected_opcode == IBV_WC_SEND)? "send" : "recv"; if (wc->status != IBV_WC_SUCCESS) { (void) fprintf(stderr, "rpma_%s() failed: %s\n", func_name, ibv_wc_status_str(wc->status)); return -1; } if (wc->opcode != expected_opcode) { (void) fprintf(stderr, "unexpected wc.opcode value " "(0x%" PRIXPTR " != 0x%" PRIXPTR ")\n", (uintptr_t)wc->opcode, (uintptr_t)expected_opcode); return -1; } return 0; } rpma-1.3.0/examples/common/common-conn.h000066400000000000000000000045441443364775400202040ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * common-conn.h -- a common connection functions declarations for examples */ #ifndef COMMON_CONN_H #define COMMON_CONN_H #include #include #include "common-pmem.h" #ifdef USE_PMEM #define PMEM_USAGE \ "where can be:\n\ - a Device DAX (/dev/dax0.0 for example) or\n\ - a file on File System DAX (/mnt/pmem/file for example)\n" /* signature marking the persistent contents as valid */ #define SIGNATURE_STR "RPMA_EXAMPLE_SIG" #define SIGNATURE_LEN (strlen(SIGNATURE_STR) + 1) #define NO_PMEM_MSG "No provided. Using DRAM instead.\n" #else #define PMEM_USAGE "" #define NO_PMEM_MSG \ "The example is unable to use libpmem. If unintended please check the build log. Using DRAM instead.\n" #endif /* USE_PMEM */ /* * Limited by the maximum length of the private data * for rdma_connect() in case of RDMA_PS_TCP (56 bytes). 
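 * The common_data structure defined below is sized to stay well within this limit.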
*/ #define DESCRIPTORS_MAX_SIZE 24 struct common_data { uint16_t data_offset; /* user data offset */ uint8_t mr_desc_size; /* size of mr_desc in descriptors[] */ uint8_t pcfg_desc_size; /* size of pcfg_desc in descriptors[] */ /* buffer containing mr_desc and pcfg_desc */ char descriptors[DESCRIPTORS_MAX_SIZE]; }; #define KILOBYTE 1024 #define TIMEOUT_15S (15000) /* [msec] == 15s */ void *malloc_aligned(size_t size); int common_peer_via_address(const char *addr, enum rpma_util_ibv_context_type type, struct rpma_peer **peer_ptr); #define client_peer_via_address(addr, peer_ptr) \ common_peer_via_address(addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, \ peer_ptr) #define server_peer_via_address(addr, peer_ptr) \ common_peer_via_address(addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, \ peer_ptr) int client_connect(struct rpma_peer *peer, const char *addr, const char *port, struct rpma_conn_cfg *cfg, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr); int server_accept_connection(struct rpma_ep *ep, struct rpma_conn_cfg *cfg, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr); int common_wait_for_conn_close_and_disconnect(struct rpma_conn **conn_ptr); int common_disconnect_and_wait_for_conn_close(struct rpma_conn **conn_ptr); int wait_and_validate_completion(struct rpma_conn *conn, enum ibv_wc_opcode expected_opcode, struct ibv_wc *wc); #endif /* COMMON_CONN_H */ rpma-1.3.0/examples/common/common-epoll.c000066400000000000000000000025351443364775400203530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * common-epoll.c -- common epoll functions for examples */ #include #include #include #include #include #include #include "common-epoll.h" /* * fd_set_nonblock -- set O_NONBLOCK flag for provided file descriptor */ int fd_set_nonblock(int fd) { int ret = fcntl(fd, F_GETFL); if (ret < 0) { perror("fcntl"); return errno; } int flags = ret | O_NONBLOCK; ret = fcntl(fd, F_SETFL, flags); if (ret < 0) { perror("fcntl"); return errno; } return 0; } /* * epoll_add -- add a custom event to the epoll */ int epoll_add(int epoll, int fd, void *arg, event_func func, struct custom_event *ce) { /* set O_NONBLOCK flag for the provided fd */ int ret = fd_set_nonblock(fd); if (ret) return -1; /* prepare a custom event structure */ ce->fd = fd; ce->arg = arg; ce->func = func; /* prepare an epoll event */ struct epoll_event event; event.events = EPOLLIN; event.data.ptr = ce; /* add the event to epoll */ if (epoll_ctl(epoll, EPOLL_CTL_ADD, fd, &event)) return errno; return 0; } /* * epoll_delete -- remove the custom event from the epoll */ void epoll_delete(int epoll, struct custom_event *ce) { (void) epoll_ctl(epoll, EPOLL_CTL_DEL, ce->fd, NULL); ce->fd = -1; } rpma-1.3.0/examples/common/common-epoll.h000066400000000000000000000011001443364775400203430ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * common-epoll.h -- common epoll functions declarations for examples */ #ifndef EXAMPLES_COMMON_EPOLL #define EXAMPLES_COMMON_EPOLL int fd_set_nonblock(int fd); struct custom_event; typedef void (*event_func)(struct custom_event *ce); struct custom_event { int fd; void *arg; event_func func; }; int epoll_add(int epoll, int fd, void *arg, event_func func, struct custom_event *ce); void epoll_delete(int epoll, struct custom_event *ce); #endif /* EXAMPLES_COMMON_EPOLL */ 
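A rough usage sketch for these helpers (an illustration only — `ep` is assumed to be an already listening `struct rpma_ep *`, `handle_ep()` a hypothetical `event_func` handler defined elsewhere, and `TIMEOUT_15S` comes from common-conn.h):

```c
/* register the endpoint's event fd in an epoll instance */
int ep_fd;
int epoll_fd = epoll_create1(EPOLL_CLOEXEC);
(void) rpma_ep_get_fd(ep, &ep_fd);

struct custom_event ce;
(void) epoll_add(epoll_fd, ep_fd, ep, handle_ep, &ce);

/* wait for a single event and dispatch it to the registered handler */
struct epoll_event event;
if (epoll_wait(epoll_fd, &event, 1, TIMEOUT_15S) == 1) {
	struct custom_event *got = event.data.ptr;
	got->func(got);
}

epoll_delete(epoll_fd, &ce);
(void) close(epoll_fd);
```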
rpma-1.3.0/examples/common/common-hello.c000066400000000000000000000017351443364775400203440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-hello.c -- a hello message structure declarations */ #include #include #include "common-hello.h" static const char *hello_str[] = { [en] = "Hello world!", [es] = "¡Hola Mundo!" }; #define LANG_NUM (sizeof(hello_str) / sizeof(hello_str[0])) void write_hello_str(struct hello_t *hello, enum lang_t lang) { hello->lang = lang; strncpy(hello->str, hello_str[hello->lang], HELLO_STR_SIZE - 1); hello->str[HELLO_STR_SIZE - 1] = '\0'; } void translate(struct hello_t *hello) { printf("translating...\n"); enum lang_t lang = (enum lang_t)((hello->lang + 1) % LANG_NUM); write_hello_str(hello, lang); } ssize_t init_hello(char *pmem_data, size_t size) { if (size < HELLO_T_SIZE) { (void) fprintf(stderr, "PMem has too small size (%zu < %zu)\n", size, HELLO_T_SIZE); return -1; } write_hello_str((struct hello_t *)pmem_data, en); return HELLO_T_SIZE; } rpma-1.3.0/examples/common/common-hello.h000066400000000000000000000011321443364775400203400ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * common-hello.h -- a hello message structure declarations */ #ifndef COMMON_HELLO_H #define COMMON_HELLO_H #define HELLO_STR_SIZE 1024 enum lang_t {en, es}; struct hello_t { enum lang_t lang; char str[HELLO_STR_SIZE]; }; #define HELLO_STR_OFFSET offsetof(struct hello_t, str) #define HELLO_T_SIZE (sizeof(struct hello_t)) void write_hello_str(struct hello_t *hello, enum lang_t lang); void translate(struct hello_t *hello); ssize_t init_hello(char *pmem_data, size_t size); #endif /* COMMON_HELLO_H */ rpma-1.3.0/examples/common/common-map_file_with_signature_check.c000066400000000000000000000036651443364775400252720ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-map_file_with_signature_check.c -- a function to check pmem signature */ #include #include "common-conn.h" #include "common-map_file_with_signature_check.h" #include "common-pmem_map_file.h" int common_pmem_map_file_with_signature_check(char *path, size_t size, struct common_mem *mem, common_init_func_t init_pmem) { if (path == NULL || size == 0 || mem == NULL) return -1; /* * The beginning of the used persistent memory starts at mem->offset. * At the beginning of the used persistent memory, the signature is stored * which marks its content as valid. So the total space is assumed to be at least: * mem->offset + SIGNATURE_LEN + the size expected by the user. */ mem->data_offset = mem->offset + SIGNATURE_LEN; size += mem->data_offset; if (common_pmem_map_file(path, size, mem)) return -1; if (mem->mr_size < size) { (void) fprintf(stderr, "%s has too small size (%zu < %zu)\n", path, mem->mr_size, size); return -1; } /* beginning of the used persistent memory */ char *pmem = mem->mr_ptr + mem->offset; /* * If the signature is not in place the persistent content has * to be initialized and persisted. 
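	 * Otherwise the already valid content is left untouched and reused as-is.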
*/ if (strncmp(pmem, SIGNATURE_STR, SIGNATURE_LEN) != 0) { char *pmem_data = pmem + mem->data_offset; if (init_pmem) { /* write the initial hello string and persist it */ ssize_t size_to_persist = (*init_pmem)(pmem_data, size - SIGNATURE_LEN); if (size_to_persist < 0) { (void) fprintf(stderr, "Initialization of PMem failed.\n"); return -1; } mem->persist(pmem_data, (size_t)size_to_persist); } else { /* write the initial empty string and persist it */ pmem_data[0] = '\0'; mem->persist(pmem_data, 1); } /* write the signature to mark the content as valid */ memcpy(pmem, SIGNATURE_STR, SIGNATURE_LEN); mem->persist(pmem, SIGNATURE_LEN); } return 0; } rpma-1.3.0/examples/common/common-map_file_with_signature_check.h000066400000000000000000000011061443364775400252630ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * common-map_file_with_signature_check.h - a header file for * common-map_file_with_signature_check.c */ #ifndef COMMON_MAP_FILE_WITH_SIGNATURE_CHECK_H #define COMMON_MAP_FILE_WITH_SIGNATURE_CHECK_H #include "common-pmem_map_file.h" typedef ssize_t (*common_init_func_t)(char *pmem_data, size_t size); int common_pmem_map_file_with_signature_check(char *path, size_t size, struct common_mem *mem, common_init_func_t init_pmem); #endif /* COMMON_MAP_FILE_WITH_SIGNATURE_CHECK_H */ rpma-1.3.0/examples/common/common-messages-ping-pong.c000066400000000000000000000033061443364775400227400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-messages-ping-pong.c -- a common definitions for messages ping-pong */ #include #include #include "common-messages-ping-pong.h" int validate_wc(struct ibv_wc *wc, uint64_t *recv, int *send_cmpl, int *recv_cmpl) { if (wc->status != IBV_WC_SUCCESS) { char *func = (wc->opcode == IBV_WC_SEND)? "send" : "recv"; (void) fprintf(stderr, "rpma_%s() failed: %s\n", func, ibv_wc_status_str(wc->status)); return -1; } if (wc->opcode == IBV_WC_SEND) { *send_cmpl = 1; } else if (wc->opcode == IBV_WC_RECV) { if (wc->wr_id != (uintptr_t)recv || wc->byte_len != MSG_SIZE) { (void) fprintf(stderr, "received completion is not as expected (0x%" PRIXPTR " != 0x%" PRIXPTR " [wc.wr_id] || %" PRIu32 " != %ld [wc.byte_len])\n", wc->wr_id, (uintptr_t)recv, wc->byte_len, MSG_SIZE); return -1; } *recv_cmpl = 1; } return 0; } int wait_and_process_completions(struct rpma_cq *cq, uint64_t *recv, int *send_cmpl, int *recv_cmpl) { struct ibv_wc wc[MAX_N_WC]; int num_got; int ret; do { /* wait for the completion to be ready */ ret = rpma_cq_wait(cq); if (ret) return ret; /* reset num_got to 0 */ num_got = 0; /* get two next completions at most (1 of send + 1 of recv) */ ret = rpma_cq_get_wc(cq, MAX_N_WC, wc, &num_got); if (ret) /* lack of completion is not an error */ if (ret != RPMA_E_NO_COMPLETION) return ret; /* validate received completions */ for (int i = 0; i < num_got; i++) { ret = validate_wc(&wc[i], recv, send_cmpl, recv_cmpl); if (ret) return ret; } } while (*send_cmpl == 0 || *recv_cmpl == 0); return 0; } rpma-1.3.0/examples/common/common-messages-ping-pong.h000066400000000000000000000013401443364775400227410ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * common-messages-ping-pong.h -- a common declarations for messages ping-pong */ #ifndef COMMON_MSG_PING_PONG #define COMMON_MSG_PING_PONG #define MSG_SIZE sizeof(uint64_t) /* Both buffers are allocated one after another. 
*/ #define RECV_OFFSET 0 #define SEND_OFFSET MSG_SIZE #define I_M_DONE (uint64_t)UINT64_MAX /* the maximum number of completions expected (1 of send + 1 of receive) */ #define MAX_N_WC 2 int wait_and_process_completions(struct rpma_cq *cq, uint64_t *recv, int *send_cmpl, int *recv_cmpl); int validate_wc(struct ibv_wc *wc, uint64_t *recv, int *send_cmpl, int *recv_cmpl); #endif /* COMMON_MSG_PING_PONG */ rpma-1.3.0/examples/common/common-pmem.h000066400000000000000000000006651443364775400202050ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * common-pmem.h -- common PMem definitions */ #ifndef COMMON_PMEM_H #define COMMON_PMEM_H #ifdef USE_LIBPMEM2 #include #elif defined USE_LIBPMEM #include #endif #if defined USE_LIBPMEM2 || defined USE_LIBPMEM #define USE_PMEM 1 #else #undef USE_PMEM #endif /* USE_LIBPMEM2 || USE_LIBPMEM */ #endif /* COMMON_PMEM_H */ rpma-1.3.0/examples/common/common-pmem2_map_file.c000066400000000000000000000041031443364775400221050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-pmem2_map_file.c -- a function to map PMem using libpmem2 */ #include #include #include #include "common-pmem_map_file.h" int common_pmem_map_file(char *path, size_t min_size, struct common_mem *mem) { int fd = 0; struct pmem2_config *cfg = NULL; struct pmem2_map *map = NULL; struct pmem2_source *src = NULL; if ((fd = open(path, O_RDWR)) < 0) { (void) fprintf(stderr, "cannot open file\n"); return -1; } if (pmem2_source_from_fd(&src, fd) != 0) { (void) fprintf(stderr, "pmem2_source_from_fd() failed\n"); goto err_close; } if (pmem2_config_new(&cfg) != 0) { (void) fprintf(stderr, "pmem2_config_new() failed\n"); goto err_source_delete; } if (pmem2_config_set_required_store_granularity(cfg, PMEM2_GRANULARITY_CACHE_LINE) != 0) { (void) fprintf(stderr, "pmem2_config_set_required_store_granularity() failed: %s\n", pmem2_errormsg()); goto err_config_delete; } if (pmem2_map_new(&map, cfg, src) != 0) { (void) fprintf(stderr, "pmem2_map_new(%s) failed: %s\n", path, pmem2_errormsg()); goto err_config_delete; } if (pmem2_map_get_size(map) < min_size) { (void) fprintf(stderr, "mapped size for %s is too small (actual:%zu < expected:%zu): %s\n", path, pmem2_map_get_size(map), min_size, pmem2_errormsg()); (void) pmem2_map_delete(&map); goto err_config_delete; } mem->map = map; mem->mr_size = pmem2_map_get_size(map); mem->mr_ptr = pmem2_map_get_address(map); mem->is_pmem = 1; /* * Get rid of no longer needed config, source * and close the file */ pmem2_config_delete(&cfg); pmem2_source_delete(&src); close(fd); /* Get libpmem2 persist function from pmem2_map */ mem->persist = pmem2_get_persist_fn(map); return 0; err_config_delete: pmem2_config_delete(&cfg); err_source_delete: pmem2_source_delete(&src); err_close: close(fd); return -1; } void common_pmem_unmap_file(struct common_mem *mem) { if (mem->map) { (void) pmem2_map_delete(&mem->map); mem->mr_ptr = NULL; mem->is_pmem = 0; } } rpma-1.3.0/examples/common/common-pmem_map_file.c000066400000000000000000000022431443364775400220260ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-pmem_map_file.c -- a function to map PMem using libpmem */ #include #include "common-pmem_map_file.h" int common_pmem_map_file(char *path, size_t min_size, struct common_mem *mem) { mem->is_pmem = 0; /* map the file */ mem->mr_ptr = pmem_map_file(path, 0 /* len */, 0 /* flags */, 0 
/* mode */, &mem->mr_size, &mem->is_pmem); if (mem->mr_ptr == NULL) { (void) fprintf(stderr, "pmem_map_file() for %s failed\n", path); return -1; } if (mem->mr_size < min_size) { (void) fprintf(stderr, "mapped size for %s is too small (actual:%zu < expected:%zu)\n", path, mem->mr_size, min_size); (void) pmem_unmap(mem->mr_ptr, mem->mr_size); return -1; } /* pmem is expected */ if (!mem->is_pmem) { (void) fprintf(stderr, "%s is not an actual PMEM\n", path); (void) pmem_unmap(mem->mr_ptr, mem->mr_size); return -1; } if (mem->is_pmem) mem->persist = pmem_persist; return 0; } void common_pmem_unmap_file(struct common_mem *mem) { if (mem->is_pmem) { (void) pmem_unmap(mem->mr_ptr, mem->mr_size); mem->mr_ptr = NULL; mem->is_pmem = 0; } } rpma-1.3.0/examples/common/common-pmem_map_file.h000066400000000000000000000016331443364775400220350ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * common-pmem_map_file.h -- a header file for both common-pmem_map_file.c * and common-pmem2_map_file.c */ #ifndef COMMON_PMEM_MAP_FILE_H #define COMMON_PMEM_MAP_FILE_H #include "common-pmem.h" typedef void (*persist_fn)(const void *ptr, size_t size); struct common_mem { /* memory buffer */ char *mr_ptr; /* size of the mapped persistent memory */ size_t mr_size; /* offset of the beginning of the used persistent memory */ size_t offset; /* offset of user data after the pmem signature */ size_t data_offset; int is_pmem; persist_fn persist; #ifdef USE_LIBPMEM2 /* libpmem2 structure used for mapping PMem */ struct pmem2_map *map; #endif }; int common_pmem_map_file(char *path, size_t min_size, struct common_mem *mem); void common_pmem_unmap_file(struct common_mem *mem); #endif /* COMMON_PMEM_MAP_FILE_H */ rpma-1.3.0/examples/common/common-utils.c000066400000000000000000000007351443364775400204000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * common-utils.c -- common utils for examples */ #include #include #include #include #include #include "common-utils.h" uint64_t strtoul_noerror(const char *in) { uint64_t out = strtoul(in, NULL, 10); if (out == ULONG_MAX && errno == ERANGE) { (void) fprintf(stderr, "strtoul(%s) overflowed\n", in); exit(-1); } return out; } rpma-1.3.0/examples/common/common-utils.h000066400000000000000000000004221443364775400203760ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * common-utils.h -- declarations of common utils for examples */ #ifndef COMMON_UTILS_H #define COMMON_UTILS_H uint64_t strtoul_noerror(const char *in); #endif /* COMMON_UTILS_H */ rpma-1.3.0/examples/common/gpspm/000077500000000000000000000000001443364775400167275ustar00rootroot00000000000000rpma-1.3.0/examples/common/gpspm/GPSPM_flush.pb-c.c000066400000000000000000000144701443364775400220100ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Generated by the protocol buffer compiler. DO NOT EDIT! 
*/ /* Generated from: GPSPM_flush.proto */ /* Do not generate deprecated warnings for self */ #ifndef PROTOBUF_C__NO_DEPRECATED #define PROTOBUF_C__NO_DEPRECATED #endif #include "GPSPM_flush.pb-c.h" void gpspm_flush_request__init (GPSPMFlushRequest *message) { static const GPSPMFlushRequest init_value = GPSPM_FLUSH_REQUEST__INIT; *message = init_value; } size_t gpspm_flush_request__get_packed_size (const GPSPMFlushRequest *message) { assert(message->base.descriptor == &gpspm_flush_request__descriptor); return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); } size_t gpspm_flush_request__pack (const GPSPMFlushRequest *message, uint8_t *out) { assert(message->base.descriptor == &gpspm_flush_request__descriptor); return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); } size_t gpspm_flush_request__pack_to_buffer (const GPSPMFlushRequest *message, ProtobufCBuffer *buffer) { assert(message->base.descriptor == &gpspm_flush_request__descriptor); return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); } GPSPMFlushRequest * gpspm_flush_request__unpack (ProtobufCAllocator *allocator, size_t len, const uint8_t *data) { return (GPSPMFlushRequest *) protobuf_c_message_unpack (&gpspm_flush_request__descriptor, allocator, len, data); } void gpspm_flush_request__free_unpacked (GPSPMFlushRequest *message, ProtobufCAllocator *allocator) { if(!message) return; assert(message->base.descriptor == &gpspm_flush_request__descriptor); protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); } void gpspm_flush_response__init (GPSPMFlushResponse *message) { static const GPSPMFlushResponse init_value = GPSPM_FLUSH_RESPONSE__INIT; *message = init_value; } size_t gpspm_flush_response__get_packed_size (const GPSPMFlushResponse *message) { assert(message->base.descriptor == &gpspm_flush_response__descriptor); return protobuf_c_message_get_packed_size ((const ProtobufCMessage*)(message)); } size_t gpspm_flush_response__pack (const GPSPMFlushResponse *message, uint8_t *out) { assert(message->base.descriptor == &gpspm_flush_response__descriptor); return protobuf_c_message_pack ((const ProtobufCMessage*)message, out); } size_t gpspm_flush_response__pack_to_buffer (const GPSPMFlushResponse *message, ProtobufCBuffer *buffer) { assert(message->base.descriptor == &gpspm_flush_response__descriptor); return protobuf_c_message_pack_to_buffer ((const ProtobufCMessage*)message, buffer); } GPSPMFlushResponse * gpspm_flush_response__unpack (ProtobufCAllocator *allocator, size_t len, const uint8_t *data) { return (GPSPMFlushResponse *) protobuf_c_message_unpack (&gpspm_flush_response__descriptor, allocator, len, data); } void gpspm_flush_response__free_unpacked (GPSPMFlushResponse *message, ProtobufCAllocator *allocator) { if(!message) return; assert(message->base.descriptor == &gpspm_flush_response__descriptor); protobuf_c_message_free_unpacked ((ProtobufCMessage*)message, allocator); } static const ProtobufCFieldDescriptor gpspm_flush_request__field_descriptors[3] = { { "offset", 1, PROTOBUF_C_LABEL_REQUIRED, PROTOBUF_C_TYPE_FIXED64, 0, /* quantifier_offset */ offsetof(GPSPMFlushRequest, offset), NULL, NULL, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, { "length", 2, PROTOBUF_C_LABEL_REQUIRED, PROTOBUF_C_TYPE_FIXED64, 0, /* quantifier_offset */ offsetof(GPSPMFlushRequest, length), NULL, NULL, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, { "op_context", 3, PROTOBUF_C_LABEL_REQUIRED, PROTOBUF_C_TYPE_FIXED64, 0, /* 
quantifier_offset */ offsetof(GPSPMFlushRequest, op_context), NULL, NULL, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, }; static const unsigned gpspm_flush_request__field_indices_by_name[] = { 1, /* field[1] = length */ 0, /* field[0] = offset */ 2, /* field[2] = op_context */ }; static const ProtobufCIntRange gpspm_flush_request__number_ranges[1 + 1] = { { 1, 0 }, { 0, 3 } }; const ProtobufCMessageDescriptor gpspm_flush_request__descriptor = { PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, "GPSPM_flush_request", "GPSPMFlushRequest", "GPSPMFlushRequest", "", sizeof(GPSPMFlushRequest), 3, gpspm_flush_request__field_descriptors, gpspm_flush_request__field_indices_by_name, 1, gpspm_flush_request__number_ranges, (ProtobufCMessageInit) gpspm_flush_request__init, NULL,NULL,NULL /* reserved[123] */ }; static const ProtobufCFieldDescriptor gpspm_flush_response__field_descriptors[1] = { { "op_context", 1, PROTOBUF_C_LABEL_REQUIRED, PROTOBUF_C_TYPE_FIXED64, 0, /* quantifier_offset */ offsetof(GPSPMFlushResponse, op_context), NULL, NULL, 0, /* flags */ 0,NULL,NULL /* reserved1,reserved2, etc */ }, }; static const unsigned gpspm_flush_response__field_indices_by_name[] = { 0, /* field[0] = op_context */ }; static const ProtobufCIntRange gpspm_flush_response__number_ranges[1 + 1] = { { 1, 0 }, { 0, 1 } }; const ProtobufCMessageDescriptor gpspm_flush_response__descriptor = { PROTOBUF_C__MESSAGE_DESCRIPTOR_MAGIC, "GPSPM_flush_response", "GPSPMFlushResponse", "GPSPMFlushResponse", "", sizeof(GPSPMFlushResponse), 1, gpspm_flush_response__field_descriptors, gpspm_flush_response__field_indices_by_name, 1, gpspm_flush_response__number_ranges, (ProtobufCMessageInit) gpspm_flush_response__init, NULL,NULL,NULL /* reserved[123] */ }; rpma-1.3.0/examples/common/gpspm/GPSPM_flush.pb-c.h000066400000000000000000000071751443364775400220210ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* Generated by the protocol buffer compiler. DO NOT EDIT! */ /* Generated from: GPSPM_flush.proto */ #ifndef PROTOBUF_C_GPSPM_5fflush_2eproto__INCLUDED #define PROTOBUF_C_GPSPM_5fflush_2eproto__INCLUDED #include PROTOBUF_C__BEGIN_DECLS #if PROTOBUF_C_VERSION_NUMBER < 1000000 # error This file was generated by a newer version of protoc-c which is incompatible with your libprotobuf-c headers. Please update your headers. #elif 1003003 < PROTOBUF_C_MIN_COMPILER_VERSION # error This file was generated by an older version of protoc-c which is incompatible with your libprotobuf-c headers. Please regenerate this file with a newer version of protoc-c. 
#endif typedef struct _GPSPMFlushRequest GPSPMFlushRequest; typedef struct _GPSPMFlushResponse GPSPMFlushResponse; /* --- enums --- */ /* --- messages --- */ struct _GPSPMFlushRequest { ProtobufCMessage base; uint64_t offset; uint64_t length; uint64_t op_context; }; #define GPSPM_FLUSH_REQUEST__INIT \ { PROTOBUF_C_MESSAGE_INIT (&gpspm_flush_request__descriptor) \ , 0, 0, 0 } struct _GPSPMFlushResponse { ProtobufCMessage base; uint64_t op_context; }; #define GPSPM_FLUSH_RESPONSE__INIT \ { PROTOBUF_C_MESSAGE_INIT (&gpspm_flush_response__descriptor) \ , 0 } /* GPSPMFlushRequest methods */ void gpspm_flush_request__init (GPSPMFlushRequest *message); size_t gpspm_flush_request__get_packed_size (const GPSPMFlushRequest *message); size_t gpspm_flush_request__pack (const GPSPMFlushRequest *message, uint8_t *out); size_t gpspm_flush_request__pack_to_buffer (const GPSPMFlushRequest *message, ProtobufCBuffer *buffer); GPSPMFlushRequest * gpspm_flush_request__unpack (ProtobufCAllocator *allocator, size_t len, const uint8_t *data); void gpspm_flush_request__free_unpacked (GPSPMFlushRequest *message, ProtobufCAllocator *allocator); /* GPSPMFlushResponse methods */ void gpspm_flush_response__init (GPSPMFlushResponse *message); size_t gpspm_flush_response__get_packed_size (const GPSPMFlushResponse *message); size_t gpspm_flush_response__pack (const GPSPMFlushResponse *message, uint8_t *out); size_t gpspm_flush_response__pack_to_buffer (const GPSPMFlushResponse *message, ProtobufCBuffer *buffer); GPSPMFlushResponse * gpspm_flush_response__unpack (ProtobufCAllocator *allocator, size_t len, const uint8_t *data); void gpspm_flush_response__free_unpacked (GPSPMFlushResponse *message, ProtobufCAllocator *allocator); /* --- per-message closures --- */ typedef void (*GPSPMFlushRequest_Closure) (const GPSPMFlushRequest *message, void *closure_data); typedef void (*GPSPMFlushResponse_Closure) (const GPSPMFlushResponse *message, void *closure_data); /* --- services --- */ /* --- descriptors --- */ extern const ProtobufCMessageDescriptor gpspm_flush_request__descriptor; extern const ProtobufCMessageDescriptor gpspm_flush_response__descriptor; PROTOBUF_C__END_DECLS #endif /* PROTOBUF_C_GPSPM_5fflush_2eproto__INCLUDED */ rpma-1.3.0/examples/common/gpspm/GPSPM_flush.proto000066400000000000000000000006761443364775400221140ustar00rootroot00000000000000syntax = "proto2"; message GPSPM_flush_request { /* an offset of a region to be flushed within its memory registration */ required fixed64 offset = 1; /* a length of a region to be flushed */ required fixed64 length = 2; /* a user-defined operation context */ required fixed64 op_context = 3; } message GPSPM_flush_response { /* the operation context of a completed request */ required fixed64 op_context = 1; } rpma-1.3.0/examples/common/gpspm/flush-to-persistent-GPSPM.h000066400000000000000000000006511443364775400237250ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * flush-to-persistent-GSPSPM.h -- a common declarations for the 09 example */ #ifndef FLUSH_TO_PERSISTENT_GPSPM #define FLUSH_TO_PERSISTENT_GPSPM #define MSG_SIZE_MAX 512 #define SEND_OFFSET 0 #define RECV_OFFSET (SEND_OFFSET + MSG_SIZE_MAX) #define RCQ_SIZE 1 #endif /* FLUSH_TO_PERSISTENT_GPSPM */ rpma-1.3.0/examples/log/000077500000000000000000000000001443364775400150725ustar00rootroot00000000000000rpma-1.3.0/examples/log/README.md000066400000000000000000000003071443364775400163510ustar00rootroot00000000000000Example 
of logging mechanism in librpma === This directory contains an example demonstrating different logging mechanisms in the librpma library. ## Usage ```bash [user@server]$ ./example-log ``` rpma-1.3.0/examples/log/log-example.c000066400000000000000000000033071443364775400174530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * log-example.c -- an example of how to use and control behavior of the log. */ #include #include #include "librpma.h" extern void log_worker_is_doing_something(void); static void user_log_function(int level, const char *file_name, const int line_no, const char *func_name, const char *message_format, ...) { if (((NULL != file_name) && (NULL == func_name)) || (NULL == message_format)) { return; } if (fprintf(stderr, "Custom log handling: \n") < 0) return; if (NULL != file_name) { if (fprintf(stderr, "%s %4d %s:\n", file_name, line_no, func_name) < 0) { return; } } if (fprintf(stderr, "level: %d ", level) < 0) return; va_list args; va_start(args, message_format); if (vfprintf(stderr, message_format, args) < 0) { va_end(args); return; } va_end(args); if (NULL != file_name) fprintf(stderr, "\n"); } int main(int argc, char *argv[]) { /* * log messages to be produced to syslog as well as stderr */ printf("Let's write messages to stderr and syslog\n"); rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_DEBUG); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_DEBUG); log_worker_is_doing_something(); printf( "Use: \n$ sudo tail -n 60 /var/log/syslog | grep rpma\nto see messages in the syslog."); /* * log messages to be transferred only to custom user function */ rpma_log_set_function(user_log_function); printf( "Let's use custom log function to write messages to stderr\nNo message should be written to syslog\n"); log_worker_is_doing_something(); rpma_log_set_function(RPMA_LOG_USE_DEFAULT_FUNCTION); return 0; } rpma-1.3.0/examples/log/log-worker.c000066400000000000000000000006571443364775400173360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * log-worker.c -- an example of how the internal API of the log module is used * by the RPMA library */ #include #include "log_internal.h" void log_worker_is_doing_something(void) { RPMA_LOG_NOTICE("Just a notice"); RPMA_LOG_WARNING("Important warning about value: %d", 720401); RPMA_LOG_ERROR("Error due to order %x", 102); } rpma-1.3.0/examples/run-all-examples.sh000077500000000000000000000362471443364775400200520ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2022, Fujitsu # # run-all-examples.sh - run all examples (optionally under valgrind or with fault injection) # # Usage: run-all-examples.sh [--valgrind|--integration-tests] # [--stop-on-failure] [] [IP_address] [port] # # Important: the given order of command line arguments is mandatory! # # Used environment variables: # - RPMA_EXAMPLES_PMEM_PATH # - RPMA_EXAMPLES_STOP_ON_FAILURE # # In order to run the examples on a PMem (a DAX device or a file on a file system DAX), # an absolute path (starting with '/') to this PMem has to be provided # either via the '' argument or via the 'RPMA_EXAMPLES_PMEM_PATH' environment variable. # If both of them are set, the command line argument '' will be used. 
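#
# An illustrative invocation (a sketch only: the build directory, the PMem path,
# the IP address and the port below are placeholders, not values taken from this
# repository; judging by the argument parsing, the first argument must be the
# directory containing the built examples):
#   $ ./run-all-examples.sh ./build/examples --valgrind /dev/dax0.0 192.168.101.1 7204
#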
# # If the '--stop-on-failure' argument is used or the 'RPMA_EXAMPLES_STOP_ON_FAILURE' # environment variable is set to ON, then the integration tests will stop on the first failure. # # The '--integration-tests' option starts integration tests documented # in the https://github.com/pmem/rpma/blob/main/DEVELOPMENT.md#running-integration-tests file. # # value used to get the maximum reachable value of fault injection for each example GET_FI_MAX=999999 # offset where the clients use a PMem from PMEM_CLIENT_OFFSET=1024 USAGE_STRING="\ Usage:\n\ $ run-all-examples.sh [--valgrind|--integration-tests] \ [--stop-on-failure] [] [IP_address] [port]\n\ \n\ Important: the given order of command line arguments is mandatory!\n\ \n\ In order to run the examples on a PMem (a DAX device or a file on a file system DAX), \ an absolute path (starting with '/') to this PMem has to be provided \ either via the '' argument or via the 'RPMA_EXAMPLES_PMEM_PATH' environment variable. \ If both of them are set, the command line argument '' will be used. \ \n\ If the '--stop-on-failure' argument is used or the 'RPMA_EXAMPLES_STOP_ON_FAILURE' \ environment variable is set to ON, then the integration tests will stop on the first failure.\n \n\ The '--integration-tests' option starts integration tests documented \ in the https://github.com/pmem/rpma/blob/main/DEVELOPMENT.md#running-integration-tests file.\n" BIN_DIR=$1 if [ "$BIN_DIR" == "" -o ! -d "$BIN_DIR" ]; then echo "Error: missing required argument" echo echo -e $USAGE_STRING exit 1 fi MODE="none" if [ "$2" == "--integration-tests" ]; then MODE="integration-tests" shift elif [ "$2" == "--valgrind" ]; then MODE="valgrind" shift fi STOP_ON_FAILURE=0 if [ "$2" == "--stop-on-failure" -o "$RPMA_EXAMPLES_STOP_ON_FAILURE" == "ON" ]; then STOP_ON_FAILURE=1 [ "$2" == "--stop-on-failure" ] && shift fi PMEM_PATH="" if [[ $2 = /* ]]; then if [ -c "$2" -o -f "$2" ]; then PMEM_PATH=$2 echo "Notice: running examples on PMem: $PMEM_PATH" # PMEM_PATH overrides RPMA_EXAMPLES_PMEM_PATH RPMA_EXAMPLES_PMEM_PATH="" else echo "Error: the $2 path is not a file nor a character device" exit 1 fi shift fi if [ "$PMEM_PATH" == "" -a "$RPMA_EXAMPLES_PMEM_PATH" != "" ]; then _PATH=$RPMA_EXAMPLES_PMEM_PATH if [[ $_PATH = /* ]] && [ -c "$_PATH" -o -f "$_PATH" ]; then echo "Notice: running examples on PMem: $RPMA_EXAMPLES_PMEM_PATH (RPMA_EXAMPLES_PMEM_PATH)." PMEM_PATH=$RPMA_EXAMPLES_PMEM_PATH else echo "Notice: $RPMA_EXAMPLES_PMEM_PATH is not an absolute path of a file nor a character device" exit 1 fi elif [ "$PMEM_PATH" == "" ]; then echo "Notice: PMem path (RPMA_EXAMPLES_PMEM_PATH) is not set, examples will be run on DRAM." fi if [[ $2 =~ [0-9]*\.[0-9]*\.[0-9]*\.[0-9]* ]]; then IP_ADDRESS=$2 PORT=$3 else IP_ADDRES="" PORT=$2 fi [ "$PORT" == "" ] && PORT="7204" function print_out_log_file() { echo echo "*** file $1 (START) ***" cat $1 echo "*** file $1 (END) ***" echo } function get_max_fault_injection() { LOG_FILE=$1 FI_MAX=$(grep -e '\[#' $LOG_FILE | cut -d'#' -f2 | cut -d']' -f1 | sort -n | tail -n1) echo $FI_MAX } function error_out_if_no_max_fault_injection() { FI_MAX=$1 LOG_FILE=$2 if [ "$FI_MAX" == "" ]; then print_out_log_file $LOG_FILE echo echo "Error: no fault-injection markers found in the log,"\ "please check if librpma is built with the DEBUG_FAULT_INJECTION CMake variable set to ON." echo exit 1 fi if ! 
[[ $FI_MAX =~ ^[0-9]+$ ]]; then print_out_log_file $LOG_FILE echo "Error: the maximum value of fault injection is not a number" exit 1 fi } function run_command_of() { WHO=$1 shift if [ "$MODE" != "integration-tests" ]; then echo "[${WHO}]$ $*" eval $* elif [ "$LOG_OUTPUT" == "yes" ]; then if [ "$WHO" == "server" ]; then rm -f $S_LOG_FILE echo "[${WHO}]$ $S_FI $* > $S_LOG_FILE 2>&1" eval $S_FI $* > $S_LOG_FILE 2>&1 else rm -f $C_LOG_FILE echo "[${WHO}]$ $C_FI $* > $C_LOG_FILE 2>&1" eval $C_FI $* > $C_LOG_FILE 2>&1 fi else S_TIME="" C_TIME="" [ "$S_FI" == "" ] && S_TIME="timeout --preserve-status $TIMEOUT" # run the server with timeout [ "$C_FI" == "" ] && C_TIME="timeout --preserve-status $TIMEOUT" # run the client with timeout if [ "$WHO" == "server" ]; then echo "[${WHO}]$ $S_FI $S_TIME $*" eval $S_FI $S_TIME $* else echo "[${WHO}]$ $C_FI $C_TIME $*" eval $C_FI $C_TIME $* fi fi } function start_server() { echo "Starting the server ..." run_command_of server $* & } function start_client() { echo "Starting the client ..." run_command_of client $* RV=$? } function print_FI_if_failed() { if [ $SFAILED -eq 1 -o $CFAILED -eq 1 ]; then echo echo "==========================================" echo "Fault injection ERROR" [ $S_FI_VAL -gt 0 ] && \ echo "Server's fault injection value = $S_FI_VAL" [ $C_FI_VAL -gt 0 ] && \ echo "Client's fault injection value = $C_FI_VAL" echo "==========================================" echo [ $STOP_ON_FAILURE -eq 1 ] && exit 1 fi } function get_IP_of_RDMA_interface() { STATE_OK="state ACTIVE physical_state LINK_UP" NETDEV=$(rdma link show | grep -e "$STATE_OK" | head -n1 | cut -d' ' -f8) IP_ADDRESS=$(ip address show dev $NETDEV | grep -e inet | grep -v -e inet6 | cut -d' ' -f6 | cut -d/ -f1) echo $IP_ADDRESS } function get_PID_of_server() { IP_ADDR=$1 PORT=$2 ARGS="server $IP_ADDR $PORT" PID=$(ps aux | grep -e "$ARGS" | grep -v -e "grep -e $ARGS" | awk '{print $2}') echo $PID } function run_example() { DIR=$1 S_FI_VAL=$2 # server's fault injection value C_FI_VAL=$3 # client's fault injection value EXAMPLE=$(basename $DIR) LOG_OUTPUT="no" VLD_SCMD=$VLD_SCMD_ORIG VLD_CCMD=$VLD_CCMD_ORIG S_FI="" # server's fault injection string C_FI="" # client's fault injection string if [ "$MODE" == "integration-tests" ]; then if [ "$S_FI_VAL" == "" -o "$C_FI_VAL" == "" ]; then echo "Error: both S_FI_VAL and C_FI_VAL have to be set in the integration-tests mode." exit 1 fi [ $S_FI_VAL -ge $GET_FI_MAX -o $C_FI_VAL -ge $GET_FI_MAX ] && LOG_OUTPUT="yes" if [ $S_FI_VAL -gt 0 ]; then S_FI="RPMA_FAULT_INJECTION=$S_FI_VAL" VLD_CCMD="" # do not run the client under valgrind else S_FI="" fi if [ $C_FI_VAL -gt 0 ]; then C_FI="RPMA_FAULT_INJECTION=$C_FI_VAL" VLD_SCMD="" # do not run the server under valgrind else C_FI="" fi else S_FI_VAL=0 C_FI_VAL=0 fi SFAILED=0 CFAILED=0 echo "*** Running example: $EXAMPLE $VLD_MSG" # The default case is needed here, because in case of integration tests # all examples are run twice: once with the fault injection in the server # and once with the fault injection in the client. 
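	# Each branch below starts the example's server with a per-example timeout and argument set;
	# the client side is started afterwards, after a short delay, further below.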
case $EXAMPLE in 08srq-simple-messages-ping-pong-with-srq) # timeout value for both the server and the client TIMEOUT=3s start_server $VLD_SCMD $DIR/server $IP_ADDRESS $PORT ;; 13-messages-ping-pong-with-srq) # timeout value for both the server and the client TIMEOUT=6s start_server $VLD_SCMD $DIR/server $IP_ADDRESS $PORT 3 ;; *) # timeout value for both the server and the client TIMEOUT=3s start_server $VLD_SCMD $DIR/server $IP_ADDRESS $PORT $PMEM_PATH ;; esac sleep 1 RV=0 case $EXAMPLE in 06-multiple-connections|06scch-multiple-connections) [ "$MODE" == "integration-tests" ] && SEEDS="8" || SEEDS="8 9 11 12" for SEED in $SEEDS; do start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $SEED [ $RV -ne 0 ] && break done ;; 07-atomic-write) [ "$MODE" == "integration-tests" ] && WORDS="1st_word" || WORDS="1st_word 2nd_word 3rd_word" start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $WORDS ;; 08-messages-ping-pong|08srq-simple-messages-ping-pong-with-srq) SEED=7 [ "$MODE" == "integration-tests" ] && ROUNDS=1 || ROUNDS=3 start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $SEED $ROUNDS ;; 10-send-with-imm) start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT "1234" "1st_word" ;; 11-write-with-imm) start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT "1234" ;; 12-receive-completion-queue|12scch-receive-completion-queue) START_VALUE=7 [ "$MODE" == "integration-tests" ] && ROUNDS=1 || ROUNDS=3 start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $START_VALUE $ROUNDS ;; 13-messages-ping-pong-with-srq) ROUNDS=3 [ "$MODE" == "integration-tests" ] && SEEDS="1" || SEEDS="1 5 10" for SEED in $SEEDS; do echo "Starting the client ..." run_command_of client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $SEED $ROUNDS & CLIENT_PIDS="$CLIENT_PIDS $!" done for CLIENT_PID in $CLIENT_PIDS; do wait $CLIENT_PID TMP_RV=$? [ $RV -eq 0 ] && RV=$TMP_RV done ;; *) if [ "$PMEM_PATH" != "" ]; then start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT $PMEM_PATH $PMEM_CLIENT_OFFSET else start_client $VLD_CCMD $DIR/client $IP_ADDRESS $PORT fi ;; esac if [ "$MODE" != "integration-tests" -a $RV -ne 0 ]; then echo "Error: example $EXAMPLE FAILED!" N_FAILED=$(($N_FAILED + 1)) LIST_FAILED="${LIST_FAILED}${EXAMPLE}\n" elif [ "$VLD_CCMD" != "" ]; then cerrno=$(grep "ERROR SUMMARY:" ${VLD_CLOG_FILE} | grep -Eoh "[0-9]+ errors" | awk '{print $1}') if [ "$cerrno" == "" ]; then cat ${VLD_CLOG_FILE} echo "Error: missing ERROR SUMMARY" exit 1 fi if [ $cerrno -gt 0 ]; then echo "Error: example $EXAMPLE client $VLD_MSG FAILED!" CFAILED=1 N_CFAILED=$(($N_CFAILED + 1)) LIST_CFAILED="${LIST_CFAILED}${EXAMPLE}-client\n" mv ${VLD_CLOG_FILE} ${BIN_DIR}/$EXAMPLE-valgrind-client.log print_out_log_file ${BIN_DIR}/$EXAMPLE-valgrind-client.log fi fi # make sure the server's process is finished if [ "$MODE" != "integration-tests" -o $S_FI_VAL -gt 0 ]; then PID=$(get_PID_of_server $IP_ADDRESS $PORT) if [ "$PID" != "" ]; then echo "Notice: server is still running, waiting 1 sec ..." sleep 1 fi fi PID=$(get_PID_of_server $IP_ADDRESS $PORT) if [ "$PID" != "" ]; then echo "Notice: server is still running, killing it ..." if [ "$MODE" != "integration-tests" -o $S_FI_VAL -gt 0 ]; then kill $PID sleep 1 fi kill -9 $PID 2>/dev/null elif [ "$VLD_SCMD" != "" ]; then serrno=$(grep "ERROR SUMMARY:" ${VLD_SLOG_FILE} | grep -Eoh "[0-9]+ errors" | awk '{print $1}') if [ "$serrno" == "" ]; then cat ${VLD_SLOG_FILE} echo "Error: missing ERROR SUMMARY" exit 1 fi if [ $serrno -gt 0 ]; then echo "Error: example $EXAMPLE server $VLD_MSG FAILED!" 
SFAILED=1 N_SFAILED=$(($N_SFAILED + 1)) LIST_SFAILED="${LIST_SFAILED}${EXAMPLE}-server\n" mv ${VLD_SLOG_FILE} ${BIN_DIR}/$EXAMPLE-valgrind-server.log print_out_log_file ${BIN_DIR}/$EXAMPLE-valgrind-server.log fi fi S_FI_MAX=0 C_FI_MAX=0 if [ "$LOG_OUTPUT" == "yes" ]; then if [ "$S_FI" != "" ]; then S_FI_MAX=$(get_max_fault_injection $S_LOG_FILE) error_out_if_no_max_fault_injection "$S_FI_MAX" "$S_LOG_FILE" echo "Detected S_FI_MAX = \"$S_FI_MAX\"" elif [ "$C_FI" != "" ]; then C_FI_MAX=$(get_max_fault_injection $C_LOG_FILE) error_out_if_no_max_fault_injection "$C_FI_MAX" "$C_LOG_FILE" echo "Detected C_FI_MAX = \"$C_FI_MAX\"" fi fi echo } ### SCRIPT STARTS HERE ### N_FAILED=0 LIST_FAILED="" N_SFAILED=0 LIST_SFAILED="" N_CFAILED=0 LIST_CFAILED="" S_LOG_FILE="nohup_server.out" C_LOG_FILE="nohup_client.out" if [ "$IP_ADDRESS" == "" -a "$RPMA_TESTING_IP" != "" ]; then echo "Notice: no IP address given. Using RPMA_TESTING_IP=$RPMA_TESTING_IP." IP_ADDRESS=$RPMA_TESTING_IP fi [ "$IP_ADDRESS" == "" ] && IP_ADDRESS=$(get_IP_of_RDMA_interface) if [ "$IP_ADDRESS" == "" ]; then echo "Error: not found any RDMA-capable network interface" exit 1 fi echo "Notice: running examples for IP address $IP_ADDRESS and port $PORT" echo JOBS=$(ps aux | grep -e "server $IP_ADDRESS $PORT" -e "client $IP_ADDRESS $PORT" | grep -v "grep -e") if [ "$JOBS" != "" ]; then echo "Wait for the following processes to finish or kill them:" echo "$JOBS" echo "Error: cannot run examples, because some of them are still running" exit 1 fi if [ "$MODE" == "valgrind" -o "$MODE" == "integration-tests" ]; then if ! which valgrind > /dev/null; then if [ "$MODE" == "valgrind" ]; then echo "Error: valgrind not found - the examples cannot be run under valgrind." exit 1 else # "$MODE" == "integration-tests" echo "Error: valgrind not found - the integration tests cannot be run." exit 1 fi fi VLD_CMD="valgrind --leak-check=full" VLD_SUPP_PATH=$(dirname $0)/../tests/ VLD_SUPP="--suppressions=${VLD_SUPP_PATH}/memcheck-libibverbs-librdmacm.supp" VLD_SUPP="${VLD_SUPP} --suppressions=${VLD_SUPP_PATH}/memcheck-libnl.supp" VLD_SUPP="${VLD_SUPP} --gen-suppressions=all" # prepare the server command VLD_SLOG_FILE="${BIN_DIR}/valgrind-server.log" VLD_SLOG="--log-file=${VLD_SLOG_FILE}" VLD_SCMD="${VLD_CMD} ${VLD_SUPP} ${VLD_SLOG}" # prepare the client command VLD_CLOG_FILE="${BIN_DIR}/valgrind-client.log" VLD_CLOG="--log-file=${VLD_CLOG_FILE}" VLD_CCMD="${VLD_CMD} ${VLD_SUPP} ${VLD_CLOG}" VLD_MSG="(under Valgrind)" # save the original values VLD_SCMD_ORIG=$VLD_SCMD VLD_CCMD_ORIG=$VLD_CCMD echo -n "Notice: running examples with Valgrind is tuned for debug build of librpma " echo -n "on Ubuntu 22.04 (see the CircleCI build). It may fail for any other OS, " echo "OS version, rdma-core version and for the release build." 
echo fi EXAMPLES=$(find $BIN_DIR -name server | sort) if [ "$MODE" != "integration-tests" ]; then for srv in $EXAMPLES; do DIR=$(dirname $srv) run_example $DIR done else # run the CLIENT with fault-injection for srv in $EXAMPLES; do DIR=$(dirname $srv) # get the maximum reachable value of fault-injection run_example $DIR 0 $GET_FI_MAX for fault_inject in $(seq 1 $(($C_FI_MAX + 1))); do run_example $DIR 0 $fault_inject print_FI_if_failed done done # run the SERVER with fault-injection for srv in $EXAMPLES; do DIR=$(dirname $srv) # get the maximum reachable value of fault-injection run_example $DIR $GET_FI_MAX 0 for fault_inject in $(seq 1 $(($S_FI_MAX + 1))); do run_example $DIR $fault_inject 0 print_FI_if_failed done done fi if [ $N_FAILED -gt 0 ]; then echo "$N_FAILED example(s) failed:" echo -e "$LIST_FAILED" err=1 fi if [ $N_SFAILED -gt 0 ]; then echo "$N_SFAILED example(s) server $VLD_MSG failed:" echo -e "$LIST_SFAILED" err=1 fi if [ $N_CFAILED -gt 0 ]; then echo "$N_CFAILED example(s) client $VLD_MSG failed:" echo -e "$LIST_CFAILED" err=1 fi if [[ $err == 1 ]]; then exit 1 fi echo "All examples succeeded" rpma-1.3.0/src/000077500000000000000000000000001443364775400132625ustar00rootroot00000000000000rpma-1.3.0/src/CMakeLists.txt000066400000000000000000000030251443364775400160220ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021-2022, Fujitsu # add_cstyle(src ${CMAKE_CURRENT_SOURCE_DIR}/*.[ch] ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h) add_check_whitespace(src ${CMAKE_CURRENT_SOURCE_DIR}/*.[ch] ${CMAKE_CURRENT_SOURCE_DIR}/include/*.h) set(SOURCES conn.c conn_cfg.c conn_req.c cq.c debug.c ep.c flush.c info.c librpma.c log.c log_default.c mr.c peer.c peer_cfg.c private_data.c rpma_err.c utils.c srq.c srq_cfg.c) add_library(rpma SHARED ${SOURCES}) target_include_directories(rpma PRIVATE . 
include) target_link_libraries(rpma PRIVATE ${LIBIBVERBS_LIBRARIES} ${LIBRDMACM_LIBRARIES} -Wl,--version-script=${CMAKE_SOURCE_DIR}/src/librpma.map) set_target_properties(rpma PROPERTIES SOVERSION 0 PUBLIC_HEADER "include/librpma.h") target_compile_definitions(rpma PRIVATE SRCVERSION="${SRCVERSION}") if(DEBUG_LOG_TRACE) target_compile_definitions(rpma PRIVATE DEBUG_LOG_TRACE=1) endif() if(DEBUG_FAULT_INJECTION) target_compile_definitions(rpma PRIVATE DEBUG_FAULT_INJECTION=1) endif() if(VALGRIND_FOUND) target_include_directories(rpma PRIVATE src/valgrind) endif() if(IBV_ADVISE_MR_FLAGS_SUPPORTED) target_compile_definitions(rpma PRIVATE IBV_ADVISE_MR_FLAGS_SUPPORTED=1) endif() if(CMAKE_BUILD_TYPE STREQUAL "Debug") target_compile_definitions(rpma PRIVATE DEBUG=1) endif() install(TARGETS rpma PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) rpma-1.3.0/src/cmocka_alloc.h000066400000000000000000000012141443364775400160400ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ #ifndef COMMON_CMOCKA_ALLOC #define COMMON_CMOCKA_ALLOC #include extern void *_test_malloc(const size_t size, const char *file, const int line); extern void *_test_calloc(const size_t number_of_elements, const size_t size, const char *file, const int line); extern void _test_free(void *const ptr, const char *file, const int line); #define malloc(size) _test_malloc(size, __FILE__, __LINE__) #define calloc(num, size) _test_calloc(num, size, __FILE__, __LINE__) #define free(ptr) _test_free(ptr, __FILE__, __LINE__) #endif /* COMMON_CMOCKA_ALLOC */ rpma-1.3.0/src/common.h000066400000000000000000000010061443364775400147200ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * common.h -- librpma common internal definitions */ #ifndef LIBRPMA_COMMON_H #define LIBRPMA_COMMON_H #define STR_HELPER(x) #x #define STR(x) STR_HELPER(x) #define CLIP_TO_INT(size) ((size) > INT_MAX ? INT_MAX : (int)(size)) #ifdef __GNUC__ #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else #define likely(x) (x) #define unlikely(x) (x) #endif #endif /* LIBRPMA_COMMON_H */ rpma-1.3.0/src/conn.c000066400000000000000000000377061443364775400144000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021,2023 Fujitsu Limited */ /* * conn.c -- librpma connection-related implementations */ #include #include #include "common.h" #include "conn.h" #include "debug.h" #include "flush.h" #include "log_internal.h" #include "mr.h" #include "private_data.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif struct rpma_conn { struct rdma_cm_id *id; /* a CM ID of the connection */ struct rdma_event_channel *evch; /* event channel of the CM ID */ struct rpma_cq *cq; /* main CQ */ struct rpma_cq *rcq; /* receive CQ */ struct ibv_comp_channel *channel; /* shared completion channel */ struct rpma_conn_private_data data; /* private data of the CM ID */ struct rpma_flush *flush; /* flushing object */ bool direct_write_to_pmem; /* direct write to pmem is supported */ }; /* internal librpma API */ /* * rpma_conn_new -- migrate an obtained CM ID into newly created event channel. * If succeeded wrap provided entities into a newly created connection object. 
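 * On any failure past the successful rdma_migrate_id(3) call, the resources created so far
 * (the flush object, the event channel) are released and the CM ID is migrated out of the
 * just-created event channel before an error is returned.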
* * Note: rdma_migrate_id(3) will block if the previous event channel of the CM * ID has any outstanding (unacknowledged) events. */ int rpma_conn_new(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, struct ibv_comp_channel *channel, struct rpma_conn **conn_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (peer == NULL || id == NULL || cq == NULL || conn_ptr == NULL) return RPMA_E_INVAL; int ret = 0; struct rdma_event_channel *evch = rdma_create_event_channel(); if (!evch) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_event_channel()"); return RPMA_E_PROVIDER; } RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_evch); if (rdma_migrate_id(id, evch)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_migrate_id()"); ret = RPMA_E_PROVIDER; goto err_destroy_evch; } struct rpma_flush *flush; ret = rpma_flush_new(peer, id->qp, &flush); if (ret) goto err_migrate_id_NULL; struct rpma_conn *conn = malloc(sizeof(*conn)); if (!conn) { ret = RPMA_E_NOMEM; goto err_flush_delete; } conn->id = id; conn->evch = evch; conn->cq = cq; conn->rcq = rcq; conn->channel = channel; conn->data.ptr = NULL; conn->data.len = 0; conn->flush = flush; conn->direct_write_to_pmem = false; *conn_ptr = conn; return 0; err_flush_delete: (void) rpma_flush_delete(&flush); err_migrate_id_NULL: (void) rdma_migrate_id(id, NULL); err_destroy_evch: rdma_destroy_event_channel(evch); return ret; } /* * rpma_conn_transfer_private_data -- transfer the private data to the connection (a take over). */ void rpma_conn_transfer_private_data(struct rpma_conn *conn, struct rpma_conn_private_data *pdata) { RPMA_DEBUG_TRACE; conn->data.ptr = pdata->ptr; conn->data.len = pdata->len; pdata->ptr = NULL; pdata->len = 0; } /* public librpma API */ /* * rpma_conn_get_event_fd -- get a file descriptor of the event channel * associated with the connection */ int rpma_conn_get_event_fd(const struct rpma_conn *conn, int *fd) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || fd == NULL) return RPMA_E_INVAL; *fd = conn->evch->fd; return 0; } /* * rpma_conn_next_event -- obtain the next event from the connection */ int rpma_conn_next_event(struct rpma_conn *conn, enum rpma_conn_event *event) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); RPMA_FAULT_INJECTION(RPMA_E_NO_EVENT, { errno = ENODATA; }); int ret; if (conn == NULL || event == NULL) return RPMA_E_INVAL; struct rdma_cm_event *edata = NULL; if (rdma_get_cm_event(conn->evch, &edata)) { if (errno == ENODATA) return RPMA_E_NO_EVENT; RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_get_cm_event()"); return RPMA_E_PROVIDER; } if (edata->event == RDMA_CM_EVENT_ESTABLISHED && conn->data.ptr == NULL) { ret = rpma_private_data_store(edata, &conn->data); if (ret) { (void) rdma_ack_cm_event(edata); return ret; } } enum rdma_cm_event_type cm_event = edata->event; if (rdma_ack_cm_event(edata)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_ack_cm_event()"); ret = RPMA_E_PROVIDER; goto err_private_data_discard; } RPMA_FAULT_INJECTION_GOTO(RPMA_E_UNKNOWN, err_private_data_discard); switch (cm_event) { case RDMA_CM_EVENT_ESTABLISHED: *event = RPMA_CONN_ESTABLISHED; break; case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_DEVICE_REMOVAL: *event = RPMA_CONN_LOST; break; case RDMA_CM_EVENT_DISCONNECTED: case RDMA_CM_EVENT_TIMEWAIT_EXIT: *event = RPMA_CONN_CLOSED; break; case RDMA_CM_EVENT_REJECTED: *event = RPMA_CONN_REJECTED; break; case RDMA_CM_EVENT_UNREACHABLE: *event = RPMA_CONN_UNREACHABLE; break; default: 
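		/* any other CM event type is unexpected here: log it and report RPMA_E_UNKNOWN */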
RPMA_LOG_WARNING("%s: %s", rpma_utils_conn_event_2str(*event), rdma_event_str(cm_event)); return RPMA_E_UNKNOWN; } RPMA_LOG_NOTICE("%s", rpma_utils_conn_event_2str(*event)); return 0; err_private_data_discard: rpma_private_data_delete(&conn->data); return ret; } /* * rpma_conn_wait -- wait for a completion event on the shared completion channel from CQ or RCQ, * ack it and return a CQ that caused the event in the cq argument and a boolean value saying * if it is RCQ or not in the is_rcq argument (if is_rcq is not NULL), * the flags argument is added to ensure backward compatibility in the future when * https://github.com/pmem/rpma/issues/1743 is implemented */ int rpma_conn_wait(struct rpma_conn *conn, int flags, struct rpma_cq **cq, bool *is_rcq) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NO_COMPLETION, {}); if (conn == NULL || cq == NULL) return RPMA_E_INVAL; if (conn->channel == NULL) return RPMA_E_NOT_SHARED_CHNL; /* wait for the completion event */ struct ibv_cq *ev_cq; /* CQ that got the event */ void *ev_ctx; /* unused */ if (ibv_get_cq_event(conn->channel, &ev_cq, &ev_ctx)) return RPMA_E_NO_COMPLETION; if (conn->cq && (rpma_cq_get_ibv_cq(conn->cq) == ev_cq)) { *cq = conn->cq; if (is_rcq) *is_rcq = false; } else if (conn->rcq && (rpma_cq_get_ibv_cq(conn->rcq) == ev_cq)) { *cq = conn->rcq; if (is_rcq) *is_rcq = true; } else { RPMA_LOG_ERROR("ibv_get_cq_event() returned unknown CQ"); return RPMA_E_UNKNOWN; } /* * ACK the collected CQ event. * * XXX for performance reasons, it may be beneficial to ACK more than * one CQ event at the same time. */ ibv_ack_cq_events(ev_cq, 1 /* # of CQ events */); /* request for the next event on the CQ channel */ RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, { *cq = NULL; }); errno = ibv_req_notify_cq(ev_cq, 0 /* all completions */); if (errno) { *cq = NULL; RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_req_notify_cq()"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_conn_get_compl_fd -- get a file descriptor of the shared * completion channel from the connection */ int rpma_conn_get_compl_fd(const struct rpma_conn *conn, int *fd) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || fd == NULL) return RPMA_E_INVAL; if (conn->channel == NULL) return RPMA_E_NOT_SHARED_CHNL; *fd = conn->channel->fd; return 0; } /* * rpma_conn_get_private_data -- hand a pointer to the connection's private data */ int rpma_conn_get_private_data(const struct rpma_conn *conn, struct rpma_conn_private_data *pdata) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || pdata == NULL) return RPMA_E_INVAL; pdata->ptr = conn->data.ptr; pdata->len = conn->data.len; return 0; } /* * rpma_conn_disconnect -- disconnect the connection */ int rpma_conn_disconnect(struct rpma_conn *conn) { RPMA_DEBUG_TRACE; if (conn == NULL) return RPMA_E_INVAL; if (rdma_disconnect(conn->id)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_disconnect()"); return RPMA_E_PROVIDER; } RPMA_LOG_NOTICE("Requesting for disconnection"); RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return 0; } /* * rpma_conn_delete -- delete the connection object */ int rpma_conn_delete(struct rpma_conn **conn_ptr) { RPMA_DEBUG_TRACE; if (conn_ptr == NULL) return RPMA_E_INVAL; struct rpma_conn *conn = *conn_ptr; if (conn == NULL) return 0; int ret = 0; ret = rpma_flush_delete(&conn->flush); if (ret) goto err_destroy_qp; rdma_destroy_qp(conn->id); ret = rpma_cq_delete(&conn->rcq); if (ret) goto err_rpma_cq_delete; ret = rpma_cq_delete(&conn->cq); if (ret) goto err_destroy_id; if 
(rdma_destroy_id(conn->id)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_destroy_id()"); ret = RPMA_E_PROVIDER; goto err_destroy_comp_channel; } RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_comp_channel); if (conn->channel) { errno = ibv_destroy_comp_channel(conn->channel); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_destroy_comp_channel()"); ret = RPMA_E_PROVIDER; goto err_destroy_event_channel; } RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_event_channel); } rdma_destroy_event_channel(conn->evch); rpma_private_data_delete(&conn->data); free(conn); *conn_ptr = NULL; return 0; err_destroy_qp: rdma_destroy_qp(conn->id); (void) rpma_cq_delete(&conn->rcq); err_rpma_cq_delete: (void) rpma_cq_delete(&conn->cq); err_destroy_id: (void) rdma_destroy_id(conn->id); err_destroy_comp_channel: if (conn->channel) (void) ibv_destroy_comp_channel(conn->channel); err_destroy_event_channel: rdma_destroy_event_channel(conn->evch); rpma_private_data_delete(&conn->data); free(conn); *conn_ptr = NULL; return ret; } /* * rpma_read -- initiate the read operation */ int rpma_read(struct rpma_conn *conn, struct rpma_mr_local *dst, size_t dst_offset, const struct rpma_mr_remote *src, size_t src_offset, size_t len, int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || flags == 0 || ((src == NULL || dst == NULL) && (src != NULL || dst != NULL || dst_offset != 0 || src_offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_read(conn->id->qp, dst, dst_offset, src, src_offset, len, flags, op_context); } /* * rpma_write -- initiate the write operation */ int rpma_write(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || flags == 0 || ((src == NULL || dst == NULL) && (src != NULL || dst != NULL || dst_offset != 0 || src_offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_write(conn->id->qp, dst, dst_offset, src, src_offset, len, flags, IBV_WR_RDMA_WRITE, 0, op_context); } /* * rpma_write_with_imm -- initiate the write operation with immediate data */ int rpma_write_with_imm(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, uint32_t imm, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || flags == 0 || ((src == NULL || dst == NULL) && (src != NULL || dst != NULL || dst_offset != 0 || src_offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_write(conn->id->qp, dst, dst_offset, src, src_offset, len, flags, IBV_WR_RDMA_WRITE_WITH_IMM, imm, op_context); } /* * rpma_atomic_write -- initiate the atomic 8 bytes write operation */ int rpma_atomic_write(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const char src[8], int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || dst == NULL || src == NULL || flags == 0) return RPMA_E_INVAL; if (dst_offset % RPMA_ATOMIC_WRITE_ALIGNMENT != 0) return RPMA_E_INVAL; return rpma_mr_atomic_write(conn->id->qp, dst, dst_offset, src, flags, op_context); } /* * rpma_flush -- initiate the flush operation */ int rpma_flush(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context) { 
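	/*
	 * As the checks below show, a persistent flush is refused with RPMA_E_NOSUPP unless both
	 * the connection has been configured for direct write to PMem
	 * (see rpma_conn_apply_remote_peer_cfg) and the destination memory region has been
	 * registered with the persistent flush-type usage; a visibility flush similarly requires
	 * the visibility flush-type usage.
	 */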
RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOSUPP, {}); if (conn == NULL || dst == NULL || flags == 0) return RPMA_E_INVAL; if (type == RPMA_FLUSH_TYPE_PERSISTENT && !conn->direct_write_to_pmem) { RPMA_LOG_ERROR( "Connection does not support flush to persistency. " "Check if the remote node supports direct write to persistent memory."); return RPMA_E_NOSUPP; } /* * Initialize 'flush_type' to prevent * the "Conditional jump or move depends on uninitialised value(s)" error * in case of fault-injection in rpma_mr_remote_get_flush_type(). */ int flush_type = 0; /* it cannot fail because: mr != NULL && flush_type != NULL */ (void) rpma_mr_remote_get_flush_type(dst, &flush_type); if (type == RPMA_FLUSH_TYPE_PERSISTENT && 0 == (flush_type & RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT)) { RPMA_LOG_ERROR( "The remote memory region does not support flushing to persistency"); return RPMA_E_NOSUPP; } if (type == RPMA_FLUSH_TYPE_VISIBILITY && 0 == (flush_type & RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY)) { RPMA_LOG_ERROR( "The remote memory region does not support flushing to global visibility"); return RPMA_E_NOSUPP; } rpma_flush_func flush = conn->flush->func; return flush(conn->id->qp, conn->flush, dst, dst_offset, len, type, flags, op_context); } /* * rpma_send -- initiate the send operation */ int rpma_send(struct rpma_conn *conn, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || flags == 0 || (src == NULL && (offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_send(conn->id->qp, src, offset, len, flags, IBV_WR_SEND, 0, op_context); } /* * rpma_send_with_imm -- initiate the send operation with immediate data */ int rpma_send_with_imm(struct rpma_conn *conn, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, uint32_t imm, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || flags == 0 || (src == NULL && (offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_send(conn->id->qp, src, offset, len, flags, IBV_WR_SEND_WITH_IMM, imm, op_context); } /* * rpma_recv -- initiate the receive operation */ int rpma_recv(struct rpma_conn *conn, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || (dst == NULL && (offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_recv(conn->id->qp, dst, offset, len, op_context); } /* * rpma_conn_get_qp_num -- get the connection's qp_num */ int rpma_conn_get_qp_num(const struct rpma_conn *conn, uint32_t *qp_num) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || qp_num == NULL) return RPMA_E_INVAL; *qp_num = conn->id->qp->qp_num; return 0; } /* * rpma_conn_get_cq -- get the connection's main CQ */ int rpma_conn_get_cq(const struct rpma_conn *conn, struct rpma_cq **cq_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || cq_ptr == NULL) return RPMA_E_INVAL; *cq_ptr = conn->cq; return 0; } /* * rpma_conn_get_rcq -- get the connection's receive CQ */ int rpma_conn_get_rcq(const struct rpma_conn *conn, struct rpma_cq **rcq_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || rcq_ptr == NULL) return RPMA_E_INVAL; *rcq_ptr = conn->rcq; return 0; } /* * rpma_conn_apply_remote_peer_cfg -- apply remote peer cfg for the connection */ int rpma_conn_apply_remote_peer_cfg(struct 
rpma_conn *conn, const struct rpma_peer_cfg *pcfg) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (conn == NULL || pcfg == NULL) return RPMA_E_INVAL; return rpma_peer_cfg_get_direct_write_to_pmem(pcfg, &conn->direct_write_to_pmem); } rpma-1.3.0/src/conn.h000066400000000000000000000017571443364775400144020ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn.h -- librpma connection-related internal definitions */ #ifndef LIBRPMA_CONN_H #define LIBRPMA_CONN_H #include "librpma.h" #include "cq.h" #include /* * ERRORS * rpma_conn_new() can fail with the following errors: * * - RPMA_E_INVAL - peer, id, cq or conn_ptr is NULL * - RPMA_E_PROVIDER - if rdma_create_event_channel(3) or rdma_migrate_id(3) fail * - RPMA_E_NOMEM - out of memory */ int rpma_conn_new(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, struct ibv_comp_channel *channel, struct rpma_conn **conn_ptr); /* * rpma_conn_transfer_private_data -- transfer the private data to the connection (a take over). * * ASSUMPTIONS * - conn != NULL && pdata != NULL */ void rpma_conn_transfer_private_data(struct rpma_conn *conn, struct rpma_conn_private_data *pdata); #endif /* LIBRPMA_CONN_H */ rpma-1.3.0/src/conn_cfg.c000066400000000000000000000303251443364775400152050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * conn_cfg.c -- librpma connection-configuration-related implementations */ #include #include #ifdef ATOMIC_OPERATIONS_SUPPORTED #include #endif /* ATOMIC_OPERATIONS_SUPPORTED */ #include #include "common.h" #include "conn_cfg.h" #include "conn_req.h" #include "debug.h" #include "librpma.h" #include "log_internal.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif /* * For the simplicity sake, it is assumed all CQ/SQ/RQ default sizes are equal. */ #define RPMA_DEFAULT_Q_SIZE 10 /* * The default size of the receive CQ is 0, which means no receive CQ * is created for the connection. */ #define RPMA_DEFAULT_RCQ_SIZE 0 /* * By default the completion channel is NOT shared by CQ and RCQ. 
*/ #define RPMA_DEFAULT_SHARED_COMPL_CHANNEL false struct rpma_conn_cfg { #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic int timeout_ms; /* connection establishment timeout */ _Atomic uint32_t cq_size; /* main CQ size */ _Atomic uint32_t rcq_size; /* receive CQ size */ _Atomic uint32_t sq_size; /* SQ size */ _Atomic uint32_t rq_size; /* RQ size */ _Atomic bool shared_comp_channel; /* completion channel shared by CQ and RCQ */ _Atomic uintptr_t srq; /* shared RQ object of (struct rpma_srq *) type */ #else int timeout_ms; /* connection establishment timeout */ uint32_t cq_size; /* main CQ size */ uint32_t rcq_size; /* receive CQ size */ uint32_t sq_size; /* SQ size */ uint32_t rq_size; /* RQ size */ bool shared_comp_channel; /* completion channel shared by CQ and RCQ */ uintptr_t srq; /* shared RQ object of (struct rpma_srq *) type */ #endif /* ATOMIC_OPERATIONS_SUPPORTED */ }; static struct rpma_conn_cfg Conn_cfg_default = { .timeout_ms = RPMA_DEFAULT_TIMEOUT_MS, .cq_size = RPMA_DEFAULT_Q_SIZE, .rcq_size = RPMA_DEFAULT_RCQ_SIZE, .sq_size = RPMA_DEFAULT_Q_SIZE, .rq_size = RPMA_DEFAULT_Q_SIZE, .shared_comp_channel = RPMA_DEFAULT_SHARED_COMPL_CHANNEL, .srq = 0 }; /* internal librpma API */ /* * rpma_conn_cfg_default -- return pointer to default connection configuration object */ struct rpma_conn_cfg * rpma_conn_cfg_default() { RPMA_DEBUG_TRACE; return &Conn_cfg_default; } /* * rpma_conn_cfg_get_cqe -- ibv_create_cq(..., int cqe, ...) compatible variant * of rpma_conn_cfg_get_cq_size(). Round down the cq_size when it is too big * for storing into an int type of value. Convert otherwise. */ void rpma_conn_cfg_get_cqe(const struct rpma_conn_cfg *cfg, int *cqe) { RPMA_DEBUG_TRACE; uint32_t cq_size = 0; (void) rpma_conn_cfg_get_cq_size(cfg, &cq_size); *cqe = CLIP_TO_INT(cq_size); } /* * rpma_conn_cfg_get_rcqe -- ibv_create_cq(..., int cqe, ...) compatible variant * of rpma_conn_cfg_get_rcq_size(). Round down the rcq_size when it is too big * for storing into an int type of value. Convert otherwise. 
*/ void rpma_conn_cfg_get_rcqe(const struct rpma_conn_cfg *cfg, int *rcqe) { RPMA_DEBUG_TRACE; uint32_t rcq_size = 0; (void) rpma_conn_cfg_get_rcq_size(cfg, &rcq_size); *rcqe = CLIP_TO_INT(rcq_size); } /* public librpma API */ /* * rpma_conn_cfg_new -- create a new connection configuration object */ int rpma_conn_cfg_new(struct rpma_conn_cfg **cfg_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); if (cfg_ptr == NULL) return RPMA_E_INVAL; *cfg_ptr = malloc(sizeof(struct rpma_conn_cfg)); if (*cfg_ptr == NULL) return RPMA_E_NOMEM; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_init(&(*cfg_ptr)->cq_size, atomic_load_explicit(&Conn_cfg_default.cq_size, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->rq_size, atomic_load_explicit(&Conn_cfg_default.rq_size, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->sq_size, atomic_load_explicit(&Conn_cfg_default.sq_size, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->timeout_ms, atomic_load_explicit(&Conn_cfg_default.timeout_ms, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->rcq_size, atomic_load_explicit(&Conn_cfg_default.rcq_size, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->shared_comp_channel, atomic_load_explicit(&Conn_cfg_default.shared_comp_channel, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->srq, atomic_load_explicit(&Conn_cfg_default.srq, __ATOMIC_SEQ_CST)); #else memcpy(*cfg_ptr, &Conn_cfg_default, sizeof(struct rpma_conn_cfg)); #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_delete -- delete the connection configuration object */ int rpma_conn_cfg_delete(struct rpma_conn_cfg **cfg_ptr) { RPMA_DEBUG_TRACE; if (cfg_ptr == NULL) return RPMA_E_INVAL; if (*cfg_ptr == NULL) return 0; free(*cfg_ptr); *cfg_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_timeout -- set connection establishment timeout */ int rpma_conn_cfg_set_timeout(struct rpma_conn_cfg *cfg, int timeout_ms) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL || timeout_ms < 0) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->timeout_ms, timeout_ms, __ATOMIC_SEQ_CST); #else cfg->timeout_ms = timeout_ms; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_timeout -- get connection establishment timeout */ int rpma_conn_cfg_get_timeout(const struct rpma_conn_cfg *cfg, int *timeout_ms) { RPMA_DEBUG_TRACE; if (cfg == NULL || timeout_ms == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *timeout_ms = atomic_load_explicit((_Atomic int *)&cfg->timeout_ms, __ATOMIC_SEQ_CST); #else *timeout_ms = cfg->timeout_ms; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_cq_size -- set CQ size for the connection */ int rpma_conn_cfg_set_cq_size(struct rpma_conn_cfg *cfg, uint32_t cq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->cq_size, cq_size, __ATOMIC_SEQ_CST); #else cfg->cq_size = cq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_cq_size -- get CQ size for the connection */ int rpma_conn_cfg_get_cq_size(const struct rpma_conn_cfg *cfg, uint32_t *cq_size) { RPMA_DEBUG_TRACE; /* fault injection is located at the end of this function - see the comment */ if (cfg == NULL || cq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *cq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->cq_size, __ATOMIC_SEQ_CST); 
#else *cq_size = cfg->cq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ /* * This function is used as void in rpma_conn_cfg_get_cqe() and therefore it has to return * the correct value of size of CQ, if it fails because of fault injection. */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_rcq_size -- set receive CQ size for the connection */ int rpma_conn_cfg_set_rcq_size(struct rpma_conn_cfg *cfg, uint32_t rcq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->rcq_size, rcq_size, __ATOMIC_SEQ_CST); #else cfg->rcq_size = rcq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_rcq_size -- get receive CQ size for the connection */ int rpma_conn_cfg_get_rcq_size(const struct rpma_conn_cfg *cfg, uint32_t *rcq_size) { RPMA_DEBUG_TRACE; /* fault injection is located at the end of this function - see the comment */ if (cfg == NULL || rcq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *rcq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->rcq_size, __ATOMIC_SEQ_CST); #else *rcq_size = cfg->rcq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ /* * This function is used as void in rpma_conn_cfg_get_rcqe() and therefore it has to return * the correct value of size of RCQ, if it fails because of fault injection. */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_sq_size -- set SQ size for the connection */ int rpma_conn_cfg_set_sq_size(struct rpma_conn_cfg *cfg, uint32_t sq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->sq_size, sq_size, __ATOMIC_SEQ_CST); #else cfg->sq_size = sq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_sq_size -- get SQ size for the connection */ int rpma_conn_cfg_get_sq_size(const struct rpma_conn_cfg *cfg, uint32_t *sq_size) { RPMA_DEBUG_TRACE; /* fault injection is located at the end of this function - see the comment */ if (cfg == NULL || sq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *sq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->sq_size, __ATOMIC_SEQ_CST); #else *sq_size = cfg->sq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ /* * This function is used as void in rpma_peer_setup_qp() and therefore it has to return * the correct value of size of SQ, if it fails because of fault injection. 
*/ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_rq_size -- set RQ size for the connection */ int rpma_conn_cfg_set_rq_size(struct rpma_conn_cfg *cfg, uint32_t rq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->rq_size, rq_size, __ATOMIC_SEQ_CST); #else cfg->rq_size = rq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_rq_size -- get RQ size for the connection */ int rpma_conn_cfg_get_rq_size(const struct rpma_conn_cfg *cfg, uint32_t *rq_size) { RPMA_DEBUG_TRACE; /* fault injection is located at the end of this function - see the comment */ if (cfg == NULL || rq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *rq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->rq_size, __ATOMIC_SEQ_CST); #else *rq_size = cfg->rq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ /* * This function is used as void in rpma_peer_setup_qp() and therefore it has to return * the correct value of size of RQ, if it fails because of fault injection. */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_compl_channel -- set if the completion channel is shared by CQ and RCQ */ int rpma_conn_cfg_set_compl_channel(struct rpma_conn_cfg *cfg, bool shared) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->shared_comp_channel, shared, __ATOMIC_SEQ_CST); #else cfg->shared_comp_channel = shared; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_compl_channel -- get if the completion channel is shared by CQ and RCQ */ int rpma_conn_cfg_get_compl_channel(const struct rpma_conn_cfg *cfg, bool *shared) { RPMA_DEBUG_TRACE; if (cfg == NULL || shared == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *shared = atomic_load_explicit((_Atomic bool *)&cfg->shared_comp_channel, __ATOMIC_SEQ_CST); #else *shared = cfg->shared_comp_channel; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_conn_cfg_set_srq -- set a shared RQ object for the connection */ int rpma_conn_cfg_set_srq(struct rpma_conn_cfg *cfg, struct rpma_srq *srq) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL || srq == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->srq, (uintptr_t)srq, __ATOMIC_SEQ_CST); #else cfg->srq = (uintptr_t)srq; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_conn_cfg_get_srq -- get the shared RQ object from the connection */ int rpma_conn_cfg_get_srq(const struct rpma_conn_cfg *cfg, struct rpma_srq **srq_ptr) { RPMA_DEBUG_TRACE; if (cfg == NULL || srq_ptr == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *srq_ptr = (struct rpma_srq *)atomic_load_explicit((_Atomic uintptr_t *)&cfg->srq, __ATOMIC_SEQ_CST); #else *srq_ptr = (struct rpma_srq *)cfg->srq; #endif RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } rpma-1.3.0/src/conn_cfg.h000066400000000000000000000017501443364775400152120ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_cfg.h -- librpma connection-configuration-related internal definitions */ #ifndef LIBRPMA_CONN_CFG_H #define LIBRPMA_CONN_CFG_H #include "librpma.h" /* * ERRORS * rpma_conn_cfg_default() 
cannot fail. */ struct rpma_conn_cfg *rpma_conn_cfg_default(); /* * ERRORS * rpma_conn_cfg_get_cqe() cannot fail. * * ASSUMPTIONS * cfg != NULL && cqe != NULL */ void rpma_conn_cfg_get_cqe(const struct rpma_conn_cfg *cfg, int *cqe); /* * ERRORS * rpma_conn_cfg_get_rcqe() cannot fail. * * ASSUMPTIONS * cfg != NULL && rcqe != NULL */ void rpma_conn_cfg_get_rcqe(const struct rpma_conn_cfg *cfg, int *rcqe); /* * ERRORS * rpma_conn_cfg_get_compl_channel() can fail with the following error: * * - RPMA_E_INVAL - cfg or shared is NULL */ int rpma_conn_cfg_get_compl_channel(const struct rpma_conn_cfg *cfg, bool *shared); #endif /* LIBRPMA_CONN_CFG_H */ rpma-1.3.0/src/conn_req.c000066400000000000000000000331431443364775400152360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * conn_req.c -- librpma connection-request-related implementations */ #include #include #include "common.h" #include "conn.h" #include "conn_cfg.h" #include "conn_req.h" #include "debug.h" #include "info.h" #include "log_internal.h" #include "mr.h" #include "peer.h" #include "private_data.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif struct rpma_conn_req { /* it is the passive side */ int is_passive; /* CM ID of the connection request */ struct rdma_cm_id *id; /* main CQ */ struct rpma_cq *cq; /* receive CQ */ struct rpma_cq *rcq; /* shared completion channel */ struct ibv_comp_channel *channel; /* private data of the CM ID (incoming only) */ struct rpma_conn_private_data data; /* a parent RPMA peer of this request - needed for derivative objects */ struct rpma_peer *peer; }; #ifdef DEBUG /* * rpma_snprintf_gid -- snprintf GID address to the given string (helper function) */ static inline int rpma_snprintf_gid(uint8_t *raw, char *gid, size_t size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_UNKNOWN, {}); memset(gid, 0, size); int ret = snprintf(gid, size, "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", raw[0], raw[1], raw[2], raw[3], raw[4], raw[5], raw[6], raw[7], raw[8], raw[9], raw[10], raw[11], raw[12], raw[13], raw[14], raw[15]); if (ret < 0) { memset(gid, 0, size); return RPMA_E_UNKNOWN; } return 0; } #endif /* DEBUG */ /* * rpma_conn_req_new_from_id -- allocate a new conn_req object from CM ID and equip the latter * with QP and CQ * * ASSUMPTIONS * - peer != NULL && id != NULL && cfg != NULL && req_ptr != NULL */ static int rpma_conn_req_new_from_id(struct rpma_peer *peer, struct rdma_cm_id *id, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); int ret = 0; int cqe, rcqe; bool shared = false; struct rpma_srq *srq = NULL; struct rpma_cq *cq = NULL; struct rpma_cq *rcq = NULL; struct rpma_cq *srq_rcq = NULL; /* read the main CQ size from the configuration */ rpma_conn_cfg_get_cqe(cfg, &cqe); /* read the receive CQ size from the configuration */ rpma_conn_cfg_get_rcqe(cfg, &rcqe); /* get if the completion channel should be shared by CQ and RCQ */ (void) rpma_conn_cfg_get_compl_channel(cfg, &shared); /* get the shared RQ object from the connection */ (void) rpma_conn_cfg_get_srq(cfg, &srq); if (srq) (void) rpma_srq_get_rcq(srq, &srq_rcq); if (shared && srq_rcq) { RPMA_LOG_ERROR( "connection shared completion channel cannot be used when the shared RQ has its own RCQ"); return RPMA_E_INVAL; } RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); struct ibv_comp_channel *channel = NULL; if (shared) { /* create a 
completion channel */ channel = ibv_create_comp_channel(id->verbs); if (channel == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_create_comp_channel()"); return RPMA_E_PROVIDER; } } ret = rpma_cq_new(id->verbs, cqe, channel, &cq); if (ret) goto err_comp_channel_destroy; if (!srq_rcq && rcqe) { ret = rpma_cq_new(id->verbs, rcqe, channel, &rcq); if (ret) goto err_rpma_cq_delete; } /* setup a QP */ ret = rpma_peer_setup_qp(peer, id, cq, srq_rcq ? srq_rcq : rcq, cfg); if (ret) goto err_rpma_rcq_delete; *req_ptr = (struct rpma_conn_req *)malloc(sizeof(struct rpma_conn_req)); if (*req_ptr == NULL) { ret = RPMA_E_NOMEM; goto err_destroy_qp; } #ifdef DEBUG /* * Maximum length of GID address in the following format: * 0000:0000:0000:0000:0000:ffff:c0a8:6604 */ #define GID_STR_LEN 40 /* log GID addresses if log level >= RPMA_LOG_LEVEL_NOTICE */ enum rpma_log_level level; ret = rpma_log_get_threshold(RPMA_LOG_THRESHOLD, &level); if (ret == 0 && level >= RPMA_LOG_LEVEL_NOTICE) { struct ibv_sa_path_rec *path_rec = id->route.path_rec; char gid[GID_STR_LEN]; if (path_rec && !rpma_snprintf_gid(path_rec->sgid.raw, gid, GID_STR_LEN)) { RPMA_LOG_NOTICE("src GID = %s", gid); } else { RPMA_LOG_NOTICE("src GID is not available"); } if (path_rec && !rpma_snprintf_gid(path_rec->dgid.raw, gid, GID_STR_LEN)) { RPMA_LOG_NOTICE("dst GID = %s", gid); } else { RPMA_LOG_NOTICE("dst GID is not available"); } } #undef GID_STR_LEN #endif /* DEBUG */ (*req_ptr)->is_passive = 0; (*req_ptr)->id = id; (*req_ptr)->cq = cq; (*req_ptr)->rcq = rcq; (*req_ptr)->channel = channel; (*req_ptr)->data.ptr = NULL; (*req_ptr)->data.len = 0; (*req_ptr)->peer = peer; return 0; err_destroy_qp: rdma_destroy_qp(id); err_rpma_rcq_delete: (void) rpma_cq_delete(&rcq); err_rpma_cq_delete: (void) rpma_cq_delete(&cq); err_comp_channel_destroy: if (channel) (void) ibv_destroy_comp_channel(channel); return ret; } /* * rpma_conn_new_accept -- call rdma_accept()+rdma_ack_cm_event(). If succeeds * request re-packing the connection request to a connection object. Otherwise, * rdma_disconnect()+rdma_destroy_qp()+rpma_cq_delete() to destroy * the unsuccessful connection request. * * ASSUMPTIONS * - req != NULL && conn_param != NULL && conn_ptr != NULL */ static int rpma_conn_new_accept(struct rpma_conn_req *req, struct rdma_conn_param *conn_param, struct rpma_conn **conn_ptr) { int ret = 0; RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_conn_req_delete); if (rdma_accept(req->id, conn_param)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_accept()"); ret = RPMA_E_PROVIDER; goto err_conn_req_delete; } struct rpma_conn *conn = NULL; ret = rpma_conn_new(req->peer, req->id, req->cq, req->rcq, req->channel, &conn); if (ret) goto err_conn_disconnect; rpma_conn_transfer_private_data(conn, &req->data); *conn_ptr = conn; return 0; err_conn_disconnect: (void) rdma_disconnect(req->id); err_conn_req_delete: rdma_destroy_qp(req->id); (void) rpma_cq_delete(&req->rcq); (void) rpma_cq_delete(&req->cq); (void) rpma_private_data_delete(&req->data); if (req->channel) (void) ibv_destroy_comp_channel(req->channel); return ret; } /* * rpma_conn_new_connect -- call rdma_connect(). If succeeds request * re-packing the connection request to a connection object. Otherwise, * rdma_destroy_qp()+rpma_cq_delete()+rdma_destroy_id() to destroy * the unsuccessful connection request. 
* * ASSUMPTIONS * - req != NULL && conn_param != NULL && conn_ptr != NULL */ static int rpma_conn_new_connect(struct rpma_conn_req *req, struct rdma_conn_param *conn_param, struct rpma_conn **conn_ptr) { int ret = 0; RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_conn_new); struct rpma_conn *conn = NULL; ret = rpma_conn_new(req->peer, req->id, req->cq, req->rcq, req->channel, &conn); if (ret) goto err_conn_new; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, { (void) rpma_conn_delete(&conn); }); if (rdma_connect(req->id, conn_param)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_connect()"); (void) rpma_conn_delete(&conn); return RPMA_E_PROVIDER; } *conn_ptr = conn; return 0; err_conn_new: rdma_destroy_qp(req->id); (void) rpma_cq_delete(&req->rcq); (void) rpma_cq_delete(&req->cq); (void) rdma_destroy_id(req->id); if (req->channel) (void) ibv_destroy_comp_channel(req->channel); return ret; } /* * rpma_conn_req_reject -- destroy CQ of the CM ID and reject the connection. * * ASSUMPTIONS * - req != NULL */ static int rpma_conn_req_reject(struct rpma_conn_req *req) { RPMA_DEBUG_TRACE; if (rdma_reject(req->id, NULL /* private data */, 0 /* private data len */)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_reject()"); return RPMA_E_PROVIDER; } RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return 0; } /* * rpma_conn_req_destroy -- destroy CQ of the CM ID and destroy the CM ID. * * ASSUMPTIONS * - req != NULL */ static int rpma_conn_req_destroy(struct rpma_conn_req *req) { RPMA_DEBUG_TRACE; if (rdma_destroy_id(req->id)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_destroy_id()"); return RPMA_E_PROVIDER; } RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return 0; } /* internal librpma API */ /* * rpma_conn_req_new_from_cm_event -- feeds an ID from cm event into * rpma_conn_req_new_from_id and add the event to conn_req * * ASSUMPTIONS * cfg != NULL */ int rpma_conn_req_new_from_cm_event(struct rpma_peer *peer, struct rdma_cm_event *event, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (peer == NULL || event == NULL || event->event != RDMA_CM_EVENT_CONNECT_REQUEST || req_ptr == NULL) return RPMA_E_INVAL; struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_id(peer, event->id, cfg, &req); if (ret) return ret; ret = rpma_private_data_store(event, &req->data); if (ret) goto err_conn_req_delete; req->is_passive = 1; *req_ptr = req; return 0; err_conn_req_delete: (void) rpma_conn_req_delete(&req); return ret; } /* public librpma API */ /* * rpma_conn_req_new -- create a new outgoing connection request object. It uses * rdma_create_id, rpma_info_resolve_addr and rdma_resolve_route and feeds * the prepared ID into rpma_conn_req_new_from_id. 
*/ int rpma_conn_req_new(struct rpma_peer *peer, const char *addr, const char *port, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (peer == NULL || addr == NULL || port == NULL || req_ptr == NULL) return RPMA_E_INVAL; if (cfg == NULL) cfg = rpma_conn_cfg_default(); int timeout_ms = 0; (void) rpma_conn_cfg_get_timeout(cfg, &timeout_ms); struct rpma_info *info; int ret = rpma_info_new(addr, port, RPMA_INFO_ACTIVE, &info); if (ret) return ret; struct rdma_cm_id *id; RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_info_delete); if (rdma_create_id(NULL, &id, NULL, RDMA_PS_TCP)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_id()"); ret = RPMA_E_PROVIDER; goto err_info_delete; } /* resolve address */ ret = rpma_info_resolve_addr(info, id, timeout_ms); if (ret) goto err_destroy_id; /* resolve route */ RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_id); if (rdma_resolve_route(id, timeout_ms)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_resolve_route(timeout_ms=%i)", timeout_ms); ret = RPMA_E_PROVIDER; goto err_destroy_id; } struct rpma_conn_req *req; ret = rpma_conn_req_new_from_id(peer, id, cfg, &req); if (ret) goto err_destroy_id; *req_ptr = req; (void) rpma_info_delete(&info); RPMA_LOG_NOTICE("Requesting a connection to %s:%s", addr, port); return 0; err_destroy_id: (void) rdma_destroy_id(id); err_info_delete: (void) rpma_info_delete(&info); return ret; } /* * rpma_conn_req_connect -- prepare connection parameters and request connecting * a connection request (either active or passive). When done release (delete) * the connection request object (regardless of the result). */ int rpma_conn_req_connect(struct rpma_conn_req **req_ptr, const struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr) { RPMA_DEBUG_TRACE; if (req_ptr == NULL || *req_ptr == NULL) return RPMA_E_INVAL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, { (void) rpma_conn_req_delete(req_ptr); }); if (conn_ptr == NULL || (pdata != NULL && (pdata->ptr == NULL || pdata->len == 0))) { (void) rpma_conn_req_delete(req_ptr); return RPMA_E_INVAL; } struct rdma_conn_param conn_param = {0}; conn_param.private_data = pdata ? pdata->ptr : NULL; conn_param.private_data_len = pdata ? pdata->len : 0; conn_param.responder_resources = RDMA_MAX_RESP_RES; conn_param.initiator_depth = RDMA_MAX_INIT_DEPTH; conn_param.flow_control = 1; conn_param.retry_count = 7; /* max 3-bit value */ conn_param.rnr_retry_count = 7; /* max 3-bit value */ int ret = 0; if ((*req_ptr)->is_passive) ret = rpma_conn_new_accept(*req_ptr, &conn_param, conn_ptr); else ret = rpma_conn_new_connect(*req_ptr, &conn_param, conn_ptr); free(*req_ptr); *req_ptr = NULL; return ret; } /* * rpma_conn_req_delete -- destroy QP and either reject (for incoming connection requests) * or destroy the connection request (for the outgoing one). At last release the connection * request object. 
*/ int rpma_conn_req_delete(struct rpma_conn_req **req_ptr) { RPMA_DEBUG_TRACE; if (req_ptr == NULL) return RPMA_E_INVAL; struct rpma_conn_req *req = *req_ptr; if (req == NULL) return 0; rdma_destroy_qp(req->id); int ret = rpma_cq_delete(&req->rcq); int ret2 = rpma_cq_delete(&req->cq); if (!ret) ret = ret2; if (req->is_passive) ret2 = rpma_conn_req_reject(req); else ret2 = rpma_conn_req_destroy(req); if (!ret) ret = ret2; if (req->channel) { errno = ibv_destroy_comp_channel(req->channel); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_destroy_comp_channel()"); if (!ret) ret = RPMA_E_PROVIDER; } } rpma_private_data_delete(&req->data); free(req); *req_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } /* * rpma_conn_req_recv -- initiate the receive operation */ int rpma_conn_req_recv(struct rpma_conn_req *req, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (req == NULL || dst == NULL) return RPMA_E_INVAL; return rpma_mr_recv(req->id->qp, dst, offset, len, op_context); } /* * rpma_conn_req_get_private_data -- get a pointer to the incoming connection's private data */ int rpma_conn_req_get_private_data(const struct rpma_conn_req *req, struct rpma_conn_private_data *pdata) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (req == NULL || pdata == NULL) return RPMA_E_INVAL; pdata->ptr = req->data.ptr; pdata->len = req->data.len; return 0; } rpma-1.3.0/src/conn_req.h000066400000000000000000000014071443364775400152410ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * conn_req.h -- librpma connection-request-related internal definitions */ #ifndef LIBRPMA_CONN_REQ_H #define LIBRPMA_CONN_REQ_H #include "librpma.h" #include /* * ERRORS * rpma_conn_req_new_from_cm_event() can fail with the following errors: * * - RPMA_E_INVAL - peer, event or req_ptr is NULL * - RPMA_E_INVAL - event is not RDMA_CM_EVENT_CONNECT_REQUEST * - RPMA_E_PROVIDER - ibv_create_cq(3) or rdma_create_qp(3) failed * - RPMA_E_NOMEM - out of memory */ int rpma_conn_req_new_from_cm_event(struct rpma_peer *peer, struct rdma_cm_event *event, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr); #endif /* LIBRPMA_CONN_REQ_H */ rpma-1.3.0/src/cq.c000066400000000000000000000133511443364775400140340ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * cq.c -- librpma completion-queue-related implementations */ #include #include #include #include #include "common.h" #include "cq.h" #include "debug.h" #include "log_internal.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif struct rpma_cq { struct ibv_comp_channel *channel; /* completion channel */ bool shared_comp_channel; /* completion channel is shared */ struct ibv_cq *cq; /* completion queue */ }; /* internal librpma API */ /* * rpma_cq_get_ibv_cq -- get the CQ member from the rpma_cq object * * ASSUMPTIONS * - cq != NULL */ struct ibv_cq * rpma_cq_get_ibv_cq(const struct rpma_cq *cq) { return cq->cq; } /* * rpma_cq_new -- create a completion channel and CQ and then encapsulate them in a rpma_cq object * * ASSUMPTIONS * - ibv_ctx != NULL && cq_ptr != NULL */ int rpma_cq_new(struct ibv_context *ibv_ctx, int cqe, struct ibv_comp_channel *shared_channel, struct rpma_cq **cq_ptr) { RPMA_DEBUG_TRACE; struct ibv_comp_channel *channel; int ret = 0; if (shared_channel) { channel = 
shared_channel; } else { /* create a completion channel */ RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); channel = ibv_create_comp_channel(ibv_ctx); if (channel == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_create_comp_channel()"); return RPMA_E_PROVIDER; } } /* create a CQ */ RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_comp_channel); struct ibv_cq *cq = ibv_create_cq(ibv_ctx, cqe, NULL /* cq_context */, channel /* channel */, 0 /* comp_vector */); if (cq == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_create_cq()"); ret = RPMA_E_PROVIDER; goto err_destroy_comp_channel; } /* request for the next completion on the completion channel */ RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_cq); errno = ibv_req_notify_cq(cq, 0 /* all completions */); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_req_notify_cq()"); ret = RPMA_E_PROVIDER; goto err_destroy_cq; } *cq_ptr = (struct rpma_cq *)malloc(sizeof(struct rpma_cq)); if (*cq_ptr == NULL) { ret = RPMA_E_NOMEM; goto err_destroy_cq; } (*cq_ptr)->channel = channel; (*cq_ptr)->shared_comp_channel = (shared_channel != NULL); (*cq_ptr)->cq = cq; return 0; err_destroy_cq: (void) ibv_destroy_cq(cq); err_destroy_comp_channel: if (!shared_channel) (void) ibv_destroy_comp_channel(channel); return ret; } /* * rpma_cq_delete -- destroy the CQ and the completion channel and then free the encapsulating * rpma_cq object * * ASSUMPTIONS * - cq_ptr != NULL */ int rpma_cq_delete(struct rpma_cq **cq_ptr) { RPMA_DEBUG_TRACE; struct rpma_cq *cq = *cq_ptr; int ret = 0; /* it is possible for cq to be NULL (e.g. rcq) */ if (cq == NULL) return ret; errno = ibv_destroy_cq(cq->cq); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_destroy_cq()"); ret = RPMA_E_PROVIDER; } if (!cq->shared_comp_channel) { errno = ibv_destroy_comp_channel(cq->channel); if (!ret && errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_destroy_comp_channel()"); ret = RPMA_E_PROVIDER; } } free(cq); *cq_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } /* public librpma API */ /* * rpma_cq_get_fd -- get a file descriptor of the completion event channel from the CQ */ int rpma_cq_get_fd(const struct rpma_cq *cq, int *fd) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cq == NULL || fd == NULL) return RPMA_E_INVAL; *fd = cq->channel->fd; return 0; } /* * rpma_cq_wait -- wait for a completion event from the CQ and ack the completion event if * the completion channel is not shared. */ int rpma_cq_wait(struct rpma_cq *cq) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cq == NULL) return RPMA_E_INVAL; if (cq->shared_comp_channel) return RPMA_E_SHARED_CHANNEL; /* wait for the completion event */ struct ibv_cq *ev_cq; /* unused */ void *ev_ctx; /* unused */ RPMA_FAULT_INJECTION(RPMA_E_NO_COMPLETION, {}); if (ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx)) return RPMA_E_NO_COMPLETION; /* * ACK the collected CQ event. * * XXX for performance reasons, it may be beneficial to ACK more than one CQ event * at the same time. 
*/ ibv_ack_cq_events(cq->cq, 1 /* # of CQ events */); /* request for the next event on the CQ channel */ RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); errno = ibv_req_notify_cq(cq->cq, 0 /* all completions */); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_req_notify_cq()"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_cq_get_wc -- receive one or more completions from the CQ */ int rpma_cq_get_wc(struct rpma_cq *cq, int num_entries, struct ibv_wc *wc, int *num_entries_got) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cq == NULL || num_entries < 1 || wc == NULL) return RPMA_E_INVAL; if (num_entries > 1 && num_entries_got == NULL) return RPMA_E_INVAL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int result = ibv_poll_cq(cq->cq, num_entries, wc); if (result == 0) { /* * There may be an extra CQ event with no completion in the CQ. */ RPMA_LOG_DEBUG("No completion in the CQ"); return RPMA_E_NO_COMPLETION; } else if (result < 0) { /* ibv_poll_cq() may return only -1; no errno provided */ RPMA_LOG_ERROR("ibv_poll_cq() failed (no details available)"); return RPMA_E_PROVIDER; } else if (result > num_entries) { RPMA_LOG_ERROR( "ibv_poll_cq() returned %d where <= %d is expected", result, num_entries); return RPMA_E_UNKNOWN; } if (num_entries_got) *num_entries_got = result; RPMA_FAULT_INJECTION(RPMA_E_NO_COMPLETION, {}); RPMA_FAULT_INJECTION(RPMA_E_UNKNOWN, {}); return 0; } rpma-1.3.0/src/cq.h000066400000000000000000000020161443364775400140350ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq.h -- librpma completion-queue-related internal definitions */ #ifndef LIBRPMA_CQ_H #define LIBRPMA_CQ_H #include #include #include "librpma.h" /* * ERRORS * rpma_cq_get_ibv_cq() cannot fail. 
*/ struct ibv_cq *rpma_cq_get_ibv_cq(const struct rpma_cq *cq); /* * ERRORS * rpma_cq_new() can fail with the following errors: * * - RPMA_E_PROVIDER - ibv_create_comp_channel(3), ibv_create_cq(3) or ibv_req_notify_cq(3) failed * with a provider error * - RPMA_E_NOMEM - out of memory */ int rpma_cq_new(struct ibv_context *ibv_ctx, int cqe, struct ibv_comp_channel *shared_channel, struct rpma_cq **cq_ptr); /* * ERRORS * rpma_cq_delete() can fail with the following errors: * * - RPMA_E_PROVIDER - ibv_destroy_cq(3) or ibv_destroy_comp_channel(3) failed with a provider * error */ int rpma_cq_delete(struct rpma_cq **cq_ptr); #endif /* LIBRPMA_CQ_H */ rpma-1.3.0/src/debug.c000066400000000000000000000014351443364775400145170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * debug.c -- librpma debug implementations */ #include #include "librpma.h" #include "debug.h" #ifdef DEBUG_FAULT_INJECTION /* * rpma_fault_injection -- fail when counter reaches the value defined by the RPMA_FAULT_INJECTION * environment variable */ int rpma_fault_injection(int *value) { static int counter = 0; static int fault_injection = 0; if (counter == 0 && getenv("RPMA_FAULT_INJECTION") != NULL) { fault_injection = atoi(getenv("RPMA_FAULT_INJECTION")); if (fault_injection <= 0) { fault_injection = 0; counter = 1; } } if (fault_injection) { *value = ++counter; if (counter == fault_injection) return -1; } return 0; } #endif /* DEBUG_FAULT_INJECTION */ rpma-1.3.0/src/debug.h000066400000000000000000000021461443364775400145240ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * debug.h -- librpma debug internal definitions */ #ifndef LIBRPMA_DEBUG_H #define LIBRPMA_DEBUG_H #include "log_internal.h" #ifdef DEBUG_LOG_TRACE #define RPMA_DEBUG_TRACE \ RPMA_LOG_ALWAYS("-") #else #define RPMA_DEBUG_TRACE \ do { } while (0) #endif /* DEBUG_LOG_TRACE */ #ifdef DEBUG_FAULT_INJECTION #define RPMA_FAULT_INJECTION(ret_val, exit_code) \ do { \ int value = 0; \ if (rpma_fault_injection(&value)) { \ RPMA_LOG_ALWAYS("[#%i] [FAULT INJECTION: \"%s\"]", \ value, rpma_err_2str(ret_val)); \ { exit_code; } \ return ret_val; \ } \ if (value) \ RPMA_LOG_ALWAYS("[#%i]", value); \ } while (0) #define RPMA_FAULT_INJECTION_GOTO(ret_val, goto_label) \ RPMA_FAULT_INJECTION(ret_val, \ { \ ret = ret_val; \ goto goto_label; \ }); int rpma_fault_injection(int *value); #else #define RPMA_FAULT_INJECTION(ret_val, exit_code) \ do { } while (0) #define RPMA_FAULT_INJECTION_GOTO(ret_val, goto_label) \ do { } while (0) #endif /* DEBUG_FAULT_INJECTION */ #endif /* LIBRPMA_DEBUG_H */ rpma-1.3.0/src/ep.c000066400000000000000000000120001443364775400140230ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * ep.c -- librpma endpoint-related implementations */ #include #include #include "conn_cfg.h" #include "conn_req.h" #include "debug.h" #include "info.h" #include "librpma.h" #include "log_internal.h" struct rpma_ep { /* parent peer object */ struct rpma_peer *peer; /* CM ID dedicated to listening for incoming connections */ struct rdma_cm_id *id; /* event channel of the CM ID */ struct rdma_event_channel *evch; }; #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif /* public librpma API */ /* * rpma_ep_listen -- create a new event channel and a new CM ID attached to the event channel. * Bind the CM ID to the provided addr:port pair. 
If everything succeeds a new endpoint is created * encapsulating the event channel and the CM ID. */ int rpma_ep_listen(struct rpma_peer *peer, const char *addr, const char *port, struct rpma_ep **ep_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (peer == NULL || addr == NULL || port == NULL || ep_ptr == NULL) return RPMA_E_INVAL; struct rdma_event_channel *evch = NULL; struct rdma_cm_id *id = NULL; struct rpma_info *info = NULL; struct rpma_ep *ep = NULL; int ret = 0; evch = rdma_create_event_channel(); if (evch == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_event_channel()"); return RPMA_E_PROVIDER; } RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_destroy_event_channel); if (rdma_create_id(evch, &id, NULL, RDMA_PS_TCP)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_id()"); ret = RPMA_E_PROVIDER; goto err_destroy_event_channel; } ret = rpma_info_new(addr, port, RPMA_INFO_PASSIVE, &info); if (ret) goto err_destroy_id; ret = rpma_info_bind_addr(info, id); if (ret) goto err_info_delete; RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_info_delete); if (rdma_listen(id, 0 /* backlog */)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_listen()"); ret = RPMA_E_PROVIDER; goto err_info_delete; } ep = malloc(sizeof(*ep)); if (ep == NULL) { /* according to malloc(3) it can fail only with ENOMEM */ ret = RPMA_E_NOMEM; goto err_info_delete; } ep->peer = peer; ep->evch = evch; ep->id = id; *ep_ptr = ep; /* an error at this step should not affect the final result */ (void) rpma_info_delete(&info); RPMA_LOG_NOTICE("Waiting for incoming connection on %s:%s", addr, port); return ret; err_info_delete: (void) rpma_info_delete(&info); err_destroy_id: (void) rdma_destroy_id(id); err_destroy_event_channel: rdma_destroy_event_channel(evch); return ret; } /* * rpma_ep_shutdown -- destroy the encapsulated CM ID and event channel. * When done delete the endpoint. */ int rpma_ep_shutdown(struct rpma_ep **ep_ptr) { RPMA_DEBUG_TRACE; int ret = 0; if (ep_ptr == NULL) return RPMA_E_INVAL; struct rpma_ep *ep = *ep_ptr; if (ep == NULL) return 0; if (rdma_destroy_id(ep->id)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_destroy_id()"); ret = RPMA_E_PROVIDER; } rdma_destroy_event_channel(ep->evch); free(ep); *ep_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } /* * rpma_ep_get_fd -- get a file descriptor of the event channel associated with the endpoint */ int rpma_ep_get_fd(const struct rpma_ep *ep, int *fd) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (ep == NULL || fd == NULL) return RPMA_E_INVAL; *fd = ep->evch->fd; return 0; } /* * rpma_ep_next_conn_req -- get the next event in the hope it will be * an RDMA_CM_EVENT_CONNECT_REQUEST. If so it orders the creation of a connection request object * based on the obtained request. If succeeds it returns a newly created object. 
*/ int rpma_ep_next_conn_req(struct rpma_ep *ep, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); RPMA_FAULT_INJECTION(RPMA_E_NO_EVENT, { errno = ENODATA; }); if (ep == NULL || req_ptr == NULL) return RPMA_E_INVAL; if (cfg == NULL) cfg = rpma_conn_cfg_default(); int ret = 0; struct rdma_cm_event *event = NULL; /* get an event */ if (rdma_get_cm_event(ep->evch, &event)) { if (errno == ENODATA) return RPMA_E_NO_EVENT; RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_get_cm_event()"); return RPMA_E_PROVIDER; } /* we expect only one type of events here */ RPMA_FAULT_INJECTION_GOTO(RPMA_E_INVAL, err_ack); if (event->event != RDMA_CM_EVENT_CONNECT_REQUEST) { RPMA_LOG_ERROR("Unexpected event received: %s", rdma_event_str(event->event)); ret = RPMA_E_INVAL; goto err_ack; } ret = rpma_conn_req_new_from_cm_event(ep->peer, event, cfg, req_ptr); if (ret) goto err_ack; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, { (void) rpma_conn_req_delete(req_ptr); goto err_ack; }); /* ACK the connection request event */ if (rdma_ack_cm_event(event)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_ack_cm_event()"); (void) rpma_conn_req_delete(req_ptr); return RPMA_E_PROVIDER; } return 0; err_ack: (void) rdma_ack_cm_event(event); return ret; } rpma-1.3.0/src/flush.c000066400000000000000000000142311443364775400145500ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * flush.c -- librpma flush-related implementations */ #include #include #include #include #include #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif #include "debug.h" #include "flush.h" #include "log_internal.h" #include "mr.h" static int rpma_flush_apm_new(struct rpma_peer *peer, struct rpma_flush *flush); static int rpma_flush_apm_delete(struct rpma_flush *flush); static int rpma_flush_apm_execute(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context); #ifdef NATIVE_FLUSH_SUPPORTED static int rpma_native_flush_new(struct rpma_flush *flush); static int rpma_native_flush_execute(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context); #endif typedef int (*rpma_flush_delete_func)(struct rpma_flush *flush); struct rpma_flush_internal { rpma_flush_func flush_func; rpma_flush_delete_func delete_func; void *context; }; /* * Appliance Persistency Method (APM) implementation of the flush operation * using Read-after-Write (RAW) technique for flushing intermediate buffers. 
*/ struct flush_apm { void *raw; /* buffer for read-after-write memory region */ size_t mmap_size; /* size of the mmap()'ed memory */ struct rpma_mr_local *raw_mr; /* read-after-write memory region */ }; #define RAW_SIZE 8 /* read-after-write memory region size */ /* * rpma_flush_apm_new -- allocate a RAW buffer and register it */ static int rpma_flush_apm_new(struct rpma_peer *peer, struct rpma_flush *flush) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret; /* a memory registration has to be page-aligned */ long pagesize = sysconf(_SC_PAGESIZE); if (pagesize < 0) { RPMA_LOG_FATAL("sysconf(_SC_PAGESIZE) failed: %s", strerror(errno)); return RPMA_E_PROVIDER; } size_t mmap_size = (size_t)pagesize; /* allocate memory for the read-after-write buffer (RAW) */ void *raw = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (raw == MAP_FAILED) return RPMA_E_NOMEM; /* register the RAW buffer */ struct rpma_mr_local *raw_mr = NULL; ret = rpma_mr_reg(peer, raw, RAW_SIZE, RPMA_MR_USAGE_READ_DST, &raw_mr); if (ret) { (void) munmap(raw, mmap_size); return ret; } struct flush_apm *flush_apm = malloc(sizeof(struct flush_apm)); if (flush_apm == NULL) { (void) rpma_mr_dereg(&raw_mr); (void) munmap(raw, mmap_size); return RPMA_E_NOMEM; } flush_apm->raw = raw; flush_apm->raw_mr = raw_mr; flush_apm->mmap_size = mmap_size; struct rpma_flush_internal *flush_internal = (struct rpma_flush_internal *)flush; flush_internal->flush_func = rpma_flush_apm_execute; flush_internal->delete_func = rpma_flush_apm_delete; flush_internal->context = flush_apm; return 0; } /* * rpma_flush_apm_delete -- unregister the RAW buffer and deallocate it */ static int rpma_flush_apm_delete(struct rpma_flush *flush) { RPMA_DEBUG_TRACE; struct rpma_flush_internal *flush_internal = (struct rpma_flush_internal *)flush; struct flush_apm *flush_apm = (struct flush_apm *)flush_internal->context; int ret_dereg = rpma_mr_dereg(&flush_apm->raw_mr); int ret_unmap = munmap(flush_apm->raw, flush_apm->mmap_size); free(flush_apm); if (ret_dereg) return ret_dereg; if (ret_unmap) return RPMA_E_INVAL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_flush_apm_execute -- perform the APM-style flush */ static int rpma_flush_apm_execute(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); struct rpma_flush_internal *flush_internal = (struct rpma_flush_internal *)flush; struct flush_apm *flush_apm = (struct flush_apm *)flush_internal->context; return rpma_mr_read(qp, flush_apm->raw_mr, 0, dst, dst_offset, RAW_SIZE, flags, op_context); } #ifdef NATIVE_FLUSH_SUPPORTED /* * rpma_native_flush_new -- register rpma_native_flush_execute() */ static int rpma_native_flush_new(struct rpma_flush *flush) { RPMA_DEBUG_TRACE; struct rpma_flush_internal *flush_internal = (struct rpma_flush_internal *)flush; flush_internal->flush_func = rpma_native_flush_execute; flush_internal->delete_func = NULL; flush_internal->context = NULL; return 0; } /* * rpma_native_flush_execute -- perform the native flush */ static int rpma_native_flush_execute(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return rpma_mr_flush(qp, dst, dst_offset, len, type, flags, op_context); 
} #endif /* internal librpma API */ /* * rpma_flush_new -- peak a flush implementation and return the flushing object */ int rpma_flush_new(struct rpma_peer *peer, struct ibv_qp *qp, struct rpma_flush **flush_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); int ret; struct rpma_flush *flush = malloc(sizeof(struct rpma_flush_internal)); if (!flush) return RPMA_E_NOMEM; #ifdef NATIVE_FLUSH_SUPPORTED struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp); /* check if the created QP supports the native flush */ if (qpx && qpx->wr_flush) ret = rpma_native_flush_new(flush); else ret = rpma_flush_apm_new(peer, flush); #else ret = rpma_flush_apm_new(peer, flush); #endif if (ret) { free(flush); return ret; } *flush_ptr = flush; return 0; } /* * rpma_flush_delete -- delete the flushing object */ int rpma_flush_delete(struct rpma_flush **flush_ptr) { RPMA_DEBUG_TRACE; struct rpma_flush_internal *flush_internal = *(struct rpma_flush_internal **)flush_ptr; int ret = 0; if (flush_internal->delete_func) ret = flush_internal->delete_func(*flush_ptr); free(*flush_ptr); *flush_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return ret; } rpma-1.3.0/src/flush.h000066400000000000000000000020371443364775400145560ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2021, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * flush.h -- librpma flush-related internal definitions */ #ifndef LIBRPMA_FLUSH_H #define LIBRPMA_FLUSH_H #include "librpma.h" struct rpma_flush; typedef int (*rpma_flush_func)(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context); struct rpma_flush { rpma_flush_func func; }; /* * ERRORS * rpma_flush_new() can fail with the following errors: * * - RPMA_E_NOMEM - out of memory (mmap() failed) * - RPMA_E_PROVIDER - sysconf() or ibv_reg_mr() failed */ int rpma_flush_new(struct rpma_peer *peer, struct ibv_qp *qp, struct rpma_flush **flush_ptr); /* * ERRORS * rpma_flush_delete() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_dereg_mr() failed * - RPMA_E_INVAL - munmap() failed */ int rpma_flush_delete(struct rpma_flush **flush_ptr); #endif /* LIBRPMA_FLUSH_H */ rpma-1.3.0/src/include/000077500000000000000000000000001443364775400147055ustar00rootroot00000000000000rpma-1.3.0/src/include/librpma.h000066400000000000000000003400251443364775400165100ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2019-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * librpma.h -- definitions of librpma entry points * * This library provides low-level support for remote access to persistent memory utilizing * RDMA-capable NICs. */ #ifndef LIBRPMA_H #define LIBRPMA_H 1 #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /** 7 * librpma - remote persistent memory access library * * SYNOPSIS * * #include * cc ... -lrpma * * DESCRIPTION * * librpma is a C library to simplify accessing persistent memory (PMem) on remote hosts over * Remote Direct Memory Access (RDMA). * * The librpma library provides two possible schemes of operation: Remote Memory Access and * Messaging. Both of them are available over a connection established between two peers. Both of * these schemes can make use of PMem as well as DRAM for the sake of building efficient and * scalable Remote Persistent Memory Accessing (RPMA) applications. 
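A minimal sketch of the setup path this description implies: look up a local RDMA-capable device by IP address and create the peer object from which all other librpma objects are derived. The address string, the helper name and the error-handling style are illustrative assumptions only.

#include <librpma.h>

/* a hypothetical address assigned to a local RDMA-capable interface */
#define EXAMPLE_LOCAL_ADDR "192.168.0.1"

static int
example_peer_setup(struct rpma_peer **peer_ptr)
{
	struct ibv_context *ibv_ctx = NULL;
	int ret;

	/* look up an RDMA device by the given local IP address */
	ret = rpma_utils_get_ibv_context(EXAMPLE_LOCAL_ADDR,
			RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx);
	if (ret)
		return ret;

	/* create the peer object (released later with rpma_peer_delete()) */
	return rpma_peer_new(ibv_ctx, peer_ptr);
}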
* * REMOTE MEMORY ACCESS * * The librpma library implements four basic API calls dedicated for accessing a remote memory: * - rpma_read() - initiates transferring data from the remote memory to the local memory, * - rpma_write() - initiates transferring data from the local memory to the remote memory), * - rpma_atomic_write() - works like rpma_write(), but it allows transferring 8 bytes of data * (RPMA_ATOMIC_WRITE_ALIGNMENT) and storing them atomically in the remote memory (see * rpma_atomic_write(3) for details and restrictions), and: * - rpma_flush() - initiates finalizing a transfer of data to the remote memory. Possible types of * rpma_flush() operation: * - RPMA_FLUSH_TYPE_PERSISTENT - flush data down to the persistent domain, * - RPMA_FLUSH_TYPE_VISIBILITY - flush data deep enough to make it visible on the remote node. * * All the above functions use the attribute flags to set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generates the completion only on error * - RPMA_F_COMPLETION_ALWAYS - generates the completion regardless of a result of the operation. * * All of these operations are considered as finished when the respective completion is generated. * * DIRECT WRITE TO PMEM * * \f[B]Direct Write to PMem\f[R] is a feature of a platform and its configuration which allows * an RDMA-capable network interface to write data to platform's PMem in a persistent way. It may * be impossible because of e.g. caching mechanisms existing on the data's way. When * \f[B]Direct Write to PMem\f[R] is impossible, operating in the way assuming it is possible may * corrupt data on PMem, so this is why \f[B]Direct Write to PMem\f[R] is not enabled by default. * * On the current Intel platforms, the only thing you have to do in order to enable * \f[B]Direct Write to PMem\f[R] is turning off Intel Direct Data I/O (DDIO). Sometimes, you can * turn off DDIO either globally for the whole platform or for a specific PCIe Root Port. * For details, please see the manual of your platform. * * When you have a platform which allows \f[B]Direct Write to PMem\f[R], you have to declare this * is the case in your peer's configuration. The peer's configuration has to be transferred to all * the peers which want to execute rpma_flush() with RPMA_FLUSH_TYPE_PERSISTENT against * the platform's PMem and applied to the connection object which safeguards access to PMem. * * - rpma_peer_cfg_set_direct_write_to_pmem() - declare \f[B]Direct Write to PMem\f[R] support * - rpma_peer_cfg_get_descriptor() - get the descriptor of the peer configuration * - rpma_peer_cfg_from_descriptor() - create a peer configuration from the descriptor * - rpma_conn_apply_remote_peer_cfg() - apply remote peer cfg to the connection * * For details on how to use these APIs please see * https://github.com/pmem/rpma/tree/main/examples/05-flush-to-persistent. * * CLIENT OPERATION * A client is the active side of the process of establishing a connection. A role of the peer * during the process of establishing connection does not determine direction of the data flow * (neither via Remote Memory Access nor via Messaging). After establishing the connection both * peers have the same capabilities. 
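A hedged sketch of the write-plus-flush sequence from the REMOTE MEMORY ACCESS section above, assuming an already established connection, a registered local source region, a remote destination region built from a received descriptor, and a platform where Direct Write to PMem has been declared; the offsets, the FLUSH_ID marker and the helper name are illustrative assumptions.

#include <stdint.h>
#include <infiniband/verbs.h>
#include <librpma.h>

#define FLUSH_ID ((void *)0x000F) /* an arbitrary, illustrative op_context */

static int
example_write_and_flush(struct rpma_conn *conn, struct rpma_mr_local *src_mr,
		struct rpma_mr_remote *dst_mr, size_t len)
{
	struct rpma_cq *cq = NULL;
	struct ibv_wc wc;
	int ret;

	/* post the write; a completion is requested only on error */
	ret = rpma_write(conn, dst_mr, 0 /* dst_offset */, src_mr,
			0 /* src_offset */, len, RPMA_F_COMPLETION_ON_ERROR, NULL);
	if (ret)
		return ret;

	/* make the written data persistent on the remote side */
	ret = rpma_flush(conn, dst_mr, 0 /* dst_offset */, len,
			RPMA_FLUSH_TYPE_PERSISTENT, RPMA_F_COMPLETION_ALWAYS, FLUSH_ID);
	if (ret)
		return ret;

	/* wait for the flush completion on the connection's main CQ */
	ret = rpma_conn_get_cq(conn, &cq);
	if (ret)
		return ret;
	ret = rpma_cq_wait(cq);
	if (ret)
		return ret;
	ret = rpma_cq_get_wc(cq, 1, &wc, NULL);
	if (ret)
		return ret;

	/* wr_id carries the op_context passed to rpma_flush() */
	if (wc.status != IBV_WC_SUCCESS || wc.wr_id != (uintptr_t)FLUSH_ID)
		return RPMA_E_UNKNOWN;

	return 0;
}

Checking wc.wr_id against the op_context passed to rpma_flush() distinguishes this completion from completions of other outstanding operations.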
* * The client, in order to establish a connection, has to perform the following steps: * * - rpma_conn_req_new() - create a new outgoing connection request object * - rpma_conn_req_connect() - initiate processing the connection request * - rpma_conn_next_event() - wait for the RPMA_CONN_ESTABLISHED event * * After establishing the connection both peers can perform Remote Memory Access and/or Messaging * over the connection. * * The client, in order to close a connection, has to perform the following steps: * * - rpma_conn_disconnect() - initiate disconnection * - rpma_conn_next_event() - wait for the RPMA_CONN_CLOSED event * - rpma_conn_delete() - delete the closed connection * * SERVER OPERATION * A server is the passive side of the process of establishing a connection. Note that after * establishing the connection both peers have the same capabilities. * * The server, in order to establish a connection, has to perform the following steps: * * - rpma_ep_listen() - create a listening endpoint * - rpma_ep_next_conn_req() - obtain an incoming connection request * - rpma_conn_req_connect() - initiate connecting the connection request * - rpma_conn_next_event() - wait for the RPMA_CONN_ESTABLISHED event * * After establishing the connection both peers can perform Remote Memory Access and/or Messaging * over the connection. * * The server, in order to close a connection, has to perform the following steps: * * - rpma_conn_next_event() - wait for the RPMA_CONN_CLOSED event * - rpma_conn_disconnect() - disconnect the connection * - rpma_conn_delete() - delete the closed connection * * When no more incoming connections are expected, the server can stop waiting for them: * * - rpma_ep_shutdown() - stop listening and delete the endpoint * * MEMORY MANAGEMENT * * Every piece of memory (either volatile or persistent) must be registered and its usage must be * specified in order to be used in Remote Memory Access or Messaging. This can be done using * the following memory management librpma functions: * - rpma_mr_reg() which registers a memory region and creates a local memory registration object * and * - rpma_mr_dereg() which deregisters the memory region and deletes the local memory registration * object. * * A description of the registered memory region sometimes has to be transferred via network to * the other side of the connection. In order to do that a network-transferable description of * the provided memory region (called 'descriptor') has to be created using * rpma_mr_get_descriptor(). On the other side of the connection the received descriptor should be * decoded using rpma_mr_remote_from_descriptor(). It creates a remote memory region's structure * that allows for Remote Memory Access. * * MESSAGING * * The librpma messaging API allows transferring messages (buffers of arbitrary data) between * the peers. Transferring messages requires preparing buffers (memory regions) on the remote side * to receive the sent data. The received data are written to those dedicated buffers and * the sender does not have to have a respective remote memory region object to send a message. * The memory buffers used for messaging have to be registered using rpma_mr_reg() prior to * rpma_send() or rpma_recv() function call. 
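Building on the registration requirement just stated, a sketch of a minimal message exchange, assuming an already established connection and an existing peer object; the buffer sizes, the malloc-based buffers and the shortened error paths are illustrative assumptions (the individual messaging calls are listed just below).

#include <stdlib.h>
#include <librpma.h>

#define MSG_SIZE 64 /* an illustrative message size */

static int
example_msg_ping(struct rpma_peer *peer, struct rpma_conn *conn)
{
	void *send_buf = malloc(MSG_SIZE);
	void *recv_buf = malloc(MSG_SIZE);
	struct rpma_mr_local *send_mr = NULL;
	struct rpma_mr_local *recv_mr = NULL;
	int ret;

	if (send_buf == NULL || recv_buf == NULL)
		return RPMA_E_NOMEM;

	/* both buffers have to be registered before rpma_send()/rpma_recv() */
	ret = rpma_mr_reg(peer, send_buf, MSG_SIZE, RPMA_MR_USAGE_SEND, &send_mr);
	if (ret)
		return ret;
	ret = rpma_mr_reg(peer, recv_buf, MSG_SIZE, RPMA_MR_USAGE_RECV, &recv_mr);
	if (ret)
		return ret;

	/* prepare a buffer for the expected response first ... */
	ret = rpma_recv(conn, recv_mr, 0, MSG_SIZE, recv_buf);
	if (ret)
		return ret;

	/* ... then send the request to the other side of the connection */
	return rpma_send(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, send_buf);
}

Completions of both operations are then collected as described in the COMPLETIONS section; the receive completion arrives at the receive CQ when one is configured, otherwise at the main CQ.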
* * The librpma library implements the following messaging API: * - rpma_send() - initiates the send operation which transfers a message from the local memory to * other side of the connection, * - rpma_recv() - initiates the receive operation which prepares a buffer for a message sent from * other side of the connection, * - rpma_conn_req_recv() works as rpma_recv(), but it may be used before the connection is * established. * * All of these operations are considered as finished when the respective completion is generated. * * COMPLETIONS * * RDMA operations generate complitions that notify a user that the respective operation has been * completed. * * The following operations are available in librpma: * - IBV_WC_RDMA_READ - RMA read operation * - IBV_WC_RDMA_WRITE - RMA write operation * - IBV_WC_SEND - messaging send operation * - IBV_WC_RECV - messaging receive operation * - IBV_WC_RECV_RDMA_WITH_IMM - messaging receive operation for RMA write operation with immediate * data * * All operations generate completion on error. The operations posted with * the \f[B]RPMA_F_COMPLETION_ALWAYS\f[R] flag also generate a completion on success. * Completion codes are reused from the libibverbs library, where the IBV_WC_SUCCESS status * indicates the successful completion of an operation. Completions are collected in the completion * queue (CQ) (see the \f[B]QUEUES, PERFORMANCE AND RESOURCE USE\f[R] section for more details * on queues). * * The librpma library implements the following API for handling completions: * - rpma_conn_get_cq() gets the connection's main CQ, * - rpma_conn_get_rcq() gets the connection's receive CQ, * - rpma_cq_wait() waits for an incoming completion from the specified CQ (main or receive CQ) - * if it succeeds the completion can be collected using rpma_cq_get_wc(), * - rpma_cq_get_wc() receives the next available completion of an already posted operation. * * PEER * * A peer is an abstraction representing an RDMA-capable device. * All other RPMA objects have to be created in the context of a peer. * A peer allows one to: * - establish connections (Client Operation) * - register memory regions (Memory Management) * - create endpoints for listening for incoming connections (Server Operation) * * At the beginning, in order to create a peer, a user has to obtain * an RDMA device context by the given IPv4/IPv6 address using * rpma_utils_get_ibv_context(). Then a new peer object can be created * using rpma_peer_new() and deleted using rpma_peer_delete(). * * SYNCHRONOUS AND ASYNCHRONOUS MODES * By default, all endpoints and connections operate in the synchronous mode where: * * - rpma_ep_next_conn_req(), * - rpma_cq_wait() and * - rpma_conn_get_next_event() * * are blocking calls. You can make those API calls non-blocking by modifying the respective file * descriptors: * * - rpma_ep_get_fd() - provides a file descriptor for rpma_ep_next_conn_req() * - rpma_cq_get_fd() - provides a file descriptor for rpma_cq_wait() * - rpma_conn_get_event_fd() - provides a file descriptor for rpma_conn_get_next_event() * * When you have a file descriptor, you can make it non-blocking using fcntl(2) as follows: * * int ret = fcntl(fd, F_GETFL); * fcntl(fd, F_SETFL, flags | O_NONBLOCK); * * Such change makes the respective API call non-blocking automatically. * * The provided file descriptors can also be used for scalable I/O handling like epoll(7). 
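A sketch of the non-blocking setup just described, assuming a connected conn object and an already created epoll instance; the epoll usage and the helper names are illustrative assumptions, not a librpma API.

#include <fcntl.h>
#include <sys/epoll.h>
#include <librpma.h>

/* make a librpma-provided file descriptor non-blocking and add it to epoll */
static int
example_add_fd_to_epoll(int epoll_fd, int fd, void *ctx)
{
	int flags = fcntl(fd, F_GETFL);
	if (flags < 0 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0)
		return -1;

	struct epoll_event event;
	event.events = EPOLLIN;
	event.data.ptr = ctx;
	return epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event);
}

static int
example_watch_connection(int epoll_fd, struct rpma_conn *conn, struct rpma_cq *cq)
{
	int ev_fd, cq_fd;
	int ret;

	/* file descriptor signalled on incoming connection events */
	ret = rpma_conn_get_event_fd(conn, &ev_fd);
	if (ret)
		return ret;

	/* file descriptor signalled when a completion arrives at the CQ */
	ret = rpma_cq_get_fd(cq, &cq_fd);
	if (ret)
		return ret;

	if (example_add_fd_to_epoll(epoll_fd, ev_fd, conn) ||
	    example_add_fd_to_epoll(epoll_fd, cq_fd, cq))
		return RPMA_E_UNKNOWN;

	return 0;
}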
* * Please see the example showing how to make use of RPMA file descriptors: * https://github.com/pmem/rpma/tree/main/examples/06-multiple-connections * * .SH QUEUES, PERFORMANCE AND RESOURCE USE * * \f[B]Remote Memory Access\f[R] operations, \f[B]Messaging\f[R] operations and their * \f[B]Completions\f[R] consume space in queues allocated in an RDMA-capable network interface * (RNIC) hardware for each of the connections. You must be aware of the existence of these queues: * * - completion queue \f[B](CQ)\f[R] where completions of operations are placed, either when * a completion was required by a user (RPMA_F_COMPLETION_ALWAYS) or a completion with an error * occurred. All \f[B]Remote Memory Access\f[R] operations and \f[B]Messaging\f[R] operations can * consume \f[B]CQ\f[R] space. * - send queue \f[B](SQ)\f[R] where all \f[B]Remote Memory Access\f[R] operations and rpma_send() * operations are placed before they are executed by RNIC. * - receive queue \f[B](RQ)\f[R] where rpma_recv() entries are placed before they are consumed by * the rpma_send() coming from another side of the connection. * * You must assume \f[B]SQ\f[R] and \f[B]RQ\f[R] entries occupy the place in their respective queue * till: * * - a respective operation's completion is generated or * - a completion of an operation, which was scheduled later, is generated. * * You must also be aware that RNIC has limited resources so it is impossible to store a very long * set of queues for many possibly existing connections. If all of the queues will not fit into * RNIC's resources it will start using the platform's memory for this purpose. In this case, * the performance will be degraded because of inevitable cache misses. * * Because the length of queues has so profound impact on the performance of RPMA application you * can configure the length of each of the queues separately for each of the connections: * * - rpma_conn_cfg_set_cq_size() - set length of \f[B]CQ\f[R] * - rpma_conn_cfg_set_sq_size() - set length of \f[B]SQ\f[R] * - rpma_conn_cfg_set_rq_size() - set length of \f[B]RQ\f[R] * * When the connection configuration object is ready it has to be used for either * rpma_conn_req_new() or rpma_ep_next_conn_req() for the settings to take effect. * * THREAD SAFETY * * The analysis of thread safety of the librpma library is described in details in * the THREAD_SAFETY.md file: * * https://github.com/pmem/rpma/blob/main/THREAD_SAFETY.md * * .SH ON-DEMAND PAGING SUPPORT * * On-Demand-Paging (ODP) is a technique that simplifies the memory registration process (for * example, applications no longer need to pin down the underlying physical pages of the address * space and track the validity of the mappings). On-Demand Paging is available if both * the hardware and the kernel support it. The detailed description of ODP can be found here: * * https://community.mellanox.com/s/article/understanding-on-demand-paging--odp-x * * State of ODP support can be checked using the rpma_utils_ibv_context_is_odp_capable() function * that queries the RDMA device context's capabilities and checks if it supports On-Demand Paging. * * The librpma library uses ODP automatically if it is supported. ODP support is required to * register PMem memory region mapped from File System DAX (FSDAX). * * DEBUGGING AND ERROR HANDLING * * If a librpma function may fail, it returns a negative error code. Checking if the returned value * is non-negative is the only programmatically available way to verify if the API call succeeded. 
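A sketch of tuning the per-connection queue lengths discussed in the QUEUES, PERFORMANCE AND RESOURCE USE section above, on the active side of a connection; the chosen sizes, the timeout value and the helper name are illustrative assumptions.

#include <librpma.h>

static int
example_conn_req_with_sized_queues(struct rpma_peer *peer, const char *addr,
		const char *port, struct rpma_conn_req **req_ptr)
{
	struct rpma_conn_cfg *cfg = NULL;
	int ret;

	ret = rpma_conn_cfg_new(&cfg);
	if (ret)
		return ret;

	/* illustrative sizes: up to 16 outstanding sends, 16 posted receives */
	ret = rpma_conn_cfg_set_sq_size(cfg, 16);
	if (!ret)
		ret = rpma_conn_cfg_set_rq_size(cfg, 16);
	if (!ret)
		ret = rpma_conn_cfg_set_cq_size(cfg, 32);
	if (!ret)
		ret = rpma_conn_cfg_set_timeout(cfg, 2000 /* ms */);
	if (ret)
		goto err_cfg_delete;

	/* the settings take effect when the configuration is used here */
	ret = rpma_conn_req_new(peer, addr, port, cfg, req_ptr);

err_cfg_delete:
	/* the values were read while creating the request, so the cfg can go */
	(void) rpma_conn_cfg_delete(&cfg);
	return ret;
}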
* The exact meaning of all error codes is described in the manual of each function. * * The librpma library implements the logging API which may give additional information in case of * an error and during normal operation as well, according to the current logging threshold levels. * * The function that will handle all generated log messages can be set using * rpma_log_set_function(). The logging function can be either the default logging function (built * into the library) or a user-defined, thread-safe, function. The default logging function can * write messages to syslog(3) and stderr(3). The logging threshold level can be set or got using * rpma_log_set_threshold() or rpma_log_get_threshold() respectively. * * There is an example of the usage of the logging functions: * https://github.com/pmem/rpma/tree/main/examples/log * * EXAMPLES * * See https://github.com/pmem/rpma/tree/main/examples for examples of using the librpma API. * * ACKNOWLEDGEMENTS * * librpma is built on the top of libibverbs and librdmacm APIs. * * DEPRECATING * * Using of the API calls which are marked as deprecated should be avoided, because they will be * removed in a new major release. * * NOTE: API calls deprecated in 0.X release will be removed in 0.(X+1) release usually. * * SEE ALSO * * https://pmem.io/rpma/ */ #define RPMA_W_WAIT_FOR_COMPLETION (1) #define RPMA_E_UNKNOWN (-100000) /* Unknown error */ #define RPMA_E_NOSUPP (-100001) /* Not supported */ #define RPMA_E_PROVIDER (-100002) /* Provider error occurred */ #define RPMA_E_NOMEM (-100003) /* Out of memory */ #define RPMA_E_INVAL (-100004) /* Invalid argument */ #define RPMA_E_NO_COMPLETION (-100005) /* No next completion available */ #define RPMA_E_NO_EVENT (-100006) /* No next event available */ #define RPMA_E_AGAIN (-100007) /* Temporary error */ #define RPMA_E_SHARED_CHANNEL (-100008) /* Completion channel is shared */ #define RPMA_E_NOT_SHARED_CHNL (-100009) /* Completion channel isn't shared */ /* picking up an RDMA-capable device */ #define RPMA_DEFAULT_TIMEOUT_MS 1000 /* pick a type of an ibv_context to lookup for */ enum rpma_util_ibv_context_type { RPMA_UTIL_IBV_CONTEXT_LOCAL, /* lookup for a local device */ RPMA_UTIL_IBV_CONTEXT_REMOTE /* lookup for a remote device */ }; /** 3 * rpma_utils_get_ibv_context - obtain an RDMA device context by IP address * * SYNOPSIS * * #include * * struct ibv_context; * enum rpma_util_ibv_context_type { * RPMA_UTIL_IBV_CONTEXT_LOCAL, * RPMA_UTIL_IBV_CONTEXT_REMOTE * }; * * int rpma_utils_get_ibv_context(const char *addr, enum rpma_util_ibv_context_type type, * struct ibv_context **ibv_ctx_ptr); * * DESCRIPTION * rpma_utils_get_ibv_context() obtains an RDMA device context by the given IPv4/IPv6 address * (either local or remote) using the TCP RDMA port space (RDMA_PS_TCP) - reliable, * connection-oriented and message-based QP communication. Possible values of the 'type' argument: * - RPMA_UTIL_IBV_CONTEXT_LOCAL - lookup for a device based on the given local address * - RPMA_UTIL_IBV_CONTEXT_REMOTE - lookup for a device based on the given remote address * * RETURN VALUE * The rpma_utils_get_ibv_context() function returns 0 on success or a negative error code on * failure. rpma_utils_get_ibv_context() does not set *ibv_ctx_ptr value on failure. 
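Tying the DEBUGGING AND ERROR HANDLING notes above to the entry documented here, a sketch of checking a negative librpma return code and raising the logging threshold while diagnosing; the chosen level and the stderr reporting are only one possible policy, not a recommendation of the library.

#include <stdio.h>
#include <librpma.h>

static int
example_get_device(const char *addr, struct ibv_context **ibv_ctx_ptr)
{
	/* log everything up to RPMA_LOG_LEVEL_INFO while diagnosing (illustrative) */
	(void) rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO);

	int ret = rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_LOCAL,
			ibv_ctx_ptr);
	if (ret) {
		/* every librpma error is a negative code with a printable name */
		fprintf(stderr, "rpma_utils_get_ibv_context() failed: %s\n",
				rpma_err_2str(ret));
		return ret;
	}

	return 0;
}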
* * ERRORS * rpma_utils_get_ibv_context() can fail with the following errors: * * - RPMA_E_INVAL - addr or ibv_ctx_ptr is NULL or type is unknown * - RPMA_E_NOMEM - out of memory * - RPMA_E_PROVIDER - rdma_getaddrinfo(), rdma_create_id(), rdma_bind_addr() or * rdma_resolve_addr() failed, the exact cause of the error can be read from the log * * SEE ALSO * rpma_peer_new(3), rpma_utils_ibv_context_is_odp_capable(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_utils_get_ibv_context(const char *addr, enum rpma_util_ibv_context_type type, struct ibv_context **ibv_ctx_ptr); /** 3 * rpma_utils_ibv_context_is_odp_capable - is On-Demand Paging supported * * SYNOPSIS * * #include * * struct ibv_context; * int rpma_utils_ibv_context_is_odp_capable(struct ibv_context *ibv_ctx, * int *is_odp_capable); * * DESCRIPTION * rpma_utils_ibv_context_is_odp_capable() queries the RDMA device context's capabilities and check * if it supports On-Demand Paging. * * RETURN VALUE * The rpma_utils_ibv_context_is_odp_capable() function returns 0 on success or a negative error * code on failure. The *is_odp_capable value on failure is undefined. * * ERRORS * rpma_utils_ibv_context_is_odp_capable() can fail with the following errors: * * - RPMA_E_INVAL - ibv_ctx or is_odp_capable is NULL * - RPMA_E_PROVIDER - ibv_query_device_ex() failed, the exact cause of the error can be read from * the log * * SEE ALSO * rpma_utils_get_ibv_context(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_utils_ibv_context_is_odp_capable(struct ibv_context *ibv_ctx, int *is_odp_capable); /* peer configuration */ struct rpma_peer_cfg; /** 3 * rpma_peer_cfg_new - create a new peer configuration object * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_new(struct rpma_peer_cfg **pcfg_ptr); * * DESCRIPTION * rpma_peer_cfg_new() creates a new peer configuration object. * * RETURN VALUE * The rpma_peer_cfg_new() function returns 0 on success or a negative error code on failure. * rpm_peer_cfg_new() does not set *pcfg_ptr value on failure. * * ERRORS * rpma_peer_cfg_new() can fail with the following errors: * * - RPMA_E_INVAL - pcfg_ptr is NULL * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_conn_apply_remote_peer_cfg(3), rpma_peer_cfg_delete(3), rpma_peer_cfg_from_descriptor(3), * rpma_peer_cfg_get_descriptor(3), rpma_peer_cfg_get_descriptor_size(3), * rpma_peer_cfg_get_direct_write_to_pmem(3), rpma_peer_cfg_set_direct_write_to_pmem(3), librpma(7) * and https://pmem.io/rpma/ */ int rpma_peer_cfg_new(struct rpma_peer_cfg **pcfg_ptr); /** 3 * rpma_peer_cfg_delete - delete the peer configuration object * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_delete(struct rpma_peer_cfg **pcfg_ptr); * * DESCRIPTION * rpma_peer_cfg_delete() deletes the peer configuration object. * * RETURN VALUE * The rpma_peer_cfg_delete() function returns 0 on success or a negative error code on failure. * rpm_peer_cfg_delete() does not set *pcfg_ptr value to NULL on failure. 
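 *
 * EXAMPLE
 * A sketch of a typical peer configuration life cycle on the side which describes its own
 * persistency capabilities (error handling, freeing the buffer and transferring the descriptor
 * to the other side are omitted):
 *
 *	struct rpma_peer_cfg *pcfg = NULL;
 *	size_t desc_size = 0;
 *	rpma_peer_cfg_new(&pcfg);
 *	rpma_peer_cfg_set_direct_write_to_pmem(pcfg, true);
 *	rpma_peer_cfg_get_descriptor_size(pcfg, &desc_size);
 *	void *desc = malloc(desc_size);
 *	rpma_peer_cfg_get_descriptor(pcfg, desc);
 *	rpma_peer_cfg_delete(&pcfg);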
* * ERRORS * rpma_peer_cfg_delete() can fail with the following error: * * - RPMA_E_INVAL - pcfg_ptr is NULL * * SEE ALSO * rpma_peer_cfg_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_peer_cfg_delete(struct rpma_peer_cfg **pcfg_ptr); /** 3 * rpma_peer_cfg_set_direct_write_to_pmem - declare direct write to PMEM support * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_set_direct_write_to_pmem(struct rpma_peer_cfg *pcfg, bool supported); * * DESCRIPTION * rpma_peer_cfg_set_direct_write_to_pmem() declares the support of the direct write to PMEM. * * RETURN VALUE * The rpma_peer_cfg_set_direct_write_to_pmem() function returns 0 on success or a negative error * code on failure. * * ERRORS * rpma_peer_cfg_set_direct_write_to_pmem() can fail with the following error: * * - RPMA_E_INVAL - pcfg is NULL * * SEE ALSO * rpma_conn_apply_remote_peer_cfg(3), rpma_peer_cfg_get_descriptor(3), * rpma_peer_cfg_get_direct_write_to_pmem(3), rpma_peer_cfg_new(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_peer_cfg_set_direct_write_to_pmem(struct rpma_peer_cfg *pcfg, bool supported); /** 3 * rpma_peer_cfg_get_direct_write_to_pmem - check direct write to PMEM support * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_get_direct_write_to_pmem( * const struct rpma_peer_cfg *pcfg, bool *supported); * * DESCRIPTION * rpma_peer_cfg_get_direct_write_to_pmem() checks the support of the direct write to PMEM. * * RETURN VALUE * The rpma_peer_cfg_get_direct_write_to_pmem() function returns 0 on success or a negative error * code on failure. * * ERRORS * rpma_peer_cfg_get_direct_write_to_pmem() can fail with the following error: * * - RPMA_E_INVAL - pcfg or supported are NULL * * SEE ALSO * rpma_peer_cfg_from_descriptor(3), rpma_peer_cfg_new(3), * rpma_peer_cfg_set_direct_write_to_pmem(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_peer_cfg_get_direct_write_to_pmem(const struct rpma_peer_cfg *pcfg, bool *supported); /** 3 * rpma_peer_cfg_get_descriptor - get the descriptor of the peer configuration * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_get_descriptor(const struct rpma_peer_cfg *pcfg, void *desc); * * DESCRIPTION * rpma_peer_cfg_get_descriptor() gets the descriptor of the peer configuration. * * SECURITY WARNING * See rpma_peer_cfg_from_descriptor(3). * * RETURN VALUE * The rpma_peer_cfg_get_descriptor() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_peer_cfg_get_descriptor() can fail with the following error: * * - RPMA_E_INVAL - pcfg or desc are NULL * * SEE ALSO * rpma_peer_cfg_from_descriptor(3), rpma_peer_cfg_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_peer_cfg_get_descriptor(const struct rpma_peer_cfg *pcfg, void *desc); /** 3 * rpma_peer_cfg_get_descriptor_size - get size of the peer cfg descriptor * * SYNOPSIS * * #include * * struct rpma_peer_cfg; * int rpma_peer_cfg_get_descriptor_size(const struct rpma_peer_cfg *pcfg, size_t *desc_size); * * DESCRIPTION * rpma_peer_cfg_get_descriptor_size() gets size of the peer configuration descriptor. * * RETURN VALUE * The rpma_peer_cfg_get_descriptor_size() function returns 0 on success or a negative error code * on failure. 
 *
 * ERRORS
 * rpma_peer_cfg_get_descriptor_size() can fail with the following error:
 *
 * - RPMA_E_INVAL - pcfg or desc_size is NULL
 *
 * SEE ALSO
 * rpma_peer_cfg_get_descriptor(3), rpma_peer_cfg_new(3), librpma(7) and https://pmem.io/rpma/
 */
int rpma_peer_cfg_get_descriptor_size(const struct rpma_peer_cfg *pcfg, size_t *desc_size);

/** 3
 * rpma_peer_cfg_from_descriptor - create a peer cfg from the descriptor
 *
 * SYNOPSIS
 *
 *	#include <librpma.h>
 *
 *	struct rpma_peer_cfg;
 *	int rpma_peer_cfg_from_descriptor(const void *desc, size_t desc_size,
 *			struct rpma_peer_cfg **pcfg_ptr);
 *
 * DESCRIPTION
 * rpma_peer_cfg_from_descriptor() creates a peer configuration object from the descriptor.
 *
 * SECURITY WARNING
 * An attacker might modify the serialized remote node configuration while it is transferred
 * via an unsecured connection (e.g. rdma_cm private data), which might cause
 * different remote persistency method selections. The most dangerous situation is switching
 * from the GPSPM mode to the APM one. Users should avoid using rpma_conn_get_private_data(3)
 * and rpma_conn_req_get_private_data(3) API calls and they should utilize TLS/SSL connections
 * to transfer all configuration data between peers instead.
 *
 * RETURN VALUE
 * The rpma_peer_cfg_from_descriptor() function returns 0 on success or a negative error code on
 * failure. rpma_peer_cfg_from_descriptor() does not set *pcfg_ptr value on failure.
 *
 * ERRORS
 * rpma_peer_cfg_from_descriptor() can fail with the following errors:
 *
 * - RPMA_E_INVAL - desc or pcfg_ptr are NULL
 * - RPMA_E_NOMEM - out of memory
 *
 * SEE ALSO
 * rpma_conn_apply_remote_peer_cfg(3), rpma_peer_cfg_get_descriptor(3), rpma_peer_cfg_new(3),
 * librpma(7) and https://pmem.io/rpma/
 */
int rpma_peer_cfg_from_descriptor(const void *desc, size_t desc_size,
		struct rpma_peer_cfg **pcfg_ptr);

/* peer */
struct rpma_peer;

/** 3
 * rpma_peer_new - create a peer object
 *
 * SYNOPSIS
 *
 *	#include <librpma.h>
 *
 *	struct ibv_context;
 *	struct rpma_peer;
 *	int rpma_peer_new(struct ibv_context *ibv_ctx, struct rpma_peer **peer_ptr);
 *
 * DESCRIPTION
 * rpma_peer_new() creates a new peer object.
 *
 * RETURN VALUE
 * The rpma_peer_new() function returns 0 on success or a negative error code on failure.
 * rpma_peer_new() does not set *peer_ptr value on failure.
 *
 * ERRORS
 * rpma_peer_new() can fail with the following errors:
 *
 * - RPMA_E_INVAL - ibv_ctx or peer_ptr is NULL
 * - RPMA_E_NOMEM - creating a verbs protection domain failed with ENOMEM.
 * - RPMA_E_PROVIDER - creating a verbs protection domain failed with error other than ENOMEM.
 * - RPMA_E_UNKNOWN - creating a verbs protection domain failed without error value.
 * - RPMA_E_NOMEM - out of memory
 *
 * SEE ALSO
 * rpma_conn_req_new(3), rpma_ep_listen(3), rpma_mr_reg(3), rpma_peer_delete(3),
 * rpma_utils_get_ibv_context(3), librpma(7) and https://pmem.io/rpma/
 */
int rpma_peer_new(struct ibv_context *ibv_ctx, struct rpma_peer **peer_ptr);

/** 3
 * rpma_peer_delete - delete a peer object
 *
 * SYNOPSIS
 *
 *	#include <librpma.h>
 *
 *	struct rpma_peer;
 *	int rpma_peer_delete(struct rpma_peer **peer_ptr);
 *
 * DESCRIPTION
 * rpma_peer_delete() deletes the peer object.
 *
 * RETURN VALUE
 * The rpma_peer_delete() function returns 0 on success or a negative error code on failure.
 * rpma_peer_delete() does not set *peer_ptr value to NULL on failure.
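 *
 * EXAMPLE
 * A sketch of creating and deleting a peer for a local RDMA-capable interface (error handling
 * omitted; the address string is an illustrative placeholder). In a real application the peer is
 * used in between to register memory (rpma_mr_reg(3)) and to establish connections:
 *
 *	struct ibv_context *ibv_ctx = NULL;
 *	struct rpma_peer *peer = NULL;
 *	rpma_utils_get_ibv_context("192.168.0.1", RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx);
 *	rpma_peer_new(ibv_ctx, &peer);
 *	rpma_peer_delete(&peer);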
* * ERRORS * rpma_peer_delete() can fail with the following error: * * - RPMA_E_PROVIDER - deleting the verbs protection domain failed. * * SEE ALSO * rpma_peer_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_peer_delete(struct rpma_peer **peer_ptr); /* memory-related structures */ struct rpma_mr_local; struct rpma_mr_remote; #define RPMA_MR_USAGE_READ_SRC (1 << 0) #define RPMA_MR_USAGE_READ_DST (1 << 1) #define RPMA_MR_USAGE_WRITE_SRC (1 << 2) #define RPMA_MR_USAGE_WRITE_DST (1 << 3) #define RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY (1 << 4) #define RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT (1 << 5) #define RPMA_MR_USAGE_SEND (1 << 6) #define RPMA_MR_USAGE_RECV (1 << 7) /** 3 * rpma_mr_reg - create a local memory registration object * * SYNOPSIS * * #include * * struct rpma_peer; * struct rpma_mr_local; * * int rpma_mr_reg(struct rpma_peer *peer, void *ptr, size_t size, int usage, * struct rpma_mr_local **mr_ptr); * * DESCRIPTION * rpma_mr_reg() registers a memory region and creates a local memory registration object. The * usage parameter specifies the operations that can be performed on the given memory region which * should be expressed as bitwise-inclusive OR of the following: * - RPMA_MR_USAGE_READ_SRC - memory used as a source of the read operation * - RPMA_MR_USAGE_READ_DST - memory used as a destination of the read operation * - RPMA_MR_USAGE_WRITE_SRC - memory used as a source of the write operation * - RPMA_MR_USAGE_WRITE_DST - memory used as a destination of the write operation * - RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY - memory with available flush operation * - RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT - memory with available persistent flush operation * - RPMA_MR_USAGE_SEND - memory used for send operation * - RPMA_MR_USAGE_RECV - memory used for receive operation * * RETURN VALUE * The rpma_mr_reg() function returns 0 on success or a negative error code on failure. * rpma_mr_reg() does not set *mr_ptr value on failure. * * ERRORS * rpma_mr_reg() can fail with the following errors: * * - RPMA_E_INVAL - peer or ptr or mr_ptr is NULL * - RPMA_E_INVAL - size equals 0 * - RPMA_E_NOMEM - out of memory * - RPMA_E_PROVIDER - memory registration failed * * SEE ALSO * rpma_conn_req_recv(3), rpma_mr_dereg(3), rpma_mr_get_descriptor(3), * rpma_mr_get_descriptor_size(3), rpma_peer_new(3), rpma_read(3), rpma_recv(3), rpma_send(3), * rpma_write(3), rpma_atomic_write(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_reg(struct rpma_peer *peer, void *ptr, size_t size, int usage, struct rpma_mr_local **mr_ptr); /** 3 * rpma_mr_dereg - delete a local memory registration object * * SYNOPSIS * * #include * * struct rpma_mr_local; * int rpma_mr_dereg(struct rpma_mr_local **mr_ptr); * * DESCRIPTION * rpma_mr_dereg() deregisters a memory region and deletes a local memory registration object. * * RETURN VALUE * The rpma_mr_dereg() function returns 0 on success or a negative error code on failure. * rpma_mr_dereg() does not set *mr_ptr value to NULL on failure. 
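 *
 * EXAMPLE
 * A sketch of registering a buffer so it can serve as a source of the read operation and as
 * a receive buffer, and deregistering it afterwards (error handling omitted; peer is an already
 * created struct rpma_peer * and buf/BUF_SIZE are illustrative placeholders):
 *
 *	struct rpma_mr_local *mr = NULL;
 *	rpma_mr_reg(peer, buf, BUF_SIZE, RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_RECV, &mr);
 *	rpma_mr_dereg(&mr);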
* * ERRORS * rpma_mr_dereg() can fail with the following errors: * * - RPMA_E_INVAL - mr_ptr is NULL * - RPMA_E_PROVIDER - memory deregistration failed * * SEE ALSO * rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_dereg(struct rpma_mr_local **mr_ptr); /** 3 * rpma_mr_get_descriptor - get a descriptor of a memory region * * SYNOPSIS * * #include * * struct rpma_mr_local; * int rpma_mr_get_descriptor(const struct rpma_mr_local *mr, void *desc); * * DESCRIPTION * rpma_mr_get_descriptor() writes a network-transferable description of the provided local memory * region (called 'descriptor'). Once the descriptor is transferred to the other side it should be * decoded by rpma_mr_remote_from_descriptor() to create a remote memory region's structure which * allows for Remote Memory Access. Please see librpma(7) for details. * * SECURITY WARNING * See rpma_mr_remote_from_descriptor(3). * * RETURN VALUE * The rpma_mr_get_descriptor() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_mr_get_descriptor() can fail with the following error: * * - RPMA_E_INVAL - mr or desc is NULL * * SEE ALSO * rpma_mr_get_descriptor_size(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_get_descriptor(const struct rpma_mr_local *mr, void *desc); /** 3 * rpma_mr_remote_from_descriptor - create a memory region from a descriptor * * SYNOPSIS * * #include * * struct rpma_mr_remote; * int rpma_mr_remote_from_descriptor(const void *desc, size_t desc_size, * struct rpma_mr_remote **mr_ptr); * * DESCRIPTION * Create a remote memory region's structure based on the provided descriptor with * a network-transferable description of the memory region local to the remote peer. * * SECURITY WARNING * An attacker might modify the serialized remote memory registration configuration * while it is transferred via an unsecured connection (e.g. rdma_cm private data), * which might cause data corruption when writing to a different location. * Users should avoid using rpma_conn_get_private_data(3) and rpma_conn_req_get_private_data(3) * API calls and they should utilize TLS/SSL connections to transfer all configuration data * between peers instead. * * RETURN VALUE * The rpma_mr_remote_from_descriptor() function returns 0 on success or a negative error code on * failure. rpma_mr_remote_from_descriptor() does not set *mr_ptr value on failure. * * ERRORS * rpma_mr_remote_from_descriptor() can fail with the following errors: * * - RPMA_E_INVAL - desc or mr_ptr is NULL * - RPMA_E_INVAL - incompatible descriptor size * - RPMA_E_NOSUPP - deserialized information does not represent a valid memory region * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_mr_remote_delete(3), rpma_mr_remote_get_flush_type(3), rpma_mr_remote_get_size(3), * rpma_flush(3), rpma_read(3), rpma_write(3), rpma_atomic_write(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_mr_remote_from_descriptor(const void *desc, size_t desc_size, struct rpma_mr_remote **mr_ptr); /** 3 * rpma_mr_get_descriptor_size - get size of the memory region descriptor * * SYNOPSIS * * #include * * struct rpma_mr_local; * int rpma_mr_get_descriptor_size(const struct rpma_mr_local *mr, size_t *desc_size); * * DESCRIPTION * rpma_mr_get_descriptor_size() gets size of the memory region descriptor. * * RETURN VALUE * The rpma_mr_get_descriptor_size() function returns 0 on success or a negative error code on * failure. rpma_mr_get_descriptor_size() does not set *desc_size value on failure. 
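 *
 * EXAMPLE
 * A sketch of serializing a local memory registration so that its descriptor can be passed to
 * the other side of the connection, which rebuilds the remote counterpart with
 * rpma_mr_remote_from_descriptor(3) (error handling, freeing the buffer and the actual transfer
 * are omitted; mr is an already created struct rpma_mr_local *):
 *
 *	size_t desc_size = 0;
 *	rpma_mr_get_descriptor_size(mr, &desc_size);
 *	void *desc = malloc(desc_size);
 *	rpma_mr_get_descriptor(mr, desc);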
* * ERRORS * rpma_mr_get_descriptor_size() can fail with the following error: * * - RPMA_E_INVAL - mr or desc_size is NULL * * SEE ALSO * rpma_mr_get_descriptor(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_get_descriptor_size(const struct rpma_mr_local *mr, size_t *desc_size); /** 3 * rpma_mr_get_ptr - get the pointer to the local memory region * * SYNOPSIS * * #include * * struct rpma_mr_local; * int rpma_mr_get_ptr(const struct rpma_mr_local *mr, void **ptr); * * DESCRIPTION * rpma_mr_get_ptr() gets a memory region pointer from the local memory registration object. * * RETURN VALUE * The rpma_mr_get_ptr() function returns 0 on success or a negative error code on failure. * rpma_mr_get_ptr() does not set *ptr value on failure. * * ERRORS * rpma_mr_get_ptr() can fail with the following error: * * - RPMA_E_INVAL - mr or ptr is NULL * * SEE ALSO * rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_get_ptr(const struct rpma_mr_local *mr, void **ptr); /** 3 * rpma_mr_get_size - get the size of the local memory region * * SYNOPSIS * * #include * * struct rpma_mr_local; * int rpma_mr_get_size(const struct rpma_mr_local *mr, size_t *size); * * DESCRIPTION * rpma_mr_get_size() gets a memory region size from the local memory registration object. * * RETURN VALUE * The rpma_mr_get_size() function returns 0 on success or a negative error code on failure. * rpma_mr_get_size() does not set *size value on failure. * * ERRORS * rpma_mr_get_size() can fail with the following error: * * - RPMA_E_INVAL - mr or size is NULL * * SEE ALSO * rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_get_size(const struct rpma_mr_local *mr, size_t *size); /** 3 * rpma_mr_remote_get_size - get a remote memory region size * * SYNOPSIS * * #include * * struct rpma_mr_remote; * int rpma_mr_remote_get_size(const struct rpma_mr_remote *mr, * size_t *size); * * DESCRIPTION * rpma_mr_remote_get_size() gets the size of the remote memory region. * * RETURN VALUE * The rpma_mr_remote_get_size() function returns 0 on success or a negative error code on failure. * rpma_mr_remote_get_size() does not set *size value on failure. * * ERRORS * rpma_mr_remote_get_size() can fail with the following error: * * - RPMA_E_INVAL - mr or size is NULL * * SEE ALSO * rpma_mr_remote_from_descriptor(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_remote_get_size(const struct rpma_mr_remote *mr, size_t *size); /** 3 * rpma_mr_remote_get_flush_type - get a remote memory region's flush types * * SYNOPSIS * * #include * * struct rpma_mr_remote; * int rpma_mr_remote_get_flush_type(const struct rpma_mr_remote *mr, int *flush_type); * * DESCRIPTION * rpma_mr_remote_get_flush_type() gets flush types supported by the remote memory region. * * RETURN VALUE * The rpma_mr_remote_get_flush_type() function returns 0 on success or a negative error code on * failure. rpma_mr_remote_get_flush_type() does not set *flush_type value on failure. 
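 *
 * EXAMPLE
 * A sketch of choosing a flushing strategy based on the remote region's capabilities (error
 * handling omitted; mr is an already created struct rpma_mr_remote * and both flush routines are
 * illustrative placeholders; it assumes the reported value is a combination of
 * the RPMA_MR_USAGE_FLUSH_TYPE_* flags the region was registered with):
 *
 *	int flush_type = 0;
 *	rpma_mr_remote_get_flush_type(mr, &flush_type);
 *	if (flush_type & RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT)
 *		use_persistent_flush();
 *	else
 *		use_visibility_flush();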
* * ERRORS * rpma_mr_remote_get_flush_type() can fail with the following error: * * - RPMA_E_INVAL - mr or flush_type is NULL * * SEE ALSO * rpma_mr_remote_from_descriptor(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_remote_get_flush_type(const struct rpma_mr_remote *mr, int *flush_type); /** 3 * rpma_mr_remote_delete - delete the remote memory region's structure * * SYNOPSIS * * #include * * struct rpma_mr_remote; * int rpma_mr_remote_delete(struct rpma_mr_remote **mr_ptr); * * DESCRIPTION * rpma_mr_remote_delete() deletes the remote memory region's structure. * * RETURN VALUE * The rpma_mr_remote_delete() function returns 0 on success or a negative error code on failure. * rpma_mr_remote_delete() does not set *mr_ptr value to NULL on failure. * * ERRORS * rpma_mr_remote_delete() can fail with the following error: * * - RPMA_E_INVAL - mr_ptr is NULL * * SEE ALSO * rpma_mr_remote_from_descriptor(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_remote_delete(struct rpma_mr_remote **mr_ptr); /** 3 * rpma_mr_advise - give advice about an address range in a memory registration * * SYNOPSIS * * #include * * struct rpma_mr_local *mr; * int rpma_mr_advise(struct rpma_mr_local *mr, size_t offset, size_t len, int advice, * uint32_t flags); * * DESCRIPTION * rpma_mr_advise() gives advice about an address range in a memory registration. The usage * parameter specifies the operations that can be performed on the given memory address range. * For available advice and flags values please see ibv_advise_mr(3). * * RETURN VALUE * The rpma_mr_advise() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_mr_advise() can fail with the following errors: * * - RPMA_E_INVAL - in one of the following: * - the requested range is out of the memory registration bounds * - the memory registration usage does not allow the specific advice * - the flags are invalid * - RPMA_E_NOSUPP - the operation is not supported by the system * - RPMA_E_PROVIDER - ibv_mr_advise(3) failed for other errors * * SEE ALSO * rpma_mr_reg(3), ibv_mr_advise(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_mr_advise(struct rpma_mr_local *mr, size_t offset, size_t len, int advice, uint32_t flags); /* connection configuration */ struct rpma_conn_cfg; /** 3 * rpma_conn_cfg_new - create a new connection configuration object * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_new(struct rpma_conn_cfg **cfg_ptr); * * DESCRIPTION * rpma_conn_cfg_new() creates a new connection configuration object and fills it with the default * values: * * .timeout_ms = 1000 * .cq_size = 10 * .rcq_size = 0 * .sq_size = 10 * .rq_size = 10 * .shared_comp_channel = false * * RETURN VALUE * The rpma_conn_cfg_new() function returns 0 on success or a negative error code on failure. * rpma_conn_cfg_new() does not set *cfg_ptr value on failure. 
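 *
 * EXAMPLE
 * A sketch of adjusting the queue lengths before establishing a new connection (error handling
 * omitted; peer, addr and port are assumed to be already prepared by the caller; the configuration
 * object can be deleted once the connection request is created):
 *
 *	struct rpma_conn_cfg *cfg = NULL;
 *	struct rpma_conn_req *req = NULL;
 *	rpma_conn_cfg_new(&cfg);
 *	rpma_conn_cfg_set_cq_size(cfg, 32);
 *	rpma_conn_cfg_set_sq_size(cfg, 16);
 *	rpma_conn_cfg_set_rq_size(cfg, 16);
 *	rpma_conn_req_new(peer, addr, port, cfg, &req);
 *	rpma_conn_cfg_delete(&cfg);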
* * ERRORS * rpma_conn_cfg_new() can fail with the following error: * * - RPMA_E_INVAL - cfg_ptr is NULL * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_conn_cfg_delete(3), rpma_conn_cfg_get_compl_channel(3), rpma_conn_cfg_get_cq_size(3), * rpma_conn_cfg_get_rq_size(3), rpma_conn_cfg_get_sq_size(3), rpma_conn_cfg_get_timeout(3), * rpma_conn_cfg_set_compl_channel(3), rpma_conn_cfg_set_cq_size(3), rpma_conn_cfg_set_rq_size(3), * rpma_conn_cfg_set_sq_size(3), rpma_conn_cfg_set_timeout(3), rpma_conn_req_new(3), * rpma_ep_next_conn_req(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_new(struct rpma_conn_cfg **cfg_ptr); /** 3 * rpma_conn_cfg_delete - delete the connection configuration object * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_delete(struct rpma_conn_cfg **cfg_ptr); * * DESCRIPTION * rpma_conn_cfg_delete() deletes the connection configuration object. * * RETURN VALUE * The rpma_conn_cfg_delete() function returns 0 on success or a negative error code on failure. * rpma_conn_cfg_delete() sets *cfg_ptr value to NULL on success and on failure. * * ERRORS * rpma_conn_cfg_delete() can fail with the following error: * * - RPMA_E_INVAL - cfg_ptr is NULL * * SEE ALSO * rpma_conn_cfg_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_delete(struct rpma_conn_cfg **cfg_ptr); /** 3 * rpma_conn_cfg_set_timeout - set connection establishment timeout * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_timeout(struct rpma_conn_cfg *cfg, int timeout_ms); * * DESCRIPTION * rpma_conn_cfg_set_timeout() sets the connection establishment timeout. If this function is not * called, the timeout has the default value (1000) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_timeout() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_conn_cfg_set_timeout() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL or timeout_ms < 0 * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_get_timeout(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_set_timeout(struct rpma_conn_cfg *cfg, int timeout_ms); /** 3 * rpma_conn_cfg_get_timeout - get connection establishment timeout * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_timeout(const struct rpma_conn_cfg *cfg, int *timeout_ms); * * DESCRIPTION * rpma_conn_cfg_get_timeout() gets the connection establishment timeout. * * RETURN VALUE * The rpma_conn_cfg_get_timeout() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_timeout() does not set *timeout_ms value on failure. * * ERRORS * rpma_conn_cfg_get_timeout() can fail with the following error: * * - RPMA_E_INVAL - cfg or timeout_ms is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_timeout(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_get_timeout(const struct rpma_conn_cfg *cfg, int *timeout_ms); /** 3 * rpma_conn_cfg_set_cq_size - set CQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_cq_size(struct rpma_conn_cfg *cfg, uint32_t cq_size); * * DESCRIPTION * rpma_conn_cfg_set_cq_size() sets the CQ size for the connection. If this function is not called, * the cq_size has the default value (10) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_cq_size() function returns 0 on success or a negative error code on * failure. 
* * ERRORS * rpma_conn_cfg_set_cq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_get_cq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_set_cq_size(struct rpma_conn_cfg *cfg, uint32_t cq_size); /** 3 * rpma_conn_cfg_get_compl_channel - get if the completion event channel can be shared by CQ and RCQ * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_compl_channel(const struct rpma_conn_cfg *cfg, bool *shared); * * DESCRIPTION * rpma_conn_cfg_get_compl_channel() gets if the completion event channel can be shared by CQ and * RCQ. * * RETURN VALUE * The rpma_conn_cfg_get_compl_channel() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_compl_channel() does not set *shared value on failure. * * ERRORS * rpma_conn_cfg_get_compl_channel() can fail with the following error: * * - RPMA_E_INVAL - cfg or shared is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_wait(3), rpma_conn_cfg_set_compl_channel(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_cfg_get_compl_channel(const struct rpma_conn_cfg *cfg, bool *shared); /** 3 * rpma_conn_cfg_set_compl_channel - set if the completion event channel can be shared by CQ and RCQ * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_compl_channel(struct rpma_conn_cfg *cfg, bool shared); * * DESCRIPTION * rpma_conn_cfg_set_compl_channel() sets if the completion event channel can be shared by CQ and * RCQ or not. The completion event channel is not shared by CQ and RCQ by default. See * rpma_conn_cfg_new(3) for details. * * RETURN VALUE * The rpma_conn_cfg_set_compl_channel() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_conn_cfg_set_compl_channel() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_wait(3), rpma_conn_cfg_get_compl_channel(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_cfg_set_compl_channel(struct rpma_conn_cfg *cfg, bool shared); /** 3 * rpma_conn_cfg_get_cq_size - get CQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_cq_size(const struct rpma_conn_cfg *cfg, uint32_t *cq_size); * * DESCRIPTION * rpma_conn_cfg_get_cq_size() gets the CQ size for the connection. * * RETURN VALUE * The rpma_conn_cfg_get_cq_size() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_cq_size() does not set *cq_size value on failure. * * ERRORS * rpma_conn_cfg_get_cq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or cq_size is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_cq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_get_cq_size(const struct rpma_conn_cfg *cfg, uint32_t *cq_size); /** 3 * rpma_conn_cfg_set_rcq_size - set receive CQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_rcq_size(struct rpma_conn_cfg *cfg, uint32_t rcq_size); * * DESCRIPTION * rpma_conn_cfg_set_rcq_size() sets the receive CQ size for the connection. Please see * the rpma_conn_get_rcq() for details about the receive CQ. If this function is not called, * the rcq_size has the default value (0) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_rcq_size() function returns 0 on success or a negative error code on * failure. 
* * ERRORS * rpma_conn_cfg_set_rcq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_get_rcq_size(3), rpma_conn_cfg_new(3), rpma_conn_get_rcq(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_cfg_set_rcq_size(struct rpma_conn_cfg *cfg, uint32_t rcq_size); /** 3 * rpma_conn_cfg_get_rcq_size - get receive CQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_rcq_size(const struct rpma_conn_cfg *cfg, uint32_t *rcq_size); * * DESCRIPTION * rpma_conn_cfg_get_rcq_size() gets the receive CQ size for the connection. * Please see the rpma_conn_get_rcq() for details about the receive CQ. * * RETURN VALUE * The rpma_conn_cfg_get_rcq_size() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_rcq_size() does not set *rcq_size value on failure. * * ERRORS * rpma_conn_cfg_get_rcq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or rcq_size is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_rcq_size(3), rpma_conn_get_rcq(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_cfg_get_rcq_size(const struct rpma_conn_cfg *cfg, uint32_t *rcq_size); /** 3 * rpma_conn_cfg_set_sq_size - set SQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_sq_size(struct rpma_conn_cfg *cfg, uint32_t sq_size); * * DESCRIPTION * rpma_conn_cfg_set_sq_size() sets the SQ size for the connection. If this function is not called, * the sq_size has the default value (10) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_sq_size() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_conn_cfg_set_sq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_get_sq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_set_sq_size(struct rpma_conn_cfg *cfg, uint32_t sq_size); /** 3 * rpma_conn_cfg_get_sq_size - get SQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_sq_size(const struct rpma_conn_cfg *cfg, uint32_t *sq_size); * * DESCRIPTION * rpma_conn_cfg_get_sq_size() gets the SQ size for the connection. * * RETURN VALUE * The rpma_conn_cfg_get_sq_size() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_sq_size() does not set *sq_size value on failure. * * ERRORS * rpma_conn_cfg_get_sq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or sq_size is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_sq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_get_sq_size(const struct rpma_conn_cfg *cfg, uint32_t *sq_size); /** 3 * rpma_conn_cfg_set_rq_size - set RQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_set_rq_size(struct rpma_conn_cfg *cfg, uint32_t rq_size); * * DESCRIPTION * rpma_conn_cfg_set_rq_size() sets the RQ size for the connection. If this function is not called, * the rq_size has the default value (10) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_rq_size() function returns 0 on success or a negative error code on * failure. 
* * ERRORS * rpma_conn_cfg_set_rq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_get_rq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_set_rq_size(struct rpma_conn_cfg *cfg, uint32_t rq_size); /** 3 * rpma_conn_cfg_get_rq_size - get RQ size for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * int rpma_conn_cfg_get_rq_size(const struct rpma_conn_cfg *cfg, uint32_t *rq_size); * * DESCRIPTION * rpma_conn_cfg_get_rq_size() gets the RQ size for the connection. * * RETURN VALUE * The rpma_conn_cfg_get_rq_size() function returns 0 on success or a negative error code on * failure. rpma_conn_cfg_get_rq_size() does not set *rq_size value on failure. * * ERRORS * rpma_conn_cfg_get_rq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or rq_size is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_rq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_get_rq_size(const struct rpma_conn_cfg *cfg, uint32_t *rq_size); /* shared RQ */ struct rpma_srq; /** 3 * rpma_conn_cfg_set_srq - set a shared RQ object for the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * struct rpma_srq; * int rpma_conn_cfg_set_srq(struct rpma_conn_cfg *cfg, struct rpma_srq *srq); * * DESCRIPTION * rpma_conn_cfg_set_srq() sets a shared RQ object for the connection. If this function is not * called, the srq has the default value (NULL) set by rpma_conn_cfg_new(3). * * RETURN VALUE * The rpma_conn_cfg_set_srq() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_conn_cfg_set_srq() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_get_srq(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_set_srq(struct rpma_conn_cfg *cfg, struct rpma_srq *srq); /** 3 * rpma_conn_cfg_get_srq - get the shared RQ object from the connection * * SYNOPSIS * * #include * * struct rpma_conn_cfg; * struct rpma_srq; * int rpma_conn_cfg_get_srq(const struct rpma_conn_cfg *cfg, struct rpma_srq **srq_ptr); * * DESCRIPTION * rpma_conn_cfg_get_srq() gets the shared RQ object from the connection. * * RETURN VALUE * The rpma_conn_cfg_get_srq() function returns 0 on success or a negative error code on failure. * rpma_conn_cfg_get_srq() does not set *srq_ptr value on failure. * * ERRORS * rpma_conn_cfg_get_srq() can fail with the following error: * * - RPMA_E_INVAL - cfg or srq_ptr is NULL * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_cfg_set_srq(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_cfg_get_srq(const struct rpma_conn_cfg *cfg, struct rpma_srq **srq_ptr); /* connection */ struct rpma_conn; /** 3 * rpma_conn_get_event_fd - get an event file descriptor of the connection * * SYNOPSIS * * #include * * struct rpma_conn; * int rpma_conn_get_event_fd(const struct rpma_conn *conn, int *fd); * * DESCRIPTION * rpma_conn_get_event_fd() gets an event file descriptor of the connection. * * RETURN VALUE * The rpma_conn_get_event_fd() function returns 0 on success or a negative error code on failure. * rpma_conn_get_event_fd() does not set *fd value on failure. 
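 *
 * EXAMPLE
 * A sketch of waiting for the next connection event without blocking inside the library, using
 * poll(2) from <poll.h> (error handling omitted; conn is an already created connection object):
 *
 *	enum rpma_conn_event event = RPMA_CONN_UNDEFINED;
 *	struct pollfd pfd = {.fd = -1, .events = POLLIN};
 *	rpma_conn_get_event_fd(conn, &pfd.fd);
 *	poll(&pfd, 1, -1);
 *	rpma_conn_next_event(conn, &event);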
* * ERRORS * rpma_conn_get_event_fd() can fail with the following error: * * - RPMA_E_INVAL - conn or fd is NULL * * SEE ALSO * rpma_conn_next_event(3), rpma_conn_req_connect(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_get_event_fd(const struct rpma_conn *conn, int *fd); enum rpma_conn_event { RPMA_CONN_UNDEFINED = -1, /* Undefined connection event */ RPMA_CONN_ESTABLISHED, /* Connection established */ RPMA_CONN_CLOSED, /* Connection closed */ RPMA_CONN_LOST, /* Connection lost */ RPMA_CONN_REJECTED, /* Connection rejected */ RPMA_CONN_UNREACHABLE /* Connection unreachable */ }; /** 3 * rpma_conn_next_event - obtain a connection status * * SYNOPSIS * * #include * * struct rpma_conn; * enum rpma_conn_event { * RPMA_CONN_UNDEFINED = -1, * RPMA_CONN_ESTABLISHED, * RPMA_CONN_CLOSED, * RPMA_CONN_LOST, * RPMA_CONN_REJECTED, * RPMA_CONN_UNREACHABLE * }; * * int rpma_conn_next_event(struct rpma_conn *conn, * enum rpma_conn_event *event); * * DESCRIPTION * rpma_conn_next_event() obtains the next event from the connection. * Types of events: * - RPMA_CONN_UNDEFINED - undefined connection event * - RPMA_CONN_ESTABLISHED - connection established * - RPMA_CONN_CLOSED - connection closed * - RPMA_CONN_LOST - connection lost * - RPMA_CONN_REJECTED - connection rejected * - RPMA_CONN_UNREACHABLE - connection unreachable * * RETURN VALUE * The rpma_conn_next_event() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_conn_next_event() can fail with the following errors: * * - RPMA_E_INVAL - conn or event is NULL * - RPMA_E_UNKNOWN - unexpected event * - RPMA_E_PROVIDER - rdma_get_cm_event() or rdma_ack_cm_event() failed * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_conn_req_connect(3), rpma_conn_disconnect(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_next_event(struct rpma_conn *conn, enum rpma_conn_event *event); /** 3 * rpma_utils_conn_event_2str - convert RPMA_CONN_* enum to a string * * SYNOPSIS * * #include * * const char *rpma_utils_conn_event_2str(enum rpma_conn_event conn_event); * * enum rpma_conn_event { * RPMA_CONN_UNDEFINED = -1, * RPMA_CONN_ESTABLISHED, * RPMA_CONN_CLOSED, * RPMA_CONN_LOST, * RPMA_CONN_REJECTED, * RPMA_CONN_UNREACHABLE * }; * * DESCRIPTION * rpma_utils_conn_event_2str() converts RPMA_CONN_* enum to the const string representation. * * RETURN VALUE * The rpma_utils_conn_event_2str() function returns the const string representation of * RPMA_CONN_* enums. * * ERRORS * rpma_utils_conn_event_2str() can not fail. * * SEE ALSO * rpma_conn_next_event(3), librpma(7) and https://pmem.io/rpma/ */ const char *rpma_utils_conn_event_2str(enum rpma_conn_event conn_event); struct rpma_conn_private_data { void *ptr; uint8_t len; }; /** 3 * rpma_conn_get_private_data - get a pointer to the connection's private data * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_conn_private_data; * int rpma_conn_get_private_data(const struct rpma_conn *conn, * struct rpma_conn_private_data *pdata); * * DESCRIPTION * rpma_conn_get_private_data() obtains the pointer to the private data given by the other side of * the connection. * * SECURITY WARNING * The connection's private data is insecure. An attacker might modify all data transferred * via the rdma_cm private data. Users should avoid using rpma_conn_get_private_data(3) * and rpma_conn_req_get_private_data(3) API calls and they should utilize TLS/SSL connections * to transfer all configuration data between peers instead. 
* * RETURN VALUE * The rpma_conn_get_private_data() function returns 0 on success or a negative error code on * failure. rpma_conn_get_private_data() does not set *pdata value on failure. * * ERRORS * rpma_conn_get_private_data() can fail with the following error: * * - RPMA_E_INVAL - conn or pdata is NULL * * SEE ALSO * rpma_conn_req_connect(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_get_private_data(const struct rpma_conn *conn, struct rpma_conn_private_data *pdata); /** 3 * rpma_conn_apply_remote_peer_cfg - apply remote peer cfg to the connection * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_peer_cfg; * int rpma_conn_apply_remote_peer_cfg(struct rpma_conn *conn, * const struct rpma_peer_cfg *pcfg); * * DESCRIPTION * rpma_conn_apply_remote_peer_cfg() applies the remote peer configuration to the connection. * * RETURN VALUE * The rpma_conn_apply_remote_peer_cfg() function returns 0 on success or a negative error code on * failure. rpma_conn_apply_remote_peer_cfg() does not set *pcfg value on failure. * * ERRORS * rpma_conn_apply_remote_peer_cfg() can fail with the following error: * * - RPMA_E_INVAL - conn or pcfg are NULL * * SEE ALSO * rpma_conn_req_connect(3), rpma_peer_cfg_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_apply_remote_peer_cfg(struct rpma_conn *conn, const struct rpma_peer_cfg *pcfg); /** 3 * rpma_conn_get_qp_num - get the connection's qp_num * * SYNOPSIS * * #include * * struct rpma_conn; * int rpma_conn_get_qp_num(const struct rpma_conn *conn, uint32_t *qp_num); * * DESCRIPTION * rpma_conn_get_qp_num() obtains the unique identifier of the connection. * * RETURN VALUE * The rpma_conn_get_qp_num() function returns 0 on success or a negative error code on failure. * rpma_conn_get_qp_num() does not set *qp_num value on failure. * * ERRORS * rpma_conn_get_qp_num() can fail with the following error: * * - RPMA_E_INVAL - conn or qp_num is NULL * * SEE ALSO * rpma_conn_req_new(3), rpma_ep_next_conn_req(3), rpma_conn_req_connect(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_get_qp_num(const struct rpma_conn *conn, uint32_t *qp_num); struct rpma_cq; /** 3 * rpma_conn_get_cq - get the connection's main CQ * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_cq; * int rpma_conn_get_cq(const struct rpma_conn *conn, struct rpma_cq **cq_ptr); * * DESCRIPTION * rpma_conn_get_cq() gets the main CQ from the connection. When the receive CQ is not present * the main CQ allows handling all completions within the connection. When the receive CQ * is present the main CQ allows handling all completions except rpma_recv(3) completions within * the connection. Please see rpma_conn_get_rcq(3) for details about the receive CQ. * * RETURN VALUE * The rpma_conn_get_cq() function returns 0 on success or a negative error code on failure. * rpma_conn_get_cq() does not set *cq_ptr value on failure. * * ERRORS * rpma_conn_get_cq() can fail with the following error: * * - RPMA_E_INVAL - conn or cq_ptr is NULL * * SEE ALSO * rpma_conn_req_connect(3), rpma_conn_get_rcq(3), rpma_cq_wait(3), rpma_cq_get_wc(3), * rpma_cq_get_fd(3), rpma_recv(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_get_cq(const struct rpma_conn *conn, struct rpma_cq **cq_ptr); /** 3 * rpma_conn_get_rcq -- get the connection's receive CQ * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_cq; * int rpma_conn_get_rcq(const struct rpma_conn *conn, struct rpma_cq **rcq_ptr); * * DESCRIPTION * rpma_conn_get_rcq() gets the receive CQ from the connection. 
The receive CQ allows handling all * rpma_recv(3) completions within the connection. It allows separating rpma_recv(3) completions * processing path from all other completions. The receive CQ is created only if the receive CQ * size in the provided connection configuration is greater than 0. When the receive CQ does not * exist for the given connection the *rcq_ptr == NULL. * * RETURN VALUE * The rpma_conn_get_rcq() function returns 0 on success or a negative error code on failure. * rpma_conn_get_rcq() does not set *rcq_ptr value on failure. * * ERRORS * rpma_conn_get_rcq() can fail with the following error: * * - RPMA_E_INVAL - conn or rcq_ptr is NULL * * SEE ALSO * rpma_conn_cfg_set_rcq_size(3), rpma_conn_req_connect(3), rpma_conn_get_cq(3), rpma_cq_wait(3), * rpma_cq_get_wc(3), rpma_cq_get_fd(3), rpma_recv(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_get_rcq(const struct rpma_conn *conn, struct rpma_cq **rcq_ptr); /** 3 * rpma_conn_disconnect - tear the connection down * * SYNOPSIS * * #include * * struct rpma_conn; * int rpma_conn_disconnect(struct rpma_conn *conn); * * DESCRIPTION * rpma_conn_disconnect() tears the connection down. * * - It may initiate disconnecting the connection. In this case, the end of disconnecting is * signalled by the RPMA_CONN_CLOSED event via rpma_conn_next_event() or * - It may be called after receiving the RPMA_CONN_CLOSED event. In this case, the disconnection * is done when rpma_conn_disconnect() returns with success. * * RETURN VALUE * The rpma_conn_disconnect() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_conn_disconnect() can fail with the following errors: * * - RPMA_E_INVAL - conn is NULL * - RPMA_E_PROVIDER - rdma_disconnect() failed * * SEE ALSO * rpma_conn_delete(3), rpma_conn_next_event(3), rpma_conn_req_connect(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_conn_disconnect(struct rpma_conn *conn); /** 3 * rpma_conn_delete - delete already closed connection * * SYNOPSIS * * #include * * struct rpma_conn; * int rpma_conn_delete(struct rpma_conn **conn_ptr); * * DESCRIPTION * rpma_conn_delete() deletes already closed connection. * * RETURN VALUE * The rpma_conn_delete() function returns 0 on success or a negative error code on failure. * rpma_conn_delete() sets *conn_ptr value to NULL on success and on failure. * * ERRORS * rpma_conn_delete() can fail with the following errors: * - RPMA_E_INVAL - conn_ptr is NULL or munmap() failed * - RPMA_E_PROVIDER - ibv_destroy_cq() or rdma_destroy_id() failed * * SEE ALSO * rpma_conn_disconnect(3), rpma_conn_req_connect(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_delete(struct rpma_conn **conn_ptr); /* incoming / outgoing connection request */ struct rpma_conn_req; /** 3 * rpma_conn_req_new - create a new outgoing connection request object * * SYNOPSIS * * #include * * struct rpma_peer; * struct rpma_conn_cfg; * struct rpma_conn_req; * int rpma_conn_req_new(struct rpma_peer *peer, const char *addr, const char *port, * const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr); * * DESCRIPTION * rpma_conn_req_new() creates a new outgoing connection request object using reliable, * connection-oriented and message-based (RDMA_PS_TCP) QP communication. * * RETURN VALUE * The rpma_conn_req_new() function returns 0 on success or a negative error code on failure. * rpma_conn_req_new() does not set *req_ptr value on failure. If cfg is NULL, then the default * values are used * - see rpma_conn_cfg_new(3) for more details. 
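 *
 * EXAMPLE
 * A sketch of the active side of connection establishment (error handling omitted; peer is an
 * already created struct rpma_peer * and the address and port strings are illustrative
 * placeholders). The connection is ready to use only after rpma_conn_next_event() reports
 * RPMA_CONN_ESTABLISHED:
 *
 *	struct rpma_conn_req *req = NULL;
 *	struct rpma_conn *conn = NULL;
 *	enum rpma_conn_event event = RPMA_CONN_UNDEFINED;
 *	rpma_conn_req_new(peer, "192.168.0.1", "7204", NULL, &req);
 *	rpma_conn_req_connect(&req, NULL, &conn);
 *	rpma_conn_next_event(conn, &event);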
* * ERRORS * rpma_conn_req_new() can fail with the following errors: * * - RPMA_E_INVAL - peer, addr, port or req_ptr is NULL * - RPMA_E_NOMEM - out of memory * - RPMA_E_PROVIDER - rdma_create_id(3), rdma_resolve_addr(3), rdma_resolve_route(3) or * ibv_create_cq(3) failed * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_req_connect(3), rpma_conn_req_delete(3), rpma_conn_req_recv(3), * rpma_ep_next_conn_req(3), rpma_peer_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_req_new(struct rpma_peer *peer, const char *addr, const char *port, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr); /** 3 * rpma_conn_req_delete - delete the connection requests * * SYNOPSIS * * #include * * struct rpma_conn_req; * int rpma_conn_req_delete(struct rpma_conn_req **req_ptr); * * DESCRIPTION * rpma_conn_req_delete() deletes the connection requests both incoming and outgoing. * * RETURN VALUE * The rpma_conn_req_delete() function returns 0 on success or a negative error code on failure. * rpma_conn_req_delete() sets *req_ptr value to NULL on success and on failure. * * ERRORS * rpma_conn_req_delete() can fail with the following errors: * * - RPMA_E_INVAL - req_ptr is NULL * - RPMA_E_PROVIDER * - rdma_destroy_qp(3) or ibv_destroy_cq(3) failed * - rdma_reject(3) or rdma_ack_cm_event(3) failed (passive side only) * - rdma_destroy_id(3) failed (active side only) * * SEE ALSO * rpma_conn_req_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_req_delete(struct rpma_conn_req **req_ptr); /** 3 * rpma_conn_req_connect - initiate processing the connection request * * SYNOPSIS * * #include * * struct rpma_conn_req; * struct rpma_conn_private_data; * struct rpma_conn; * int rpma_conn_req_connect(struct rpma_conn_req **req_ptr, * const struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr); * * DESCRIPTION * rpma_conn_req_connect() initiates processing the connection requests both incoming and outgoing. * The end of processing is signalled by the RPMA_CONN_ESTABLISHED event via * rpma_conn_next_event(). * * RETURN VALUE * The rpma_conn_req_connect() function returns 0 on success or a negative error code on failure. * On success, the newly created connection object is stored in *conn_ptr whereas *req_ptr is * consumed and set to NULL. On failure, rpma_conn_req_connect() does not set *conn_ptr whereas * *req_ptr is consumed and set to NULL. 
* * ERRORS * rpma_conn_req_connect() can fail with the following errors: * * - RPMA_E_INVAL - req_ptr, *req_ptr or conn_ptr is NULL * - RPMA_E_INVAL - pdata is not NULL whereas pdata->len == 0 * - RPMA_E_NOMEM - out of memory * - RPMA_E_PROVIDER - initiating a connection request failed (active side only) * - RPMA_E_PROVIDER - accepting the connection request failed (passive side only) * - RPMA_E_PROVIDER - freeing a communication event failed (passive side only) * * SEE ALSO * rpma_conn_apply_remote_peer_cfg(3), rpma_conn_delete(3), rpma_conn_disconnect(3), * rpma_conn_get_cq(3), rpma_conn_get_event_fd(3), rpma_conn_get_private_data(3), * rpma_conn_get_rcq(3), rpma_conn_next_event(3), rpma_conn_req_new(3), rpma_ep_next_conn_req(3), * rpma_flush(3), rpma_read(3), rpma_recv(3), rpma_send(3), rpma_write(3), rpma_atomic_write(3), * librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_req_connect(struct rpma_conn_req **req_ptr, const struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr); /** 3 * rpma_conn_get_compl_fd - get a file descriptor of the shared completion channel of the connection * * SYNOPSIS * * #include * * struct rpma_conn; * int fd; * int rpma_conn_get_compl_fd(const struct rpma_conn *conn, int *fd); * * DESCRIPTION * rpma_conn_get_compl_fd() gets a file descriptor of the shared completion channel from * the connection. * * RETURN VALUE * The rpma_conn_get_compl_fd() function returns 0 on success or a negative error code on failure. * rpma_conn_get_compl_fd() does not set *fd value on failure. * * ERRORS * rpma_conn_get_compl_fd() can fail with the following errors: * * - RPMA_E_INVAL - conn or fd is NULL * - RPMA_E_NOT_SHARED_CHNL - the completion event channel is not shared * * SEE ALSO * librpma(7), rpma_conn_req_connect(3) and https://pmem.io/rpma/ */ int rpma_conn_get_compl_fd(const struct rpma_conn *conn, int *fd); /** 3 * rpma_conn_wait - wait for a completion event on the shared completion channel from CQ or RCQ * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_cq * int rpma_conn_wait(struct rpma_conn *conn, int flags, struct rpma_cq **cq, bool *is_rcq) * * DESCRIPTION * rpma_conn_wait() waits for a completion event on the shared completion channel from CQ or RCQ, * acks it and returns a CQ that caused the event in the cq argument and a boolean value saying if * it is RCQ or not in the is_rcq argument (if is_rcq is not NULL). If rpma_conn_wait() succeeds, * then all available completions should be collected from the returned cq using rpma_cq_get_wc(3). * * RETURN VALUE * The rpma_conn_wait() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_conn_wait() can fail with the following errors: * * - RPMA_E_INVAL - conn or cq are NULL * - RPMA_E_NOT_SHARED_CHNL - the completion event channel is not shared * - RPMA_E_NO_COMPLETION - ibv_get_cq_event(3) failed * - RPMA_E_UNKNOWN - ibv_get_cq_event(3) returned unknown CQ * - RPMA_E_PROVIDER - ibv_req_notify_cq(3) failed * * SEE ALSO * rpma_conn_req_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_wait(struct rpma_conn *conn, int flags, struct rpma_cq **cq, bool *is_rcq); /** 3 * rpma_conn_req_recv - initiate the receive operation * * SYNOPSIS * * #include * * struct rpma_conn_req; * struct rpma_mr_local; * int rpma_conn_req_recv(struct rpma_conn_req *req, struct rpma_mr_local *dst, size_t offset, * size_t len, const void *op_context); * * DESCRIPTION * rpma_conn_req_recv() initiates the receive operation. 
It prepares a buffer for a message sent * from other side of the connection. Please see rpma_send(3). This is a variant of rpma_recv(3) * which may be used before the connection is established. op_context is returned in the wr_id * field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_conn_req_recv() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_conn_req_recv() can fail with the following errors: * * - RPMA_E_INVAL - req or src or op_context is NULL * - RPMA_E_PROVIDER - ibv_post_recv(3) failed * * SEE ALSO * rpma_conn_req_new(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_req_recv(struct rpma_conn_req *req, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context); /* server-side setup */ struct rpma_ep; /** 3 * rpma_ep_listen - create a listening endpoint * * SYNOPSIS * * #include * * struct rpma_peer; * struct rpma_ep; * int rpma_ep_listen(struct rpma_peer *peer, const char *addr, const char *port, * struct rpma_ep **ep_ptr); * * DESCRIPTION * rpma_ep_listen() creates an endpoint and initiates listening for incoming connections using * reliable, connection-oriented and message-based (RDMA_PS_TCP) QP communication. * * RETURN VALUE * The rpma_ep_listen() function returns 0 on success or a negative error code on failure. * rpma_ep_listen() does not set *ep_ptr value on failure. * * ERRORS * rpma_ep_listen() can fail with the following errors: * * - RPMA_E_INVAL - peer, addr, port or ep_ptr is NULL * - RPMA_E_PROVIDER - rdma_create_event_channel(3), rdma_create_id(3), * rdma_getaddrinfo(3), rdma_listen(3) failed * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_ep_get_fd(3), rpma_ep_next_conn_req(3), rpma_ep_shutdown(3), rpma_peer_new(3), librpma(7) * and https://pmem.io/rpma/ */ int rpma_ep_listen(struct rpma_peer *peer, const char *addr, const char *port, struct rpma_ep **ep_ptr); /** 3 * rpma_ep_shutdown - stop listening and delete the endpoint * * SYNOPSIS * * #include * * struct rpma_ep; * int rpma_ep_shutdown(struct rpma_ep **ep_ptr); * * DESCRIPTION * rpma_ep_shutdown() stops listening for incoming connections and deletes the endpoint. * The connections established using the endpoint will still exist after deleting the endpoint. * * RETURN VALUE * The rpma_ep_shutdown() function returns 0 on success or a negative error code on failure. * rpma_ep_shutdown() does not set *ep_ptr value to NULL on failure. * * ERRORS * rpma_ep_shutdown() can fail with the following errors: * * - RPMA_E_INVAL - ep_ptr is NULL * - RPMA_E_PROVIDER - rdma_destroy_id(3) failed * * SEE ALSO * rpma_ep_listen(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_ep_shutdown(struct rpma_ep **ep_ptr); /** 3 * rpma_ep_get_fd - get a file descriptor of the endpoint * * SYNOPSIS * * #include * * struct rpma_ep; * int rpma_ep_get_fd(const struct rpma_ep *ep, int *fd); * * DESCRIPTION * rpma_ep_get_fd() gets the file descriptor of the endpoint. * * RETURN VALUE * The rpma_ep_get_fd() function returns 0 on success or a negative error code on failure. * rpma_ep_get_fd() does not set *fd value on failure. 
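 *
 * EXAMPLE
 * A sketch of a passive side waiting on the endpoint's file descriptor for an incoming connection
 * request and accepting it, using poll(2) from <poll.h> (error handling omitted; ep is an already
 * listening endpoint created with rpma_ep_listen(3)):
 *
 *	struct rpma_conn_req *req = NULL;
 *	struct rpma_conn *conn = NULL;
 *	struct pollfd pfd = {.fd = -1, .events = POLLIN};
 *	rpma_ep_get_fd(ep, &pfd.fd);
 *	poll(&pfd, 1, -1);
 *	rpma_ep_next_conn_req(ep, NULL, &req);
 *	rpma_conn_req_connect(&req, NULL, &conn);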
* * ERRORS * rpma_ep_get_fd() can fail with the following error: * * - RPMA_E_INVAL - ep or fd is NULL * * SEE ALSO * rpma_ep_listen(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_ep_get_fd(const struct rpma_ep *ep, int *fd); /** 3 * rpma_ep_next_conn_req - obtain an incoming connection request * * SYNOPSIS * * #include * * struct rpma_ep; * struct rpma_conn_cfg; * struct rpma_conn_req; * int rpma_ep_next_conn_req(struct rpma_ep *ep, const struct rpma_conn_cfg *cfg, * struct rpma_conn_req **req_ptr); * * DESCRIPTION * rpma_ep_next_conn_req() obtains the next connection request from the endpoint. * * RETURN VALUE * The rpma_ep_next_conn_req() function returns 0 on success or a negative error code on failure. * rpma_ep_next_conn_req() does not set *req_ptr value on failure. * * ERRORS * rpma_ep_next_conn_req() can fail with the following errors: * * - RPMA_E_INVAL - ep or req_ptr is NULL * - RPMA_E_INVAL - obtained an event different than a connection request * - RPMA_E_PROVIDER - rdma_get_cm_event(3) failed * - RPMA_E_NOMEM - out of memory * - RPMA_E_NO_EVENT - no next connection request available * * SEE ALSO * rpma_conn_cfg_new(3), rpma_conn_req_delete(3), rpma_conn_req_connect(3), rpma_ep_listen(3), * librpma(7) and https://pmem.io/rpma/ */ int rpma_ep_next_conn_req(struct rpma_ep *ep, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr); /** 3 * rpma_conn_req_get_private_data - get a pointer to the request's private data * * SYNOPSIS * * #include * * struct rpma_conn_req; * struct rpma_conn_private_data; * int rpma_conn_req_get_private_data(const struct rpma_conn_req *req, * struct rpma_conn_private_data *pdata); * * DESCRIPTION * rpma_conn_req_get_private_data() obtains the pointer to the connection's private data given by * the other side of the connection before the connection is established. * * SECURITY WARNING * See rpma_conn_get_private_data(3). * * RETURN VALUE * The rpma_conn_req_get_private_data() function returns 0 on success or a negative error code on * failure. rpma_conn_req_get_private_data() does not set *pdata value on failure. * * ERRORS * rpma_conn_req_get_private_data() can fail with the following error: * * - RPMA_E_INVAL - req or pdata is NULL * * SEE ALSO * rpma_conn_get_private_data(3), rpma_ep_next_conn_req(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_conn_req_get_private_data(const struct rpma_conn_req *req, struct rpma_conn_private_data *pdata); /* shared RQ configuration */ struct rpma_srq_cfg; /** 3 * rpma_srq_cfg_new - create a new shared RQ configuration object * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_new(struct rpma_srq_cfg **cfg_ptr); * * DESCRIPTION * rpma_srq_cfg_new() creates a new shared RQ configuration object and fills it with the default * values: * * .rcq_size = 100 * .rq_size = 100 * * Note that rpma_srq_new(3) with the default rcq_size creates its own receive CQ. * * RETURN VALUE * The rpma_srq_cfg_new() function returns 0 on success or a negative error code on failure. * rpma_srq_cfg_new() does not set *cfg_ptr value on failure. 
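 *
 * EXAMPLE
 * A sketch of creating a shared RQ with adjusted sizes and attaching it to new connections via
 * the connection configuration (error handling omitted; peer is an already created
 * struct rpma_peer *; the shared RQ configuration object is assumed to be no longer needed once
 * the shared RQ is created):
 *
 *	struct rpma_srq_cfg *scfg = NULL;
 *	struct rpma_srq *srq = NULL;
 *	struct rpma_conn_cfg *ccfg = NULL;
 *	rpma_srq_cfg_new(&scfg);
 *	rpma_srq_cfg_set_rq_size(scfg, 256);
 *	rpma_srq_cfg_set_rcq_size(scfg, 256);
 *	rpma_srq_new(peer, scfg, &srq);
 *	rpma_srq_cfg_delete(&scfg);
 *	rpma_conn_cfg_new(&ccfg);
 *	rpma_conn_cfg_set_srq(ccfg, srq);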
* * ERRORS * rpma_srq_cfg_new() can fail with the following error: * * - RPMA_E_INVAL - cfg_ptr is NULL * - RPMA_E_NOMEM - out of memory * * SEE ALSO * rpma_srq_cfg_delete(3), rpma_srq_cfg_get_rcq_size(3), rpma_srq_cfg_get_rq_size(3), * rpma_srq_cfg_set_rcq_size(3), rpma_srq_cfg_set_rq_size(3), rpma_srq_new(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_srq_cfg_new(struct rpma_srq_cfg **cfg_ptr); /** 3 * rpma_srq_cfg_delete - delete the shared RQ configuration object * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_delete(struct rpma_srq_cfg **cfg_ptr); * * DESCRIPTION * rpma_srq_cfg_delete() deletes the shared RQ configuration object. * * RETURN VALUE * The rpma_srq_cfg_delete() function returns 0 on success or a negative error code on failure. * rpma_srq_cfg_delete() sets *cfg_ptr value to NULL on success and on failure. * * ERRORS * rpma_srq_cfg_delete() can fail with the following error: * * - RPMA_E_INVAL - cfg_ptr is NULL * * SEE ALSO * rpma_srq_cfg_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_cfg_delete(struct rpma_srq_cfg **cfg_ptr); /** 3 * rpma_srq_cfg_set_rq_size - set RQ size of the shared RQ * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_set_rq_size(struct rpma_srq_cfg *cfg, uint32_t rq_size); * * DESCRIPTION * rpma_srq_cfg_set_rq_size() sets the RQ size of the shared RQ. If this function is not called, * the rq_size has the default value (100) set by rpma_srq_cfg_new(3). * * RETURN VALUE * The rpma_srq_cfg_set_rq_size() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_srq_cfg_set_rq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_srq_cfg_new(3), rpma_srq_cfg_get_rq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_cfg_set_rq_size(struct rpma_srq_cfg *cfg, uint32_t rq_size); /** 3 * rpma_srq_cfg_get_rq_size - get RQ size of the shared RQ * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_get_rq_size(const struct rpma_srq_cfg *cfg, uint32_t *rq_size); * * DESCRIPTION * rpma_srq_cfg_get_rq_size() gets the RQ size of the shared RQ. * * RETURN VALUE * The rpma_srq_cfg_get_rq_size() function returns 0 on success or a negative error code on * failure. rpma_srq_cfg_get_rq_size() does not set *rq_size value on failure. * * ERRORS * rpma_srq_cfg_get_rq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or rq_size is NULL * * SEE ALSO * rpma_srq_cfg_new(3), rpma_srq_cfg_set_rq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_cfg_get_rq_size(const struct rpma_srq_cfg *cfg, uint32_t *rq_size); /** 3 * rpma_srq_cfg_set_rcq_size - set receive CQ size of the shared RQ * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_set_rcq_size(struct rpma_srq_cfg *cfg, uint32_t rcq_size); * * DESCRIPTION * rpma_srq_cfg_set_rcq_size() sets the receive CQ size of the shared RQ. If this function is not * called, the rcq_size has the default value (100) set by rpma_srq_cfg_new(3). * * Note that rpma_srq_new(3) does not create its own receive CQ if the size of the receive CQ * equals 0. * * RETURN VALUE * The rpma_srq_cfg_set_rcq_size() function returns 0 on success or a negative error code on * failure. 
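 *
 * EXAMPLE
 * A minimal sketch of requesting a shared RQ without its own receive CQ (see the Note above).
 * srq_cfg is assumed to be a configuration object created with rpma_srq_cfg_new(3) and error
 * handling is omitted.
 *
 *      uint32_t rcq_size;
 *
 *      (void) rpma_srq_cfg_set_rcq_size(srq_cfg, 0);
 *      (void) rpma_srq_cfg_get_rcq_size(srq_cfg, &rcq_size); /* rcq_size == 0 */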
 * * ERRORS * rpma_srq_cfg_set_rcq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg is NULL * * SEE ALSO * rpma_srq_cfg_get_rcq_size(3), rpma_srq_cfg_new(3), rpma_srq_new(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_srq_cfg_set_rcq_size(struct rpma_srq_cfg *cfg, uint32_t rcq_size); /** 3 * rpma_srq_cfg_get_rcq_size - get receive CQ size of the shared RQ * * SYNOPSIS * * #include * * struct rpma_srq_cfg; * int rpma_srq_cfg_get_rcq_size(const struct rpma_srq_cfg *cfg, uint32_t *rcq_size); * * DESCRIPTION * rpma_srq_cfg_get_rcq_size() gets the receive CQ size of the shared RQ. * * RETURN VALUE * The rpma_srq_cfg_get_rcq_size() function returns 0 on success or a negative error code on * failure. rpma_srq_cfg_get_rcq_size() does not set *rcq_size value on failure. * * ERRORS * rpma_srq_cfg_get_rcq_size() can fail with the following error: * * - RPMA_E_INVAL - cfg or rcq_size is NULL * * SEE ALSO * rpma_srq_cfg_new(3), rpma_srq_cfg_set_rcq_size(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_cfg_get_rcq_size(const struct rpma_srq_cfg *cfg, uint32_t *rcq_size); /* shared RQ */ /** 3 * rpma_srq_new - create a new shared RQ object * * SYNOPSIS * * #include * * struct rpma_peer; * struct rpma_srq_cfg; * struct rpma_srq; * int rpma_srq_new(struct rpma_peer *peer, const struct rpma_srq_cfg *cfg, * struct rpma_srq **srq_ptr); * * DESCRIPTION * rpma_srq_new() creates a new shared RQ object including a new shared RQ and a new shared receive * CQ. It does not create the shared receive CQ if the size of the receive CQ in cfg equals 0. * * RETURN VALUE * The rpma_srq_new() function returns 0 on success or a negative error code on failure. * rpma_srq_new() does not set *srq_ptr value on failure. If cfg is NULL, then the default values * are used * - see rpma_srq_cfg_new(3) for more details. * * ERRORS * rpma_srq_new() can fail with the following errors: * * - RPMA_E_INVAL - peer or srq_ptr is NULL * - RPMA_E_NOMEM - out of memory * - RPMA_E_PROVIDER - ibv_create_srq(3), ibv_create_comp_channel(3), ibv_create_cq(3) or * ibv_req_notify_cq(3) failed * * SEE ALSO * rpma_srq_delete(3), rpma_srq_get_rcq(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_new(struct rpma_peer *peer, struct rpma_srq_cfg *cfg, struct rpma_srq **srq_ptr); /** 3 * rpma_srq_delete - delete the shared RQ object * * SYNOPSIS * * #include * * struct rpma_srq; * int rpma_srq_delete(struct rpma_srq **srq_ptr); * * DESCRIPTION * rpma_srq_delete() deletes the shared RQ object. * * RETURN VALUE * The rpma_srq_delete() function returns 0 on success or a negative error code on failure. * rpma_srq_delete() sets *srq_ptr value to NULL on success and on failure. * * ERRORS * rpma_srq_delete() can fail with the following errors: * * - RPMA_E_INVAL - srq_ptr is NULL * - RPMA_E_PROVIDER - ibv_destroy_cq(3), ibv_destroy_comp_channel(3), or ibv_destroy_srq(3) failed * * SEE ALSO * rpma_srq_new(3), rpma_srq_get_rcq(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_delete(struct rpma_srq **srq_ptr); /** 3 * rpma_srq_recv - initiate the receive operation in shared RQ * * SYNOPSIS * * #include * * struct rpma_srq; * struct rpma_mr_local; * int rpma_srq_recv(struct rpma_srq *srq, struct rpma_mr_local *dst, size_t offset, * size_t len, const void *op_context); * * DESCRIPTION * If multiple local connections use a shared RQ, rpma_srq_recv() initiates the receive operation * which prepares a buffer for a message sent from the other side of these connections. Please see * rpma_send(3). 
* * All buffers prepared via rpma_srq_recv(3) form an unordered set. When a message arrives it is * placed in one of the buffers awaiting and a completion for the receive operation is generated. * * A buffer for an incoming message has to be prepared beforehand. * * The order of buffers in the set does not affect the order of completions of the receive * operations got via rpma_cq_get_wc(3). * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * NOTE * In the RDMA standard, receive requests form an ordered queue. * The librpma library does NOT inherit this guarantee. * * RETURN VALUE * The rpma_srq_recv() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_srq_recv() can fail with the following errors: * * - RPMA_E_INVAL - srq == NULL * - RPMA_E_INVAL - dst == NULL && (offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_srq_recv(3) failed * * SEE ALSO * rpma_mr_reg(3), rpma_srq_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_recv(struct rpma_srq *srq, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context); /** 3 * rpma_srq_get_rcq -- get the receive CQ from the shared RQ object * * SYNOPSIS * * #include * * struct rpma_srq; * struct rpma_cq; * int rpma_srq_get_rcq(const struct rpma_srq *srq, struct rpma_cq **rcq_ptr); * * DESCRIPTION * rpma_srq_get_rcq() gets the receive CQ from the shared RQ object. The receive CQ created by * rpma_srq_new(3) allows handling all rpma_srq_recv(3) completions within the shared RQ. * rpma_srq_cfg_set_rcq_size(3) can change the receive CQ size. * * RETURN VALUE * The rpma_srq_get_rcq() function returns 0 on success or a negative error code on failure. * rpma_srq_get_rcq() does not set *rcq_ptr value on failure. * * ERRORS * rpma_srq_get_rcq() can fail with the following error: * * - RPMA_E_INVAL - srq or rcq_ptr is NULL * * SEE ALSO * rpma_cq_wait(3), rpma_cq_get_wc(3), rpma_cq_get_fd(3), rpma_srq_cfg_set_rcq_size(3), * rpma_srq_new(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_srq_get_rcq(const struct rpma_srq *srq, struct rpma_cq **rcq_ptr); /* remote memory access functions */ /* generate operation completion on error */ #define RPMA_F_COMPLETION_ON_ERROR (1 << 0) /* generate operation completion regardless of its result */ #define RPMA_F_COMPLETION_ALWAYS (1 << 1 | RPMA_F_COMPLETION_ON_ERROR) /** 3 * rpma_read - initiate the read operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * struct rpma_mr_remote; * int rpma_read(struct rpma_conn *conn, * struct rpma_mr_local *dst, size_t dst_offset, * const struct rpma_mr_remote *src, size_t src_offset, * size_t len, int flags, const void *op_context); * * DESCRIPTION * rpma_read() initiates transferring data from the remote memory to the local memory. To read a 0 * bytes message, set src and dst to NULL and src_offset, dst_offset and len to 0. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_read() function returns 0 on success or a negative error code on failure. 
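 *
 * EXAMPLE
 * A minimal sketch of reading len bytes from a remote memory region and waiting for the
 * completion. conn, dst (a local memory region registered with RPMA_MR_USAGE_READ_DST), src
 * (a remote memory region usable as a read source) and cq (obtained with rpma_conn_get_cq(3)) are
 * assumed to be already set up; error handling is omitted.
 *
 *      struct ibv_wc wc;
 *
 *      (void) rpma_read(conn, dst, 0, src, 0, len, RPMA_F_COMPLETION_ALWAYS, NULL);
 *      (void) rpma_cq_wait(cq);
 *      (void) rpma_cq_get_wc(cq, 1, &wc, NULL);
 *      if (wc.status == IBV_WC_SUCCESS && wc.opcode == IBV_WC_RDMA_READ) {
 *              /* the requested data are now in the local memory region */
 *      }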
* * ERRORS * rpma_read() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL || flags == 0 * - RPMA_E_INVAL - dst == NULL && (src != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_INVAL - src == NULL && (dst != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), rpma_mr_remote_from_descriptor(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_read(struct rpma_conn *conn, struct rpma_mr_local *dst, size_t dst_offset, const struct rpma_mr_remote *src, size_t src_offset, size_t len, int flags, const void *op_context); /** 3 * rpma_write - initiate the write operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * struct rpma_mr_remote; * int rpma_write(struct rpma_conn *conn, * struct rpma_mr_remote *dst, size_t dst_offset, * const struct rpma_mr_local *src, size_t src_offset, * size_t len, int flags, const void *op_context); * * DESCRIPTION * rpma_write() initiates transferring data from the local memory to the remote memory. To write * a 0 bytes message, set src and dst to NULL and src_offset, dst_offset and len to 0. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_write() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_write() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL || flags == 0 * - RPMA_E_INVAL - dst == NULL && (src != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_INVAL - src == NULL && (dst != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), rpma_mr_remote_from_descriptor(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_write(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, const void *op_context); /** 3 * rpma_write_with_imm - initiate the write operation with immediate data * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * struct rpma_mr_remote; * int rpma_write_with_imm(struct rpma_conn *conn, * struct rpma_mr_remote *dst, size_t dst_offset, * const struct rpma_mr_local *src, size_t src_offset, * size_t len, int flags, uint32_t imm, * const void *op_context); * * DESCRIPTION * rpma_write_with_imm() initiates the write operation with immediate data (transferring data from * the local memory to the remote memory. To write a 0 bytes message, set src and dst to NULL and * src_offset, dst_offset and len to 0. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_write_with_imm() function returns 0 on success or a negative error code on failure. 
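 *
 * EXAMPLE
 * A minimal sketch of passing a 32-bit token along with the written data. conn, dst (remote),
 * src (local) and the value of token are assumptions of this sketch; error handling is omitted.
 * The passive side has to have a receive prepared beforehand (a zero-length rpma_recv(3) is
 * enough to consume the immediate data) and, in this sketch, the received immediate value is
 * assumed to require a byte-order conversion with ntohl(3).
 *
 *      /* active side */
 *      (void) rpma_write_with_imm(conn, dst, 0, src, 0, len, RPMA_F_COMPLETION_ALWAYS, token,
 *                      NULL);
 *
 *      /* passive side */
 *      (void) rpma_recv(conn, NULL, 0, 0, NULL);
 *      /* ... wait for and collect the completion (wc) ... */
 *      if (wc.opcode == IBV_WC_RECV_RDMA_WITH_IMM && (wc.wc_flags & IBV_WC_WITH_IMM))
 *              token = ntohl(wc.imm_data);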
* * ERRORS * rpma_write_with_imm() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL || flags == 0 * - RPMA_E_INVAL - dst == NULL && (src != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_INVAL - src == NULL && (dst != NULL || src_offset != 0 || dst_offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), rpma_mr_remote_from_descriptor(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_write_with_imm(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, uint32_t imm, const void *op_context); #define RPMA_ATOMIC_WRITE_ALIGNMENT 8 /** 3 * rpma_atomic_write -- initiate the atomic 8 bytes write operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_remote; * int rpma_atomic_write(struct rpma_conn *conn, * struct rpma_mr_remote *dst, size_t dst_offset, * const char src[8], int flags, const void *op_context); * * DESCRIPTION * rpma_atomic_write() initiates the atomic 8 bytes write operation (transferring data from * the local memory to the remote memory). The atomic write operation allows transferring exactly 8 * bytes of data and storing them atomically in the remote memory. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_atomic_write() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_atomic_write() can fail with the following errors: * * - RPMA_E_INVAL - conn, dst or src is NULL * - RPMA_E_INVAL - dst_offset is not aligned to 8 bytes * - RPMA_E_INVAL - flags are not set (flags == 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), rpma_mr_remote_from_descriptor(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_atomic_write(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, const char src[8], int flags, const void *op_context); /* * possible types of rpma_flush() operation */ enum rpma_flush_type { /* flush data down to the persistent domain */ RPMA_FLUSH_TYPE_PERSISTENT, /* flush data deep enough to make it visible on the remote node */ RPMA_FLUSH_TYPE_VISIBILITY, }; /** 3 * rpma_flush - initiate the flush operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_remote; * enum rpma_flush_type { * RPMA_FLUSH_TYPE_PERSISTENT, * RPMA_FLUSH_TYPE_VISIBILITY, * }; * * int rpma_flush(struct rpma_conn *conn, * struct rpma_mr_remote *dst, size_t dst_offset, * size_t len, enum rpma_flush_type type, int flags, * const void *op_context); * * DESCRIPTION * rpma_flush() initiates the flush operation (finalizing a transfer of data to the remote memory). * Possible types of rpma_flush() operation: * - RPMA_FLUSH_TYPE_PERSISTENT - flush data down to the persistent domain * - RPMA_FLUSH_TYPE_VISIBILITY - flush data deep enough to make it visible on the remote node * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). 
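 *
 * EXAMPLE
 * A minimal sketch of writing data and making it persistent on the remote node. conn, dst
 * (a remote memory region registered with RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT), src and cq are
 * assumed to be already set up, the remote peer configuration with the direct write to pmem
 * enabled is assumed to have been applied with rpma_conn_apply_remote_peer_cfg(3), and error
 * handling is omitted.
 *
 *      struct ibv_wc wc;
 *
 *      (void) rpma_write(conn, dst, 0, src, 0, len, RPMA_F_COMPLETION_ON_ERROR, NULL);
 *      (void) rpma_flush(conn, dst, 0, len, RPMA_FLUSH_TYPE_PERSISTENT,
 *                      RPMA_F_COMPLETION_ALWAYS, NULL);
 *      (void) rpma_cq_wait(cq);
 *      (void) rpma_cq_get_wc(cq, 1, &wc, NULL);
 *      if (wc.status == IBV_WC_SUCCESS) {
 *              /* the flushed data is expected to be persistent on the remote node */
 *      }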
* * RETURN VALUE * The rpma_flush() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_flush() can fail with the following errors: * * - RPMA_E_INVAL - conn or dst is NULL * - RPMA_E_INVAL - unknown type value * - RPMA_E_INVAL - flags are not set * - RPMA_E_PROVIDER - ibv_post_send(3) failed * - RPMA_E_NOSUPP - type is RPMA_FLUSH_TYPE_PERSISTENT and the direct write to pmem is not * supported * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_remote_from_descriptor(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_flush(struct rpma_conn *conn, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context); /** 3 * rpma_send - initiate the send operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * int rpma_send(struct rpma_conn *conn, const struct rpma_mr_local *src, size_t offset, * size_t len, int flags, const void *op_context); * * DESCRIPTION * rpma_send() initiates the send operation which transfers a message from the local memory to * other side of the connection. To send a 0 byte message, set src to NULL and both offset and len * to 0. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_send() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_send() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL || flags == 0 * - RPMA_E_INVAL - src == NULL && (offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_send(struct rpma_conn *conn, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, const void *op_context); /** 3 * rpma_send_with_imm - initiate the send operation with immediate data * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * int rpma_send_with_imm(struct rpma_conn *conn, const struct rpma_mr_local *src, * size_t offset, size_t len, int flags, uint32_t imm, * const void *op_context); * * DESCRIPTION * rpma_send_with_imm() initiates the send operation with immediate data which transfers a message * from the local memory to other side of the connection. To send a 0 byte message, set src to NULL * and both offset and len to 0. * * The attribute flags set the completion notification indicator: * - RPMA_F_COMPLETION_ON_ERROR - generate the completion on error * - RPMA_F_COMPLETION_ALWAYS - generate the completion regardless of result of the operation. * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * RETURN VALUE * The rpma_send_with_imm() function returns 0 on success or a negative error code on failure. 
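 *
 * EXAMPLE
 * A minimal sketch of simple messaging with an immediate value. conn, send_mr (registered with
 * RPMA_MR_USAGE_SEND), recv_mr (registered with RPMA_MR_USAGE_RECV) and MSG_SIZE are assumptions
 * of this sketch; error handling and completion collection are omitted. The receiving side is
 * assumed to convert the immediate value with ntohl(3).
 *
 *      /* passive side: prepare a receive buffer beforehand */
 *      (void) rpma_recv(conn, recv_mr, 0, MSG_SIZE, NULL);
 *
 *      /* active side: send the message together with a 32-bit immediate value */
 *      (void) rpma_send_with_imm(conn, send_mr, 0, MSG_SIZE, RPMA_F_COMPLETION_ALWAYS, imm, NULL);
 *
 *      /* passive side: the completion carries both the message length and the immediate data */
 *      if (wc.opcode == IBV_WC_RECV && (wc.wc_flags & IBV_WC_WITH_IMM))
 *              printf("%u bytes received, imm = %u\n", wc.byte_len, ntohl(wc.imm_data));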
 * * ERRORS * rpma_send_with_imm() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL || flags == 0 * - RPMA_E_INVAL - src == NULL && (offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_send(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_send_with_imm(struct rpma_conn *conn, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, uint32_t imm, const void *op_context); /** 3 * rpma_recv - initiate the receive operation * * SYNOPSIS * * #include * * struct rpma_conn; * struct rpma_mr_local; * int rpma_recv(struct rpma_conn *conn, struct rpma_mr_local *dst, size_t offset, size_t len, * const void *op_context); * * DESCRIPTION * rpma_recv() initiates the receive operation which prepares a buffer for a message sent from * the other side of the connection. Please see rpma_send(3). * * All buffers prepared via rpma_recv(3) form an unordered set. When a message arrives, it is placed * in one of the awaiting buffers and a completion for the receive operation is generated. * * A buffer for an incoming message has to be prepared beforehand. * * The order of buffers in the set does not affect the order of completions of receive operations * got via rpma_cq_get_wc(3). * * op_context is returned in the wr_id field of the completion (struct ibv_wc). * * NOTE * In the RDMA standard, receive requests form an ordered queue. * The librpma library does NOT inherit this guarantee. * * RETURN VALUE * The rpma_recv() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_recv() can fail with the following errors: * * - RPMA_E_INVAL - conn == NULL * - RPMA_E_INVAL - dst == NULL && (offset != 0 || len != 0) * - RPMA_E_PROVIDER - ibv_post_recv(3) failed * * SEE ALSO * rpma_conn_req_connect(3), rpma_mr_reg(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_recv(struct rpma_conn *conn, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context); /* completion handling */ /** 3 * rpma_cq_get_fd - get the completion queue's file descriptor * * SYNOPSIS * * #include * * struct rpma_cq; * int rpma_cq_get_fd(const struct rpma_cq *cq, int *fd); * * DESCRIPTION * rpma_cq_get_fd() gets the file descriptor of the completion queue (CQ in short). When the next * completion in the CQ is ready to be consumed by rpma_cq_get_wc(3), the notification is delivered * via the file descriptor. The default mode of the file descriptor is blocking but it can be * changed to non-blocking mode using fcntl(2). The CQ is either the connection's main CQ, * the receive CQ or the CQ of a shared RQ, please see rpma_conn_get_cq(3), rpma_conn_get_rcq(3) or * rpma_srq_get_rcq(3) for details. * * Note that after spotting the notification using the provided file descriptor you do not have to * call rpma_cq_wait(3) before consuming the completion, but then the next call to rpma_cq_wait(3) * may notify you of an already consumed completion. * * RETURN VALUE * The rpma_cq_get_fd() function returns 0 on success or a negative error code on failure. * rpma_cq_get_fd() does not set *fd value on failure. 
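 *
 * EXAMPLE
 * A minimal sketch of waiting for completions with poll(2) instead of blocking in rpma_cq_wait(3).
 * cq is assumed to be obtained with rpma_conn_get_cq(3), rpma_conn_get_rcq(3) or
 * rpma_srq_get_rcq(3), the poll.h header is assumed to be included and error handling is omitted.
 *
 *      int fd;
 *      struct pollfd pfd;
 *
 *      (void) rpma_cq_get_fd(cq, &fd);
 *      pfd.fd = fd;
 *      pfd.events = POLLIN;
 *      while (poll(&pfd, 1, -1) == 1) {
 *              (void) rpma_cq_wait(cq); /* ack the notification */
 *              /* collect all available completions with rpma_cq_get_wc(3) */
 *      }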
* * ERRORS * rpma_cq_get_fd() can fail with the following error: * * - RPMA_E_INVAL - cq or fd is NULL * * SEE ALSO * fcntl(2), rpma_conn_get_cq(3), rpma_conn_get_rcq(3), rpma_srq_get_rcq(3), rpma_cq_wait(3), * rpma_cq_get_wc(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_cq_get_fd(const struct rpma_cq *cq, int *fd); /** 3 * rpma_cq_wait - wait for a completion and ack it * * SYNOPSIS * * #include * * struct rpma_cq; * int rpma_cq_wait(struct rpma_cq *cq); * * DESCRIPTION * rpma_cq_wait() waits for an incoming completion event and acks it. If rpma_cq_wait() succeeds, * then all available completions should be collected using rpma_cq_get_wc(3) before the next * rpma_cq_wait() call. * * RETURN VALUE * The rpma_cq_wait() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_cq_wait() can fail with the following errors: * * - RPMA_E_INVAL - cq is NULL * - RPMA_E_PROVIDER - ibv_req_notify_cq(3) failed with a provider error * - RPMA_E_NO_COMPLETION - no completions available * - RPMA_E_SHARED_CHANNEL - the completion event channel is shared and cannot be handled by any * particular CQ * * SEE ALSO * rpma_conn_get_cq(3), rpma_conn_get_rcq(3), rpma_cq_get_wc(3), rpma_cq_get_fd(3), librpma(7) and * https://pmem.io/rpma/ */ int rpma_cq_wait(struct rpma_cq *cq); /** 3 * rpma_cq_get_wc - receive one or more completions * * SYNOPSIS * * #include * * struct rpma_cq; * struct ibv_wc; * * int rpma_cq_get_wc(struct rpma_cq *cq, int num_entries, struct ibv_wc *wc, * int *num_entries_got); * * DESCRIPTION * rpma_cq_get_wc() polls the CQ for completions and returns the first num_entries (or all * available completions if the CQ contains fewer than this number) in the wc array exactly like * ibv_poll_cq(3) does. The argument wc is a pointer to an array of ibv_wc structs, as defined in * . The number of got completions is returned in the num_entries_got argument * if it is not NULL. It can be NULL only if num_entries equals 1. All operations generate * completions on error. The operations posted with the RPMA_F_COMPLETION_ALWAYS flag also generate * completions on success. * * Note that if the provided cq is the main CQ and the receive CQ is present on the same connection * this function won't return IBV_WC_RECV and IBV_WC_RECV_RDMA_WITH_IMM at any time. The receive CQ * has to be used instead to collect these completions. Please see the rpma_conn_get_rcq(3) for * details about the receive CQ. * * RETURN VALUE * The rpma_cq_get_wc() function returns 0 on success or a negative error code on failure. On * success, it saves all got completions and their number into the wc and num_entries_got * respectively. If the status of a completion is not equal to IBV_WC_SUCCESS then only * the following attributes are valid: wr_id, status, qp_num, and vendor_err. 
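 *
 * EXAMPLE
 * A minimal sketch of draining the CQ in batches after a notification. cq is assumed to be already
 * obtained, the batch size (16) is arbitrary and error handling is reduced to checking
 * the completion status.
 *
 *      struct ibv_wc wcs[16];
 *      int num_got, i, ret;
 *
 *      (void) rpma_cq_wait(cq);
 *      while ((ret = rpma_cq_get_wc(cq, 16, wcs, &num_got)) == 0) {
 *              for (i = 0; i < num_got; i++) {
 *                      if (wcs[i].status != IBV_WC_SUCCESS) {
 *                              /* only wr_id, status, qp_num and vendor_err are valid here */
 *                      }
 *              }
 *      }
 *      /* ret == RPMA_E_NO_COMPLETION means all available completions have been collected */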
* * ERRORS * rpma_cq_get_wc() can fail with the following errors: * * - RPMA_E_INVAL - num_entries < 1, cq or wc is NULL, num_entries > 1 and num_entries_got is NULL * - RPMA_E_NO_COMPLETION - no completions available * - RPMA_E_PROVIDER - ibv_poll_cq(3) failed with a provider error * - RPMA_E_UNKNOWN - ibv_poll_cq(3) failed but no provider error is available * * SEE ALSO * rpma_conn_get_cq(3), rpma_conn_get_rcq(3), rpma_conn_req_recv(3), rpma_cq_wait(3), * rpma_cq_get_fd(3), rpma_flush(3), rpma_read(3), rpma_recv(3), rpma_send(3), * rpma_send_with_imm(3), rpma_write(3), rpma_atomic_write(3), rpma_write_with_imm(3), librpma(7) * and https://pmem.io/rpma/ */ int rpma_cq_get_wc(struct rpma_cq *cq, int num_entries, struct ibv_wc *wc, int *num_entries_got); /* error handling */ /** 3 * rpma_err_2str - convert RPMA error code to a string * * SYNOPSIS * * #include * * const char *rpma_err_2str(int ret); * * DESCRIPTION * rpma_err_2str() returns the const string representation of RPMA error codes. * * ERRORS * rpma_err_2str() can not fail. * * SEE ALSO * librpma(7) and https://pmem.io/rpma/ */ const char *rpma_err_2str(int ret); /* librpma logging mechanism control */ /* * Available log levels in librpma. Log levels (except RPMA_LOG_DISABLED) are used in logging API * calls to indicate logging message severity. Log levels are also used to define thresholds for * logging. */ enum rpma_log_level { /* all messages will be suppressed */ RPMA_LOG_DISABLED = -1, /* an error that causes the library to stop working immediately */ RPMA_LOG_LEVEL_FATAL, /* an error that causes the library to stop working properly */ RPMA_LOG_LEVEL_ERROR, /* an errors that could be handled in the upper level */ RPMA_LOG_LEVEL_WARNING, /* * non-massive info mainly related to public API function completions e.g. connection * established */ RPMA_LOG_LEVEL_NOTICE, /* massive info e.g. every write operation indication */ RPMA_LOG_LEVEL_INFO, /* debug info e.g. write operation dump */ RPMA_LOG_LEVEL_DEBUG, }; enum rpma_log_threshold { /* * the main threshold level - the logging messages above this level won't trigger * the logging functions */ RPMA_LOG_THRESHOLD, /* * the auxiliary threshold level - may or may not be used by the logging function */ RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_THRESHOLD_MAX }; /** 3 * rpma_log_set_threshold - set the logging threshold level * * SYNOPSIS * * #include * * int rpma_log_set_threshold(enum rpma_log_threshold threshold, enum rpma_log_level level); * * enum rpma_log_level { * RPMA_LOG_DISABLED, * RPMA_LOG_LEVEL_FATAL, * RPMA_LOG_LEVEL_ERROR, * RPMA_LOG_LEVEL_WARNING, * RPMA_LOG_LEVEL_NOTICE, * RPMA_LOG_LEVEL_INFO, * RPMA_LOG_LEVEL_DEBUG, * }; * * enum rpma_log_threshold { * RPMA_LOG_THRESHOLD, * RPMA_LOG_THRESHOLD_AUX, * RPMA_LOG_THRESHOLD_MAX * }; * * DESCRIPTION * rpma_log_set_threshold() sets the logging threshold level. * * Available thresholds are: * - RPMA_LOG_THRESHOLD - the main threshold used to filter out undesired logging messages. * Messages on a higher level than the primary threshold level are ignored. RPMA_LOG_DISABLED * shall be used to suppress logging. The default value is RPMA_LOG_WARNING. * - RPMA_LOG_THRESHOLD_AUX - the auxiliary threshold intended for use inside the logging function * (please see rpma_log_get_threshold(3)). The logging function may or may not take this * threshold into consideration. The default value is RPMA_LOG_DISABLED. 
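 *
 * For example, to let through as much as possible to the primary destination while keeping
 * the auxiliary destination disabled (the chosen levels are illustrative only):
 *
 *      (void) rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_DEBUG);
 *      (void) rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_DISABLED);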
* * Available threshold levels are defined by enum rpma_log_level: * - RPMA_LOG_DISABLED - all messages will be suppressed * - RPMA_LOG_LEVEL_FATAL - an error that causes the library to stop working immediately * - RPMA_LOG_LEVEL_ERROR - an error that causes the library to stop working properly * - RPMA_LOG_LEVEL_WARNING - an error that could be handled in the upper level * - RPMA_LOG_LEVEL_NOTICE - non-massive info mainly related to public API function completions * e.g. connection established * - RPMA_LOG_LEVEL_INFO - massive info e.g. every write operation indication * - RPMA_LOG_LEVEL_DEBUG - debug info e.g. write operation dump * * THE DEFAULT LOGGING FUNCTION * The default logging function writes messages to syslog(3) and to stderr(3), where syslog(3) is * the primary destination (RPMA_LOG_THRESHOLD applies) whereas stderr(3) is the secondary * destination (RPMA_LOG_THRESHOLD_AUX applies). * * RETURN VALUE * rpma_log_syslog_set_threshold() function returns 0 on success or a negative error code on * failure. * * ERRORS * rpma_log_set_threshold() can fail with the following errors: * - RPMA_E_INVAL - threshold is not RPMA_LOG_THRESHOLD nor RPMA_LOG_THRESHOLD_AUX * - RPMA_E_INVAL - level is not a value defined by enum rpma_log_level type * - RPMA_E_AGAIN - a temporary error occurred, the retry may fix the problem * * SEE ALSO * rpma_log_get_threshold(3), rpma_log_set_function(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_log_set_threshold(enum rpma_log_threshold threshold, enum rpma_log_level level); /** 3 * rpma_log_get_threshold - get the logging threshold level * * SYNOPSIS * * #include * * int rpma_log_get_threshold(enum rpma_log_threshold threshold, enum rpma_log_level *level); * * DESCRIPTION * rpma_log_get_threshold() gets the current level of the threshold. * See rpma_log_set_threshold(3) for available thresholds and levels. * * RETURN VALUE * rpma_log_get_threshold() function returns 0 on success or a negative error code on failure. * * ERRORS * rpma_log_get_threshold() can fail with the following errors: * - RPMA_E_INVAL - threshold is not RPMA_LOG_THRESHOLD nor RPMA_LOG_THRESHOLD_AUX * - RPMA_E_INVAL - *level is NULL * * SEE ALSO * rpma_log_set_function(3), rpma_log_set_threshold(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_log_get_threshold(enum rpma_log_threshold threshold, enum rpma_log_level *level); /* * the type used for defining logging functions */ typedef void rpma_log_function( /* the log level of the message */ enum rpma_log_level level, /* name of the source file where the message coming from */ const char *file_name, /* the source file line where the message coming from */ const int line_no, /* the function name where the message coming from */ const char *function_name, /* printf(3)-like format string of the message */ const char *message_format, /* additional arguments of the message format string */ ...); #define RPMA_LOG_USE_DEFAULT_FUNCTION (NULL) /** 3 * rpma_log_set_function - set the logging function * * SYNOPSIS * * #include * * typedef void rpma_log_function( * enum rpma_log_level level, * const char *file_name, * const int line_no, * const char *function_name, * const char *message_format, * ...); * * int rpma_log_set_function(rpma_log_function *log_function); * * DESCRIPTION * rpma_log_set_function() allows choosing the function which will get all the generated logging * messages. 
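 *
 * For example, a minimal user-defined logging function may look as follows (a sketch only: it
 * assumes the stdarg.h and stdio.h headers are included, it ignores the level and location
 * parameters and it writes the whole message with a single vfprintf(3) call to stay thread-safe):
 *
 *      static void my_log_function(enum rpma_log_level level, const char *file_name,
 *                      const int line_no, const char *function_name,
 *                      const char *message_format, ...)
 *      {
 *              va_list args;
 *
 *              va_start(args, message_format);
 *              (void) vfprintf(stderr, message_format, args);
 *              va_end(args);
 *      }
 *
 *      (void) rpma_log_set_function(my_log_function);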
The log_function can be either RPMA_LOG_USE_DEFAULT_FUNCTION which will use * the default logging function (built into the library) or a pointer to a user-defined function. * * Parameters of a user-defined log function are as follow: * - level - the log level of the message * - file_name - name of the source file where the message coming from. It could be set to NULL and * in such case neither line_no nor function_name are provided. * - line_no - the source file line where the message coming from * - function_name - the function name where the message coming from * - message_format - printf(3)-like format string of the message * - "..." - additional arguments of the message format string * * THE DEFAULT LOGGING FUNCTION * The initial value of the logging function is RPMA_LOG_USE_DEFAULT_FUNCTION. This function writes * messages to syslog(3) (the primary destination) and to stderr(3) (the secondary destination). * * RETURN VALUE * rpma_log_set_function() function returns 0 on success or error code on failure. * * ERRORS * - RPMA_E_AGAIN - a temporary error occurred, the retry may fix the problem * * NOTE * The logging messages on the levels above the RPMA_LOG_THRESHOLD level won't trigger the logging * function. * * The user defined function must be thread-safe. * * SEE ALSO * rpma_log_get_threshold(3), rpma_log_set_threshold(3), librpma(7) and https://pmem.io/rpma/ */ int rpma_log_set_function(rpma_log_function *log_function); #ifdef __cplusplus } #endif #endif /* LIBRPMA_H */ rpma-1.3.0/src/info.c000066400000000000000000000065731443364775400143740ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * info.c -- librpma info-related implementations */ #include #include #include #include "conn_req.h" #include "debug.h" #include "info.h" #include "log_internal.h" #include "librpma.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif struct rpma_info { /* either active or passive side of the connection */ enum rpma_info_side side; /* a cache of the translated address */ struct rdma_addrinfo *rai; }; /* internal librpma API */ /* * rpma_info_new -- create an address translation cache aka the info object */ int rpma_info_new(const char *addr, const char *port, enum rpma_info_side side, struct rpma_info **info_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (addr == NULL || info_ptr == NULL) return RPMA_E_INVAL; /* prepare hints */ struct rdma_addrinfo hints; memset(&hints, 0, sizeof(hints)); if (side == RPMA_INFO_PASSIVE) hints.ai_flags |= RAI_PASSIVE; hints.ai_qp_type = IBV_QPT_RC; hints.ai_port_space = RDMA_PS_TCP; /* query */ struct rdma_addrinfo *rai = NULL; #ifdef RDMA_GETADDRINFO_OLD_SIGNATURE int ret = rdma_getaddrinfo((char *)addr, (char *)port, &hints, &rai); #else int ret = rdma_getaddrinfo(addr, port, &hints, &rai); #endif if (ret) { const char *err = (ret == -1 || ret == EAI_SYSTEM) ? strerror(errno) : gai_strerror(ret); RPMA_LOG_ERROR( "rdma_getaddrinfo(node=%s, service=%s, ai_flags=%s, ai_qp_type=IBV_QPT_RC, ai_port_space=RDMA_PS_TCP): %s", addr, port, (hints.ai_flags & RAI_PASSIVE) ? 
"passive" : "active", err); return RPMA_E_PROVIDER; } struct rpma_info *info = malloc(sizeof(*info)); if (info == NULL) { ret = RPMA_E_NOMEM; goto err_freeaddrinfo; } info->side = side; info->rai = rai; *info_ptr = info; return 0; err_freeaddrinfo: rdma_freeaddrinfo(rai); return ret; } /* * rpma_info_delete -- release the address translation cache and delete the info object */ int rpma_info_delete(struct rpma_info **info_ptr) { RPMA_DEBUG_TRACE; if (info_ptr == NULL) return RPMA_E_INVAL; struct rpma_info *info = *info_ptr; if (info == NULL) return 0; rdma_freeaddrinfo(info->rai); free(info); *info_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_info_resolve_addr -- resolve the CM ID's destination address * * ASSUMPTIONS * - info != NULL * - id != NULL * - timeout_ms > 0 */ int rpma_info_resolve_addr(const struct rpma_info *info, struct rdma_cm_id *id, int timeout_ms) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = rdma_resolve_addr(id, info->rai->ai_src_addr, info->rai->ai_dst_addr, timeout_ms); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_resolve_addr(src_addr=%s, dst_addr=%s, timeout_ms=%d)", info->rai->ai_src_canonname, info->rai->ai_dst_canonname, timeout_ms); return RPMA_E_PROVIDER; } return 0; } /* * rpma_info_bind_addr -- bind the CM ID to the local address */ int rpma_info_bind_addr(const struct rpma_info *info, struct rdma_cm_id *id) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (id == NULL || info == NULL) return RPMA_E_INVAL; int ret = rdma_bind_addr(id, info->rai->ai_src_addr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_bind_addr(addr=%s)", info->rai->ai_src_canonname); return RPMA_E_PROVIDER; } return 0; } rpma-1.3.0/src/info.h000066400000000000000000000033231443364775400143670ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * info.h -- librpma info-related internal definitions */ #ifndef LIBRPMA_INFO_H #define LIBRPMA_INFO_H #include /* active or passive side of the connection */ enum rpma_info_side { RPMA_INFO_PASSIVE, /* a passive side of the connection */ RPMA_INFO_ACTIVE /* an active side of the connection */ }; struct rpma_info; /* * ERRORS * rpma_info_new() can fail with the following errors: * * - RPMA_E_INVAL - addr or info_ptr is NULL * - RPMA_E_PROVIDER - address translation failed with error * - RPMA_E_NOMEM - out of memory */ int rpma_info_new(const char *addr, const char *port, enum rpma_info_side side, struct rpma_info **info_ptr); /* * ERRORS * rpma_info_delete() can fail with the following error: * * - RPMA_E_INVAL - info_ptr is NULL */ int rpma_info_delete(struct rpma_info **info_ptr); /* * rpma_info_resolve_addr -- resolve the CM ID's destination address * * ASSUMPTIONS: * - info->side == RPMA_INFO_ACTIVE * * ERRORS * rpma_info_resolve_addr() can fail with the following errors: * * - RPMA_E_INVAL - id or info is NULL * - RPMA_E_PROVIDER - resolving the destination address failed */ int rpma_info_resolve_addr(const struct rpma_info *info, struct rdma_cm_id *id, int timeout_ms); /* * rpma_info_bind_addr -- bind the CM ID to the local address * * ASSUMPTIONS: * - info->side == RPMA_INFO_PASSIVE * * ERRORS * rpma_info_bind_addr() can fail with the following errors: * * - RPMA_E_INVAL - id or info is NULL * - RPMA_E_PROVIDER - binding to a local address failed */ int rpma_info_bind_addr(const struct rpma_info *info, struct rdma_cm_id *id); #endif /* LIBRPMA_INFO_H */ 
rpma-1.3.0/src/librpma.c000066400000000000000000000012141443364775400150520ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * librpma.c -- entry points for librpma */ #include "librpma.h" #include "log_internal.h" /* * librpma_init -- load-time initialization for librpma * * Called automatically by the run-time loader. */ #ifdef MOCK_CONSTRUCTOR void #else __attribute__((constructor)) static void #endif librpma_init(void) { rpma_log_init(); } /* * librpma_fini -- librpma cleanup routine * * Called automatically when the process terminates. */ #ifdef MOCK_CONSTRUCTOR void #else __attribute__((destructor)) static void #endif librpma_fini(void) { rpma_log_fini(); } rpma-1.3.0/src/librpma.map000066400000000000000000000043571443364775400154200ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2019-2022, Intel Corporation # Copyright 2021-2022, Fujitsu # # src/librpma.map -- linker version script for librpma # LIBRPMA_1.0 { global: rpma_atomic_write; rpma_conn_apply_remote_peer_cfg; rpma_conn_cfg_delete; rpma_conn_cfg_get_compl_channel; rpma_conn_cfg_get_cq_size; rpma_conn_cfg_get_rcq_size; rpma_conn_cfg_get_rq_size; rpma_conn_cfg_get_sq_size; rpma_conn_cfg_get_srq; rpma_conn_cfg_get_timeout; rpma_conn_cfg_new; rpma_conn_cfg_set_compl_channel; rpma_conn_cfg_set_cq_size; rpma_conn_cfg_set_rcq_size; rpma_conn_cfg_set_rq_size; rpma_conn_cfg_set_sq_size; rpma_conn_cfg_set_srq; rpma_conn_cfg_set_timeout; rpma_conn_delete; rpma_conn_disconnect; rpma_conn_get_cq; rpma_conn_get_compl_fd; rpma_conn_get_event_fd; rpma_conn_get_private_data; rpma_conn_get_qp_num; rpma_conn_get_rcq; rpma_conn_next_event; rpma_conn_req_connect; rpma_conn_req_delete; rpma_conn_req_get_private_data; rpma_conn_req_new; rpma_conn_req_recv; rpma_conn_wait; rpma_cq_get_fd; rpma_cq_get_wc; rpma_cq_wait; rpma_ep_get_fd; rpma_ep_listen; rpma_ep_next_conn_req; rpma_ep_shutdown; rpma_err_2str; rpma_flush; rpma_log_get_threshold; rpma_log_set_function; rpma_log_set_threshold; rpma_mr_advise; rpma_mr_dereg; rpma_mr_get_descriptor; rpma_mr_get_descriptor_size; rpma_mr_get_ptr; rpma_mr_get_size; rpma_mr_reg; rpma_mr_remote_delete; rpma_mr_remote_from_descriptor; rpma_mr_remote_get_flush_type; rpma_mr_remote_get_size; rpma_peer_cfg_delete; rpma_peer_cfg_from_descriptor; rpma_peer_cfg_get_descriptor; rpma_peer_cfg_get_descriptor_size; rpma_peer_cfg_get_direct_write_to_pmem; rpma_peer_cfg_new; rpma_peer_cfg_set_direct_write_to_pmem; rpma_peer_delete; rpma_peer_new; rpma_read; rpma_recv; rpma_send; rpma_send_with_imm; rpma_srq_cfg_delete; rpma_srq_cfg_get_rcq_size; rpma_srq_cfg_get_rq_size; rpma_srq_cfg_new; rpma_srq_cfg_set_rcq_size; rpma_srq_cfg_set_rq_size; rpma_srq_delete; rpma_srq_get_rcq; rpma_srq_new; rpma_srq_recv; rpma_utils_conn_event_2str; rpma_utils_get_ibv_context; rpma_utils_ibv_context_is_odp_capable; rpma_write; rpma_write_with_imm; local: *; }; rpma-1.3.0/src/log.c000066400000000000000000000110431443364775400142060ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * log.c -- support for logging output to either syslog or stderr or * via user defined function */ #include #include #include #ifdef ATOMIC_OPERATIONS_SUPPORTED #include #endif /* ATOMIC_OPERATIONS_SUPPORTED */ #include #include "librpma.h" #include "log_default.h" #include "log_internal.h" /* * Default levels of the logging thresholds */ #ifdef DEBUG #define RPMA_LOG_THRESHOLD_DEFAULT 
RPMA_LOG_LEVEL_DEBUG #define RPMA_LOG_THRESHOLD_AUX_DEFAULT RPMA_LOG_LEVEL_WARNING #else #define RPMA_LOG_THRESHOLD_DEFAULT RPMA_LOG_LEVEL_WARNING #define RPMA_LOG_THRESHOLD_AUX_DEFAULT RPMA_LOG_DISABLED #endif /* * Rpma_log_function -- pointer to the logging function saved as uintptr_t to make it _Atomic, * because function pointers cannot be _Atomic. By default it is rpma_log_default_function(), * but could be a user-defined logging function provided via rpma_log_set_function(). */ #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ uintptr_t Rpma_log_function; /* threshold levels */ #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ enum rpma_log_level Rpma_log_threshold[] = { RPMA_LOG_THRESHOLD_DEFAULT, RPMA_LOG_THRESHOLD_AUX_DEFAULT }; /* * rpma_log_init -- initialize and set the default logging function */ void rpma_log_init() { /* enable the default logging function */ rpma_log_default_init(); while (RPMA_E_AGAIN == rpma_log_set_function( RPMA_LOG_USE_DEFAULT_FUNCTION)) ; } /* * rpma_log_fini -- disable logging and cleanup the default logging function */ void rpma_log_fini() { /* * NULL-ed function pointer turns off the logging. No matter if * the previous value was the default logging function or a user * logging function. */ Rpma_log_function = 0; /* cleanup the default logging function */ rpma_log_default_fini(); } /* public librpma log API */ #if defined(RPMA_UNIT_TESTS) && !defined(ATOMIC_OPERATIONS_SUPPORTED) int mock__sync_bool_compare_and_swap__function(uintptr_t *ptr, uintptr_t oldval, uintptr_t newval); #define __sync_bool_compare_and_swap \ mock__sync_bool_compare_and_swap__function #endif /* * rpma_log_set_function -- set the log function pointer either to * a user-provided function pointer or to the default logging function. 
*/ int rpma_log_set_function(rpma_log_function *log_function) { if (log_function == RPMA_LOG_USE_DEFAULT_FUNCTION) log_function = rpma_log_default_function; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&Rpma_log_function, (uintptr_t)log_function, __ATOMIC_SEQ_CST); return 0; #else uintptr_t log_function_old = Rpma_log_function; if (__sync_bool_compare_and_swap(&Rpma_log_function, log_function_old, (uintptr_t)log_function)) return 0; else return RPMA_E_AGAIN; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ } #if defined(RPMA_UNIT_TESTS) && !defined(ATOMIC_OPERATIONS_SUPPORTED) #undef __sync_bool_compare_and_swap int mock__sync_bool_compare_and_swap__threshold(enum rpma_log_level *ptr, enum rpma_log_level oldval, enum rpma_log_level newval); #define __sync_bool_compare_and_swap \ mock__sync_bool_compare_and_swap__threshold #endif /* * rpma_log_set_threshold -- set the log level threshold */ int rpma_log_set_threshold(enum rpma_log_threshold threshold, enum rpma_log_level level) { if (threshold != RPMA_LOG_THRESHOLD && threshold != RPMA_LOG_THRESHOLD_AUX) return RPMA_E_INVAL; if (level < RPMA_LOG_DISABLED || level > RPMA_LOG_LEVEL_DEBUG) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&Rpma_log_threshold[threshold], level, __ATOMIC_SEQ_CST); return 0; #else enum rpma_log_level level_old; while (RPMA_E_AGAIN == rpma_log_get_threshold(threshold, &level_old)) ; if (__sync_bool_compare_and_swap(&Rpma_log_threshold[threshold], level_old, level)) return 0; else return RPMA_E_AGAIN; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ } #ifdef RPMA_UNIT_TESTS #undef __sync_bool_compare_and_swap #endif /* * rpma_log_get_threshold -- get the log level threshold */ int rpma_log_get_threshold(enum rpma_log_threshold threshold, enum rpma_log_level *level) { if (threshold != RPMA_LOG_THRESHOLD && threshold != RPMA_LOG_THRESHOLD_AUX) return RPMA_E_INVAL; if (level == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *level = atomic_load_explicit(&Rpma_log_threshold[threshold], __ATOMIC_SEQ_CST); #else *level = Rpma_log_threshold[threshold]; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } rpma-1.3.0/src/log_default.c000066400000000000000000000104371443364775400157200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * log_default.c -- the default logging function with support for logging either * to syslog or to stderr */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include "log_default.h" #include "log_internal.h" static const char rpma_log_level_names[6][9] = { [RPMA_LOG_LEVEL_FATAL] = "*FATAL* ", [RPMA_LOG_LEVEL_ERROR] = "*ERROR* ", [RPMA_LOG_LEVEL_WARNING] = "*WARN* ", [RPMA_LOG_LEVEL_NOTICE] = "*NOTE* ", [RPMA_LOG_LEVEL_INFO] = "*INFO* ", [RPMA_LOG_LEVEL_DEBUG] = "*DEBUG* ", }; static const int rpma_log_level_syslog_severity[] = { [RPMA_LOG_LEVEL_FATAL] = LOG_CRIT, [RPMA_LOG_LEVEL_ERROR] = LOG_ERR, [RPMA_LOG_LEVEL_WARNING] = LOG_WARNING, [RPMA_LOG_LEVEL_NOTICE] = LOG_NOTICE, [RPMA_LOG_LEVEL_INFO] = LOG_INFO, [RPMA_LOG_LEVEL_DEBUG] = LOG_DEBUG, }; /* * rpma_get_timestamp_prefix -- provide actual time in a readable string * * NOTE * This function is static now, so we know all possible calls of snprintf() * and we conclude it can not fail. 
* * ASSUMPTIONS: * - buf != NULL && buf_size >= 16 */ static void rpma_get_timestamp_prefix(char *buf, size_t buf_size) { struct tm info; char date[24]; struct timespec ts; long usec; const char error_message[] = "[time error] "; if (clock_gettime(CLOCK_REALTIME, &ts)) goto err_message; if (NULL == localtime_r(&ts.tv_sec, &info)) goto err_message; usec = ts.tv_nsec / 1000; if (!strftime(date, sizeof(date), "%b %d %H:%M:%S", &info)) goto err_message; /* it cannot fail - please see the note above */ (void) snprintf(buf, buf_size, "%s.%06ld ", date, usec); if (strnlen(buf, buf_size) == buf_size) goto err_message; return; err_message: memcpy(buf, error_message, sizeof(error_message)); } /* * rpma_log_default_function -- default logging function used to log a message * to syslog and/or stderr * * The message is started with prefix composed from file, line, func parameters followed by string * pointed by format. If format includes format specifiers (subsequences beginning with %), * the additional arguments following format are formatted and inserted in the message. * * ASSUMPTIONS: * - level >= RPMA_LOG_LEVEL_FATAL && level <= RPMA_LOG_LEVEL_DEBUG * - level <= Rpma_log_threshold[RPMA_LOG_THRESHOLD] * - file == NULL || (file != NULL && function != NULL) */ void rpma_log_default_function(enum rpma_log_level level, const char *file_name, const int line_no, const char *function_name, const char *message_format, ...) { char file_info_buffer[256] = ""; const char *file_info = file_info_buffer; char message[1024] = ""; const char file_info_error[] = "[file info error]: "; if (RPMA_LOG_DISABLED == level) return; va_list arg; va_start(arg, message_format); if (vsnprintf(message, sizeof(message), message_format, arg) < 0) { va_end(arg); return; } va_end(arg); if (file_name) { /* extract base_file_name */ const char *base_file_name = strrchr(file_name, '/'); if (!base_file_name) base_file_name = file_name; else /* skip '/' */ base_file_name++; if (snprintf(file_info_buffer, sizeof(file_info_buffer), "%s: %3d: %s: ", base_file_name, line_no, function_name) < 0) { file_info = file_info_error; } } if (level <= Rpma_log_threshold[RPMA_LOG_THRESHOLD_AUX] || level == RPMA_LOG_LEVEL_ALWAYS) { char times_tamp[45] = ""; rpma_get_timestamp_prefix(times_tamp, sizeof(times_tamp)); (void) fprintf(stderr, "%s[%ld] %s%s%s", times_tamp, syscall(SYS_gettid), rpma_log_level_names[(level == RPMA_LOG_LEVEL_ALWAYS) ? 
RPMA_LOG_LEVEL_DEBUG : level], file_info, message); } /* do not log to syslog in case of RPMA_LOG_LEVEL_ALWAYS */ if (RPMA_LOG_LEVEL_ALWAYS == level) return; /* assumed: level <= Rpma_log_threshold[RPMA_LOG_THRESHOLD] */ syslog(rpma_log_level_syslog_severity[level], "%s%s%s", rpma_log_level_names[level], file_info, message); } /* * rpma_log_default_init -- open a connection to the system logger */ void rpma_log_default_init(void) { openlog("rpma", LOG_PID, LOG_LOCAL7); } /* * rpma_log_default_fini -- close the descriptor being used to write to * the system logger */ void rpma_log_default_fini(void) { closelog(); } rpma-1.3.0/src/log_default.h000066400000000000000000000007761443364775400157320ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * log_default.h -- the default logging function definitions */ #ifndef LIBRPMA_LOG_DEFAULT_H #define LIBRPMA_LOG_DEFAULT_H #include "librpma.h" void rpma_log_default_function(enum rpma_log_level level, const char *file_name, const int line_no, const char *function_name, const char *message_format, ...); void rpma_log_default_init(void); void rpma_log_default_fini(void); #endif /* LIBRPMA_LOG_DEFAULT_H */ rpma-1.3.0/src/log_internal.h000066400000000000000000000043471443364775400161200ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * log_internal.h -- internal logging interfaces used by librpma */ #ifndef LIBRPMA_LOG_INTERNAL_H #define LIBRPMA_LOG_INTERNAL_H #include #include #include "librpma.h" #include "log_default.h" #ifdef ATOMIC_OPERATIONS_SUPPORTED #include #endif /* ATOMIC_OPERATIONS_SUPPORTED */ /* pointer to the logging function */ extern #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ uintptr_t Rpma_log_function; /* threshold levels */ extern #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ enum rpma_log_level Rpma_log_threshold[RPMA_LOG_THRESHOLD_MAX]; void rpma_log_init(); void rpma_log_fini(); #define RPMA_LOG(level, format, ...) \ do { \ if (level <= Rpma_log_threshold[RPMA_LOG_THRESHOLD] && 0 != Rpma_log_function) { \ ((rpma_log_function *)Rpma_log_function)(level, __FILE__, __LINE__, \ __func__, format, ##__VA_ARGS__); \ } \ } while (0) #define RPMA_LOG_LEVEL_ALWAYS (RPMA_LOG_DISABLED - 1) #define RPMA_LOG_ALWAYS(format, ...) \ rpma_log_default_function(RPMA_LOG_LEVEL_ALWAYS, __FILE__, __LINE__, __func__, \ format "\n", ##__VA_ARGS__) /* * Set of macros that should be used as the primary API for logging. * Direct call to rpma_log shall be used only in exceptional, corner cases. */ #define RPMA_LOG_DEBUG(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_DEBUG, format "\n", ##__VA_ARGS__) #define RPMA_LOG_INFO(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_INFO, format "\n", ##__VA_ARGS__) #define RPMA_LOG_NOTICE(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_NOTICE, format "\n", ##__VA_ARGS__) #define RPMA_LOG_WARNING(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_WARNING, format "\n", ##__VA_ARGS__) #define RPMA_LOG_ERROR(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_ERROR, format "\n", ##__VA_ARGS__) #define RPMA_LOG_FATAL(format, ...) \ RPMA_LOG(RPMA_LOG_LEVEL_FATAL, format "\n", ##__VA_ARGS__) /* * 'f' stands here for 'function' or 'format' where the latter may accept * additional arguments. */ #define RPMA_LOG_ERROR_WITH_ERRNO(e, f, ...) 
\ RPMA_LOG_ERROR(f " failed: %s", ##__VA_ARGS__, strerror(abs(e))); #endif /* LIBRPMA_LOG_INTERNAL_H */ rpma-1.3.0/src/mr.c000066400000000000000000000411511443364775400140460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * mr.c -- librpma memory region-related implementations */ #include #include #include #include "librpma.h" #include "debug.h" #include "log_internal.h" #include "mr.h" #include "peer.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif #define STATIC_ASSERT(cond, msg) typedef char static_assertion_##msg[(cond) ? 1 : -1] #define SIZEOF_IN_BITS(type) (8 * sizeof(type)) #define MAX_VALUE_OF(type) ((1 << SIZEOF_IN_BITS(type)) - 1) #define RPMA_MR_DESC_SIZE (2 * sizeof(uint64_t) + sizeof(uint32_t) + sizeof(uint8_t)) /* a bit-wise OR of all allowed values */ #define USAGE_ALL_ALLOWED (RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_READ_DST |\ RPMA_MR_USAGE_WRITE_SRC | RPMA_MR_USAGE_WRITE_DST |\ RPMA_MR_USAGE_SEND | RPMA_MR_USAGE_RECV |\ RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY |\ RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT) /* * Make sure the size of the usage field in the rpma_mr_get_descriptor() * and rpma_mr_remote_from_descriptor() functions ('uint8_t' as for now) * is big enough to store all possible 'RPMA_MR_USAGE_*' values. */ STATIC_ASSERT(USAGE_ALL_ALLOWED <= MAX_VALUE_OF(uint8_t), usage_too_small); /* generate operation completion on success */ #define RPMA_F_COMPLETION_ON_SUCCESS (RPMA_F_COMPLETION_ALWAYS & ~RPMA_F_COMPLETION_ON_ERROR) struct rpma_mr_local { struct ibv_mr *ibv_mr; /* an IBV memory registration object */ int usage; /* usage of the memory region */ }; struct rpma_mr_remote { uint64_t raddr; /* the base virtual address of the memory region */ uint64_t size; /* the size of the memory being registered */ uint32_t rkey; /* remote key of the memory region */ int usage; /* usage of the memory region */ }; /* internal librpma API */ /* * rpma_mr_read -- post an RDMA read from src to dst */ int rpma_mr_read(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t dst_offset, const struct rpma_mr_remote *src, size_t src_offset, size_t len, int flags, const void *op_context) { RPMA_DEBUG_TRACE; struct ibv_send_wr wr = {0}; struct ibv_sge sge = {0}; if (src == NULL) { /* source */ wr.wr.rdma.remote_addr = 0; wr.wr.rdma.rkey = 0; /* destination */ wr.sg_list = NULL; wr.num_sge = 0; } else { /* source */ wr.wr.rdma.remote_addr = src->raddr + src_offset; wr.wr.rdma.rkey = src->rkey; /* destination */ sge.addr = (uint64_t)((uintptr_t)dst->ibv_mr->addr + dst_offset); sge.length = (uint32_t)len; sge.lkey = dst->ibv_mr->lkey; wr.sg_list = &sge; wr.num_sge = 1; } wr.wr_id = (uint64_t)op_context; wr.next = NULL; wr.opcode = IBV_WR_RDMA_READ; wr.send_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0; struct ibv_send_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_send(qp, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_send(src_addr=0x%x, rkey=0x%x, dst_addr=0x%x, length=%u, lkey=0x%x, wr_id=0x%x, opcode=IBV_WR_RDMA_READ, send_flags=%s)", wr.wr.rdma.remote_addr, wr.wr.rdma.rkey, sge.addr, sge.length, sge.lkey, wr.wr_id, (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? 
"IBV_SEND_SIGNALED" : "0"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_mr_write -- post an RDMA write from src to dst */ int rpma_mr_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context) { RPMA_DEBUG_TRACE; struct ibv_send_wr wr = {0}; struct ibv_sge sge = {0}; if (src == NULL) { /* source */ wr.sg_list = NULL; wr.num_sge = 0; /* destination */ wr.wr.rdma.remote_addr = 0; wr.wr.rdma.rkey = 0; } else { /* source */ sge.addr = (uint64_t)((uintptr_t)src->ibv_mr->addr + src_offset); sge.length = (uint32_t)len; sge.lkey = src->ibv_mr->lkey; wr.sg_list = &sge; wr.num_sge = 1; /* destination */ wr.wr.rdma.remote_addr = dst->raddr + dst_offset; wr.wr.rdma.rkey = dst->rkey; } wr.wr_id = (uint64_t)op_context; wr.next = NULL; wr.opcode = operation; switch (wr.opcode) { case IBV_WR_RDMA_WRITE: break; case IBV_WR_RDMA_WRITE_WITH_IMM: wr.imm_data = htonl(imm); break; default: RPMA_LOG_ERROR("unsupported wr.opcode == %d", wr.opcode); return RPMA_E_NOSUPP; } RPMA_FAULT_INJECTION(RPMA_E_NOSUPP, { wr.opcode = IBV_WR_RDMA_READ; }); wr.send_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0; struct ibv_send_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_send(qp, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_send(dst_addr=0x%x, rkey=0x%x, src_addr=0x%x, length=%u, lkey=0x%x, wr_id=0x%x, opcode=IBV_WR_RDMA_WRITE, send_flags=%s)", wr.wr.rdma.remote_addr, wr.wr.rdma.rkey, sge.addr, sge.length, sge.lkey, wr.wr_id, (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? "IBV_SEND_SIGNALED" : "0"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_mr_atomic_write -- post the atomic 8 bytes RDMA write from src to dst */ int rpma_mr_atomic_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const char src[8], int flags, const void *op_context) { RPMA_DEBUG_TRACE; #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp); /* check if the created QP supports native atomic write */ if (qpx && qpx->wr_atomic_write) { ibv_wr_start(qpx); qpx->wr_id = (uint64_t)op_context; qpx->wr_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); ibv_wr_atomic_write(qpx, dst->rkey, dst->raddr + dst_offset, src); int ret = ibv_wr_complete(qpx); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_wr_complete()"); return RPMA_E_PROVIDER; } return 0; } #endif struct ibv_send_wr wr = {0}; struct ibv_sge sge = {0}; /* source */ sge.addr = (uint64_t)((uintptr_t)src); sge.length = 8; /* 8-bytes atomic write with IBV_SEND_INLINE flag */ wr.sg_list = &sge; wr.num_sge = 1; /* destination */ wr.wr.rdma.remote_addr = dst->raddr + dst_offset; wr.wr.rdma.rkey = dst->rkey; wr.wr_id = (uint64_t)op_context; wr.next = NULL; wr.opcode = IBV_WR_RDMA_WRITE; /* * IBV_SEND_FENCE is used here to force any ongoing read operation * (that may emulate a remote flush) to be finished before * the atomic write is executed. 
*/ wr.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE; if (flags & RPMA_F_COMPLETION_ON_SUCCESS) wr.send_flags |= IBV_SEND_SIGNALED; struct ibv_send_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_send(qp, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_send(dst_addr=0x%x, rkey=0x%x, src_addr=0x%x, wr_id=0x%x, opcode=IBV_WR_RDMA_WRITE, send_flags=%s)", wr.wr.rdma.remote_addr, wr.wr.rdma.rkey, sge.addr, wr.wr_id, (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? "IBV_SEND_SIGNALED" : "0"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_mr_send -- post an RDMA send from src */ int rpma_mr_send(struct ibv_qp *qp, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context) { RPMA_DEBUG_TRACE; struct ibv_send_wr wr = {0}; struct ibv_sge sge; /* source */ if (src == NULL) { wr.sg_list = NULL; wr.num_sge = 0; } else { sge.addr = (uint64_t)((uintptr_t)src->ibv_mr->addr + offset); sge.length = (uint32_t)len; sge.lkey = src->ibv_mr->lkey; wr.sg_list = &sge; wr.num_sge = 1; } wr.next = NULL; wr.opcode = operation; switch (wr.opcode) { case IBV_WR_SEND: break; case IBV_WR_SEND_WITH_IMM: wr.imm_data = htonl(imm); break; default: RPMA_LOG_ERROR("unsupported wr.opcode == %d", wr.opcode); return RPMA_E_NOSUPP; } RPMA_FAULT_INJECTION(RPMA_E_NOSUPP, { wr.opcode = IBV_WR_RDMA_READ; }); wr.wr_id = (uint64_t)op_context; wr.send_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0; struct ibv_send_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_send(qp, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_send"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_mr_recv -- post an RDMA recv from dst */ int rpma_mr_recv(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { RPMA_DEBUG_TRACE; struct ibv_recv_wr wr = {0}; struct ibv_sge sge; /* source */ if (dst == NULL) { wr.sg_list = NULL; wr.num_sge = 0; } else { sge.addr = (uint64_t)((uintptr_t)dst->ibv_mr->addr + offset); sge.length = (uint32_t)len; sge.lkey = dst->ibv_mr->lkey; wr.sg_list = &sge; wr.num_sge = 1; } wr.next = NULL; wr.wr_id = (uint64_t)op_context; struct ibv_recv_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_recv(qp, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_recv"); return RPMA_E_PROVIDER; } return 0; } /* * rpma_mr_srq_recv -- post an RDMA recv from dst to the shared RQ */ int rpma_mr_srq_recv(struct ibv_srq *ibv_srq, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { RPMA_DEBUG_TRACE; struct ibv_recv_wr wr = {0}; struct ibv_sge sge; /* source */ if (dst == NULL) { wr.sg_list = NULL; wr.num_sge = 0; } else { sge.addr = (uint64_t)((uintptr_t)dst->ibv_mr->addr + offset); sge.length = (uint32_t)len; sge.lkey = dst->ibv_mr->lkey; wr.sg_list = &sge; wr.num_sge = 1; } wr.next = NULL; wr.wr_id = (uint64_t)op_context; struct ibv_recv_wr *bad_wr; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int ret = ibv_post_srq_recv(ibv_srq, &wr, &bad_wr); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_post_srq_recv"); return RPMA_E_PROVIDER; } return 0; } #ifdef NATIVE_FLUSH_SUPPORTED /* * rpma_mr_flush -- initiate the native flush operation */ int rpma_mr_flush(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context) { uint8_t native_type = 0; switch (type) { case RPMA_FLUSH_TYPE_VISIBILITY: 
native_type = IBV_FLUSH_GLOBAL; break; case RPMA_FLUSH_TYPE_PERSISTENT: native_type = IBV_FLUSH_PERSISTENT; } struct ibv_qp_ex *qpx = ibv_qp_to_qp_ex(qp); ibv_wr_start(qpx); qpx->wr_id = (uint64_t)op_context; qpx->wr_flags = (flags & RPMA_F_COMPLETION_ON_SUCCESS) ? IBV_SEND_SIGNALED : 0; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); ibv_wr_flush(qpx, dst->rkey, dst->raddr + dst_offset, len, native_type, IBV_FLUSH_RANGE); int ret = ibv_wr_complete(qpx); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_wr_complete()"); return RPMA_E_PROVIDER; } return 0; } #endif /* public librpma API */ /* * rpma_mr_reg -- create a local memory registration object */ int rpma_mr_reg(struct rpma_peer *peer, void *ptr, size_t size, int usage, struct rpma_mr_local **mr_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); int ret; if (peer == NULL || ptr == NULL || size == 0 || mr_ptr == NULL) return RPMA_E_INVAL; if (usage == 0 || (usage & ~USAGE_ALL_ALLOWED)) return RPMA_E_INVAL; struct rpma_mr_local *mr; mr = malloc(sizeof(struct rpma_mr_local)); if (mr == NULL) return RPMA_E_NOMEM; struct ibv_mr *ibv_mr; ret = rpma_peer_setup_mr_reg(peer, &ibv_mr, ptr, size, usage); if (ret) { free(mr); return ret; } mr->ibv_mr = ibv_mr; mr->usage = usage; *mr_ptr = mr; return 0; } /* * rpma_mr_dereg -- delete a local memory registration object */ int rpma_mr_dereg(struct rpma_mr_local **mr_ptr) { RPMA_DEBUG_TRACE; if (mr_ptr == NULL) return RPMA_E_INVAL; if (*mr_ptr == NULL) return 0; int ret = 0; struct rpma_mr_local *mr = *mr_ptr; errno = ibv_dereg_mr(mr->ibv_mr); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_dereg_mr()"); ret = RPMA_E_PROVIDER; } free(mr); *mr_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } /* * rpma_mr_get_descriptor -- get a descriptor of memory region */ int rpma_mr_get_descriptor(const struct rpma_mr_local *mr, void *desc) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || desc == NULL) return RPMA_E_INVAL; char *buff = (char *)desc; uint64_t addr = htole64((uint64_t)mr->ibv_mr->addr); memcpy(buff, &addr, sizeof(uint64_t)); buff += sizeof(uint64_t); uint64_t length = htole64((uint64_t)mr->ibv_mr->length); memcpy(buff, &length, sizeof(uint64_t)); buff += sizeof(uint64_t); uint32_t rkey = htole32(mr->ibv_mr->rkey); memcpy(buff, &rkey, sizeof(uint32_t)); buff += sizeof(uint32_t); *((uint8_t *)buff) = (uint8_t)mr->usage; return 0; } /* * rpma_mr_remote_from_descriptor -- create a remote memory region from a descriptor */ int rpma_mr_remote_from_descriptor(const void *desc, size_t desc_size, struct rpma_mr_remote **mr_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (desc == NULL || mr_ptr == NULL) return RPMA_E_INVAL; char *buff = (char *)desc; uint64_t raddr; uint64_t size; uint32_t rkey; if (desc_size < RPMA_MR_DESC_SIZE) { RPMA_LOG_ERROR( "incorrect size of the descriptor: %i bytes (should be at least: %i bytes)", desc_size, RPMA_MR_DESC_SIZE); return RPMA_E_INVAL; } memcpy(&raddr, buff, sizeof(uint64_t)); buff += sizeof(uint64_t); memcpy(&size, buff, sizeof(uint64_t)); buff += sizeof(uint64_t); memcpy(&rkey, buff, sizeof(uint32_t)); buff += sizeof(uint32_t); uint8_t usage = *(uint8_t *)buff; if (usage == 0) { RPMA_LOG_ERROR("usage type of memory is not set"); return RPMA_E_INVAL; } struct rpma_mr_remote *mr = malloc(sizeof(struct rpma_mr_remote)); if (mr == NULL) return RPMA_E_NOMEM; mr->raddr = le64toh(raddr); mr->size = le64toh(size); mr->rkey = le32toh(rkey); mr->usage = usage; *mr_ptr = mr; RPMA_LOG_DEBUG("new struct 
rpma_mr_remote {raddr=0x%" PRIx64 ", size=%" PRIu64 ", rkey=0x%" PRIx32 ", usage=0x%" PRIx8 "}", raddr, size, rkey, usage); return 0; } /* * rpma_mr_get_descriptor_size -- get size of a memory region descriptor */ int rpma_mr_get_descriptor_size(const struct rpma_mr_local *mr, size_t *desc_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || desc_size == NULL) return RPMA_E_INVAL; *desc_size = RPMA_MR_DESC_SIZE; return 0; } /* * rpma_mr_get_ptr -- get a local registered memory pointer */ int rpma_mr_get_ptr(const struct rpma_mr_local *mr, void **ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || ptr == NULL) return RPMA_E_INVAL; *ptr = mr->ibv_mr->addr; return 0; } /* * rpma_mr_get_size -- get a local registered memory size */ int rpma_mr_get_size(const struct rpma_mr_local *mr, size_t *size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || size == NULL) return RPMA_E_INVAL; *size = mr->ibv_mr->length; return 0; } /* * rpma_mr_remote_get_size -- get a remote memory region size */ int rpma_mr_remote_get_size(const struct rpma_mr_remote *mr, size_t *size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || size == NULL) return RPMA_E_INVAL; *size = mr->size; return 0; } /* * rpma_mr_remote_delete -- delete a remote memory region's structure */ int rpma_mr_remote_delete(struct rpma_mr_remote **mr_ptr) { RPMA_DEBUG_TRACE; if (mr_ptr == NULL) return RPMA_E_INVAL; if (*mr_ptr == NULL) return 0; free(*mr_ptr); *mr_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_mr_remote_get_flush_type -- get a flush type supported by the remote memory region */ int rpma_mr_remote_get_flush_type(const struct rpma_mr_remote *mr, int *flush_type) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (mr == NULL || flush_type == NULL) return RPMA_E_INVAL; *flush_type = mr->usage & (RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT | RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY); return 0; } /* * rpma_mr_advise -- give advice about an address range in a memory registration */ int rpma_mr_advise(struct rpma_mr_local *mr, size_t offset, size_t len, int advice, uint32_t flags) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); #ifdef IBV_ADVISE_MR_SUPPORTED struct ibv_sge sg_list; sg_list.lkey = mr->ibv_mr->lkey; sg_list.addr = (uint64_t)((uintptr_t)mr->ibv_mr->addr + offset); sg_list.length = (uint32_t)len; int ret = ibv_advise_mr(mr->ibv_mr->pd, (enum ibv_advise_mr_advice)advice, flags, &sg_list, 1); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(ret, "ibv_advise_mr()"); if (ret == EOPNOTSUPP || ret == ENOTSUP) return RPMA_E_NOSUPP; else if (ret == EFAULT || ret == EINVAL) return RPMA_E_INVAL; else return RPMA_E_PROVIDER; } return 0; #else RPMA_LOG_ERROR("ibv_advise_mr() is not supported by the system"); return RPMA_E_NOSUPP; #endif } rpma-1.3.0/src/mr.h000066400000000000000000000061741443364775400140610ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * mr.h -- librpma memory region-related internal definitions */ #ifndef LIBRPMA_MR_H #define LIBRPMA_MR_H #include "librpma.h" #include /* * ASSUMPTIONS * - qp != NULL && flags != 0 * - (src != NULL && dst != NULL) || * (src == NULL && dst == NULL && * dst_offset == 0 && src_offset == 0 && len == 0) * * ERRORS * rpma_mr_read() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_post_send(3) failed */ int 
rpma_mr_read(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t dst_offset, const struct rpma_mr_remote *src, size_t src_offset, size_t len, int flags, const void *op_context); /* * ASSUMPTIONS * - qp != NULL && flags != 0 * - (src != NULL && dst != NULL) || * (src == NULL && dst == NULL && * dst_offset == 0 && src_offset == 0 && len == 0) * * ERRORS * rpma_mr_write() can fail with the following errors: * * - RPMA_E_NOSUPP - unsupported 'operation' argument * - RPMA_E_PROVIDER - ibv_post_send(3) failed */ int rpma_mr_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context); /* * ASSUMPTIONS * - qp != NULL && dst != NULL && src != NULL && flags != 0 * * ERRORS * rpma_mr_atomic_write() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_post_send(3) failed */ int rpma_mr_atomic_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const char src[8], int flags, const void *op_context); /* * ASSUMPTIONS * - qp != NULL && flags != 0 * - src != NULL || (offset == 0 && len == 0) * * ERRORS * rpma_mr_send() can fail with the following error: * * - RPMA_E_NOSUPP - unsupported 'operation' argument * - RPMA_E_PROVIDER - ibv_post_send(3) failed */ int rpma_mr_send(struct ibv_qp *qp, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context); /* * ASSUMPTIONS * - qp != NULL * - dst != NULL || (offset == 0 && len == 0) * * ERRORS * rpma_mr_recv() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_post_recv(3) failed */ int rpma_mr_recv(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context); /* * ASSUMPTIONS * - srq != NULL * - dst != NULL || (offset == 0 && len == 0) * * ERRORS * rpma_mr_srq_recv() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_post_srq_recv(3) failed */ int rpma_mr_srq_recv(struct ibv_srq *ibv_srq, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context); /* * ASSUMPTIONS * - qp != NULL && dst != NULL && flags != 0 * * ERRORS * rpma_mr_flush() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_wr_complete(3) failed */ int rpma_mr_flush(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context); #endif /* LIBRPMA_MR_H */ rpma-1.3.0/src/peer.c000066400000000000000000000260251443364775400143660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * peer.c -- librpma peer-related implementations */ #include #include #include #include "conn_req.h" #include "debug.h" #include "log_internal.h" #include "peer.h" #include "srq.h" #include "srq_cfg.h" #include "utils.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif /* the maximum number of scatter/gather elements in any Work Request */ #define RPMA_MAX_SGE 1 /* the maximum message size (in bytes) that can be posted inline */ #define RPMA_MAX_INLINE_DATA 8 struct rpma_peer { struct ibv_pd *pd; /* a protection domain */ int is_odp_supported; /* is On-Demand Paging supported */ int is_native_atomic_write_supported; /* is native atomic write supported */ int is_native_flush_supported; /* is native flush supported */ }; /* internal librpma API */ /* * rpma_peer_usage2access -- convert 
usage to access * * Note: APM type of flush requires the same access as RPMA_MR_USAGE_READ_SRC */ static int rpma_peer_usage2access(struct rpma_peer *peer, int usage) { RPMA_DEBUG_TRACE; enum ibv_transport_type type = peer->pd->context->device->transport_type; int access = 0; if (usage & RPMA_MR_USAGE_READ_SRC) access |= IBV_ACCESS_REMOTE_READ; #ifdef NATIVE_FLUSH_SUPPORTED if (peer->is_native_flush_supported) { if (usage & RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY) access |= IBV_ACCESS_FLUSH_GLOBAL; if (usage & RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT) access |= IBV_ACCESS_FLUSH_PERSISTENT; } else { if (usage & (RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY | RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT)) access |= IBV_ACCESS_REMOTE_READ; } #else if (usage & (RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY | RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT)) access |= IBV_ACCESS_REMOTE_READ; #endif if (usage & RPMA_MR_USAGE_READ_DST) { access |= IBV_ACCESS_LOCAL_WRITE; /* * iWARP implements the READ operation as the WRITE operation * in the opposite direction. */ if (type == IBV_TRANSPORT_IWARP) access |= IBV_ACCESS_REMOTE_WRITE; } if (usage & RPMA_MR_USAGE_WRITE_SRC) access |= IBV_ACCESS_LOCAL_WRITE; if (usage & RPMA_MR_USAGE_WRITE_DST) /* * If IBV_ACCESS_REMOTE_WRITE is set, then IBV_ACCESS_LOCAL_WRITE must be set too. */ access |= IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE; if (usage & RPMA_MR_USAGE_RECV) access |= IBV_ACCESS_LOCAL_WRITE; /* * There is no IBV_ACCESS_* value to be set for RPMA_MR_USAGE_SEND. */ return access; } /* * rpma_peer_create_srq -- create a new shared RQ and a new shared receive CQ * if the size of the receive CQ in cfg is greater than 0 * * ASSUMPTIONS * - peer != NULL && cfg != NULL && ibv_srq_ptr != NULL && rcq_ptr != NULL */ int rpma_peer_create_srq(struct rpma_peer *peer, struct rpma_srq_cfg *cfg, struct ibv_srq **ibv_srq_ptr, struct rpma_cq **rcq_ptr) { RPMA_DEBUG_TRACE; uint32_t rq_size = 0; /* read size of the shared RQ from the configuration */ (void) rpma_srq_cfg_get_rq_size(cfg, &rq_size); struct ibv_srq_init_attr srq_init_attr; srq_init_attr.srq_context = NULL; srq_init_attr.attr.max_wr = rq_size; srq_init_attr.attr.max_sge = 1; srq_init_attr.attr.srq_limit = 0; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); struct ibv_srq *ibv_srq = ibv_create_srq(peer->pd, &srq_init_attr); if (ibv_srq == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_create_srq()"); return RPMA_E_PROVIDER; } /* read size of the shared receive CQ from the configuration */ int rcqe; (void) rpma_srq_cfg_get_rcqe(cfg, &rcqe); int ret = 0; struct rpma_cq *rcq = NULL; if (rcqe) { ret = rpma_cq_new(peer->pd->context, rcqe, NULL, &rcq); if (ret) goto err_srq_delete; } *ibv_srq_ptr = ibv_srq; *rcq_ptr = rcq; return 0; err_srq_delete: (void) ibv_destroy_srq(ibv_srq); return ret; } /* * rpma_peer_setup_qp -- allocate a QP associated with the CM ID * * ASSUMPTIONS * - cfg != NULL */ int rpma_peer_setup_qp(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, const struct rpma_conn_cfg *cfg) { RPMA_DEBUG_TRACE; if (peer == NULL || id == NULL || cq == NULL) return RPMA_E_INVAL; /* read SQ and RQ sizes from the configuration */ uint32_t sq_size = 0; uint32_t rq_size = 0; struct rpma_srq *srq = NULL; (void) rpma_conn_cfg_get_sq_size(cfg, &sq_size); (void) rpma_conn_cfg_get_rq_size(cfg, &rq_size); /* get the shared RQ object from the connection */ (void) rpma_conn_cfg_get_srq(cfg, &srq); struct ibv_srq *ibv_srq = srq ? 
rpma_srq_get_ibv_srq(srq) : NULL; struct ibv_cq *ibv_cq = rpma_cq_get_ibv_cq(cq); struct ibv_qp_init_attr_ex qp_init_attr; qp_init_attr.qp_context = NULL; qp_init_attr.send_cq = ibv_cq; qp_init_attr.recv_cq = rcq ? rpma_cq_get_ibv_cq(rcq) : ibv_cq; qp_init_attr.srq = ibv_srq; qp_init_attr.cap.max_send_wr = sq_size; qp_init_attr.cap.max_recv_wr = rq_size; qp_init_attr.cap.max_send_sge = RPMA_MAX_SGE; qp_init_attr.cap.max_recv_sge = RPMA_MAX_SGE; qp_init_attr.cap.max_inline_data = RPMA_MAX_INLINE_DATA; /* * Reliable Connection - since we are using e.g. IBV_WR_RDMA_READ. * For details please see ibv_post_send(3). */ qp_init_attr.qp_type = IBV_QPT_RC; /* * Every Work Request has to decide whether to generate CQ entry for its successful * completion. Please see IBV_SEND_SIGNALED in ibv_post_send(3). */ qp_init_attr.sq_sig_all = 0; qp_init_attr.comp_mask = IBV_QP_INIT_ATTR_PD; #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) qp_init_attr.comp_mask |= IBV_QP_INIT_ATTR_SEND_OPS_FLAGS; qp_init_attr.send_ops_flags = 0; #endif #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED if (peer->is_native_atomic_write_supported) qp_init_attr.send_ops_flags |= IBV_QP_EX_WITH_ATOMIC_WRITE; #endif #ifdef NATIVE_FLUSH_SUPPORTED if (peer->is_native_flush_supported) qp_init_attr.send_ops_flags |= IBV_QP_EX_WITH_FLUSH; #endif qp_init_attr.pd = peer->pd; /* * The actual capabilities and properties of the created QP are returned through * qp_init_attr. */ RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (rdma_create_qp_ex(id, &qp_init_attr)) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_qp_ex(max_send_wr=%" PRIu32 ", max_recv_wr=%" PRIu32 ", max_send/recv_sge=%i, max_inline_data=%i, qp_type=IBV_QPT_RC, sq_sig_all=0)", sq_size, rq_size, RPMA_MAX_SGE, RPMA_MAX_INLINE_DATA); return RPMA_E_PROVIDER; } return 0; } /* * Since rdma-core v27.0-105-g5a750676 * ibv_reg_mr() has been defined as a macro * and its signature has been changed so that * the 'access' argument is of the 'unsigned int' type now: * * https://github.com/linux-rdma/rdma-core/commit/5a750676e8312715100900c6336bbc98577e082b */ #if defined(ibv_reg_mr) #define RPMA_IBV_ACCESS(access) (unsigned)access #else #define RPMA_IBV_ACCESS(access) access #endif /* * rpma_peer_setup_mr_reg -- register a memory region using ibv_reg_mr() */ int rpma_peer_setup_mr_reg(struct rpma_peer *peer, struct ibv_mr **ibv_mr_ptr, void *addr, size_t length, int usage) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); int access = rpma_peer_usage2access(peer, usage); *ibv_mr_ptr = ibv_reg_mr(peer->pd, addr, length, RPMA_IBV_ACCESS(access)); if (*ibv_mr_ptr != NULL) return 0; #ifdef ON_DEMAND_PAGING_SUPPORTED if (errno != EOPNOTSUPP) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_reg_mr()"); return RPMA_E_PROVIDER; } if (!peer->is_odp_supported) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "Peer does not support On-Demand Paging: " "ibv_reg_mr(addr=%p, length=%zu, access=%i)", addr, length, access); return RPMA_E_PROVIDER; } /* * If the registration failed with EOPNOTSUPP and On-Demand Paging is * supported we can retry the memory registration with * the IBV_ACCESS_ON_DEMAND flag. */ RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); *ibv_mr_ptr = ibv_reg_mr(peer->pd, addr, length, RPMA_IBV_ACCESS(access) | IBV_ACCESS_ON_DEMAND); if (*ibv_mr_ptr == NULL) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "Memory registration with On-Demand Paging (maybe FSDAX?) 
support failed: " "ibv_reg_mr(addr=%p, length=%zu, access=%i|IBV_ACCESS_ON_DEMAND)", addr, length, access); return RPMA_E_PROVIDER; } return 0; #else RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_reg_mr()"); return RPMA_E_PROVIDER; #endif } /* public librpma API */ /* * rpma_peer_new -- create a new peer object encapsulating a newly allocated * verbs protection domain for provided ibv_context */ int rpma_peer_new(struct ibv_context *ibv_ctx, struct rpma_peer **peer_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); int is_odp_supported = 0; int is_native_atomic_write_supported = 0; int is_native_flush_supported = 0; int ret; if (ibv_ctx == NULL || peer_ptr == NULL) return RPMA_E_INVAL; ret = rpma_utils_ibv_context_is_atomic_write_capable(ibv_ctx, &is_native_atomic_write_supported); if (ret) return ret; if (!is_native_atomic_write_supported) RPMA_LOG_INFO( "Native atomic write is not supported - ordinary RDMA write will be used instead."); ret = rpma_utils_ibv_context_is_flush_capable(ibv_ctx, &is_native_flush_supported); if (ret) return ret; if (!is_native_flush_supported) RPMA_LOG_INFO( "Native flush is not supported - ordinary RDMA read will be used instead."); ret = rpma_utils_ibv_context_is_odp_capable(ibv_ctx, &is_odp_supported); if (ret) return ret; /* * The ibv_alloc_pd(3) manual page does not document that this function returns any error * via errno but seemingly it is. For the usability sake, we try to deduce what really * happened using the errno value. To make sure the errno value was set * by the ibv_alloc_pd(3) function it is zeroed out before the function call. */ RPMA_FAULT_INJECTION(RPMA_E_NOMEM, { errno = ENOMEM; }); RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); RPMA_FAULT_INJECTION(RPMA_E_UNKNOWN, {}); errno = 0; struct ibv_pd *pd = ibv_alloc_pd(ibv_ctx); if (pd == NULL) { if (errno == ENOMEM) { return RPMA_E_NOMEM; } else if (errno != 0) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_alloc_pd()"); return RPMA_E_PROVIDER; } else { return RPMA_E_UNKNOWN; } } RPMA_FAULT_INJECTION_GOTO(RPMA_E_NOMEM, err_dealloc_pd); struct rpma_peer *peer = malloc(sizeof(*peer)); if (peer == NULL) { ret = RPMA_E_NOMEM; goto err_dealloc_pd; } peer->pd = pd; peer->is_odp_supported = is_odp_supported; peer->is_native_atomic_write_supported = is_native_atomic_write_supported; peer->is_native_flush_supported = is_native_flush_supported; *peer_ptr = peer; return 0; err_dealloc_pd: ibv_dealloc_pd(pd); return ret; } /* * rpma_peer_delete -- attempt deallocating the peer-encapsulated verbs protection domain; * if succeeded delete the peer */ int rpma_peer_delete(struct rpma_peer **peer_ptr) { RPMA_DEBUG_TRACE; if (peer_ptr == NULL) return RPMA_E_INVAL; struct rpma_peer *peer = *peer_ptr; if (peer == NULL) return 0; int ret = ibv_dealloc_pd(peer->pd); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_dealloc_pd()"); ret = RPMA_E_PROVIDER; } free(peer); *peer_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } rpma-1.3.0/src/peer.h000066400000000000000000000026431443364775400143730ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * peer.h -- librpma peer-related internal definitions */ #ifndef LIBRPMA_PEER_H #define LIBRPMA_PEER_H #include "librpma.h" #include "cq.h" #include /* * ASSUMPTIONS * - peer != NULL && cfg != NULL && ibv_srq_ptr != NULL && rcq_ptr != NULL * * ERRORS * rpma_peer_create_srq() can fail with the following errors: * * - RPMA_E_PROVIDER - creating a new shared RQ or 
a shared receive CQ failed * - RPMA_E_NOMEM - out of memory */ int rpma_peer_create_srq(struct rpma_peer *peer, struct rpma_srq_cfg *cfg, struct ibv_srq **ibv_srq_ptr, struct rpma_cq **rcq_ptr); /* * ERRORS * rpma_peer_setup_qp() can fail with the following errors: * * - RPMA_E_INVAL - peer, id or cq is NULL * - RPMA_E_PROVIDER - allocating a QP failed */ int rpma_peer_setup_qp(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, const struct rpma_conn_cfg *cfg); /* * ASSUMPTIONS * - peer != NULL && ibv_mr_ptr != NULL && addr != NULL && length > 0 && && peer->pd != NULL * * ERRORS * rpma_peer_setup_mr_reg() can fail with the following error: * * - RPMA_E_PROVIDER - registering the memory region failed */ int rpma_peer_setup_mr_reg(struct rpma_peer *peer, struct ibv_mr **ibv_mr_ptr, void *addr, size_t length, int usage); #endif /* LIBRPMA_PEER_H */ rpma-1.3.0/src/peer_cfg.c000066400000000000000000000103671443364775400152070ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * peer_cfg.c -- librpma peer-configuration-related implementations */ #include #include #include #include #include "librpma.h" #include "log_internal.h" #include "debug.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif #ifdef ATOMIC_OPERATIONS_SUPPORTED #include #endif /* ATOMIC_OPERATIONS_SUPPORTED */ #define SUPPORTED2STR(var) ((var) ? "supported" : "unsupported") static bool RPMA_DEFAULT_DIRECT_WRITE_TO_PMEM = false; struct rpma_peer_cfg { #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ bool direct_write_to_pmem; }; /* public librpma API */ /* * rpma_peer_cfg_new -- create a new peer configuration object */ int rpma_peer_cfg_new(struct rpma_peer_cfg **pcfg_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); if (pcfg_ptr == NULL) return RPMA_E_INVAL; struct rpma_peer_cfg *cfg = malloc(sizeof(struct rpma_peer_cfg)); if (cfg == NULL) return RPMA_E_NOMEM; /* set default values */ #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_init(&cfg->direct_write_to_pmem, RPMA_DEFAULT_DIRECT_WRITE_TO_PMEM); #else cfg->direct_write_to_pmem = RPMA_DEFAULT_DIRECT_WRITE_TO_PMEM; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ *pcfg_ptr = cfg; return 0; } /* * rpma_peer_cfg_delete -- delete the peer configuration object */ int rpma_peer_cfg_delete(struct rpma_peer_cfg **pcfg_ptr) { RPMA_DEBUG_TRACE; if (pcfg_ptr == NULL) return RPMA_E_INVAL; free(*pcfg_ptr); *pcfg_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_peer_cfg_set_direct_write_to_pmem -- declare if direct write to PMEM is supported */ int rpma_peer_cfg_set_direct_write_to_pmem(struct rpma_peer_cfg *pcfg, bool supported) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (pcfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&pcfg->direct_write_to_pmem, supported, __ATOMIC_SEQ_CST); #else pcfg->direct_write_to_pmem = supported; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_peer_cfg_get_direct_write_to_pmem -- check if direct write to PMEM is supported */ int rpma_peer_cfg_get_direct_write_to_pmem(const struct rpma_peer_cfg *pcfg, bool *supported) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (pcfg == NULL || supported == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *supported = atomic_load_explicit((_Atomic bool *)&pcfg->direct_write_to_pmem, __ATOMIC_SEQ_CST); #else *supported = pcfg->direct_write_to_pmem; 
#endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_peer_cfg_get_descriptor -- get a descriptor of a peer configuration */ int rpma_peer_cfg_get_descriptor(const struct rpma_peer_cfg *pcfg, void *desc) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (pcfg == NULL || desc == NULL) return RPMA_E_INVAL; bool direct_write_to_pmem; rpma_peer_cfg_get_direct_write_to_pmem(pcfg, &direct_write_to_pmem); *((uint8_t *)desc) = (uint8_t)direct_write_to_pmem; return 0; } /* * rpma_peer_cfg_get_descriptor_size -- get size of the peer configuration descriptor */ int rpma_peer_cfg_get_descriptor_size(const struct rpma_peer_cfg *pcfg, size_t *desc_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (pcfg == NULL || desc_size == NULL) return RPMA_E_INVAL; *desc_size = sizeof(uint8_t); return 0; } /* * rpma_peer_cfg_from_descriptor -- create a peer configuration from a descriptor */ int rpma_peer_cfg_from_descriptor(const void *desc, size_t desc_size, struct rpma_peer_cfg **pcfg_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (desc == NULL || pcfg_ptr == NULL) return RPMA_E_INVAL; if (desc_size < sizeof(uint8_t)) { RPMA_LOG_ERROR( "incorrect size of the descriptor: %i bytes (should be at least %i bytes)", desc_size, sizeof(uint8_t)); return RPMA_E_INVAL; } struct rpma_peer_cfg *cfg = malloc(sizeof(struct rpma_peer_cfg)); if (cfg == NULL) return RPMA_E_NOMEM; cfg->direct_write_to_pmem = *(uint8_t *)desc; *pcfg_ptr = cfg; RPMA_LOG_INFO("INFO: Direct Write To PMem is %s", SUPPORTED2STR(cfg->direct_write_to_pmem)); return 0; } rpma-1.3.0/src/private_data.c000066400000000000000000000022101443364775400160640ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * private_data.c -- a store for connections' private data */ #include #include #include "private_data.h" #include "debug.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif /* * rpma_private_data_store -- store a copy of the data provided via the CM event object */ int rpma_private_data_store(struct rdma_cm_event *edata, struct rpma_conn_private_data *pdata) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); const void *ptr = edata->param.conn.private_data; uint8_t len = edata->param.conn.private_data_len; if (ptr == NULL || len == 0) return 0; /* allocate a buffer for a copy of data from ptr */ void *ptr_copy = malloc(len); if (ptr_copy == NULL) return RPMA_E_NOMEM; /* copy the data to the buffer */ memcpy(ptr_copy, ptr, len); pdata->ptr = ptr_copy; pdata->len = len; return 0; } /* * rpma_private_data_delete -- free the private data */ void rpma_private_data_delete(struct rpma_conn_private_data *pdata) { RPMA_DEBUG_TRACE; free(pdata->ptr); pdata->ptr = NULL; pdata->len = 0; } rpma-1.3.0/src/private_data.h000066400000000000000000000015261443364775400161020ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * private_data.h -- a store for connections' private data (definitions) */ #ifndef LIBRPMA_PRIVATE_DATA_H #define LIBRPMA_PRIVATE_DATA_H #include #include "librpma.h" /* * ASSUMPTIONS * - edata != NULL * - edata->event == RDMA_CM_EVENT_CONNECT_REQUEST || edata->event == RDMA_CM_EVENT_ESTABLISHED * - pdata != NULL * - pdata == {NULL, 0} * * ERRORS * rpma_private_data_store() can fail with the following error: * * - RPMA_E_NOMEM - out of memory */ int rpma_private_data_store(struct rdma_cm_event *edata, struct rpma_conn_private_data *pdata); /* * 
ASSUMPTIONS * - pdata != NULL * * The function cannot fail. */ void rpma_private_data_delete(struct rpma_conn_private_data *pdata); #endif /* LIBRPMA_PRIVATE_DATA_H */ rpma-1.3.0/src/rpma_err.c000066400000000000000000000017021443364775400152350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2022, Intel Corporation */ /* * rpma_err.c -- error-handling related librpma definitions */ #include "librpma.h" /* public librpma API */ /* * rpma_err_2str -- return const string representation of an RPMA error */ const char * rpma_err_2str(int ret) { switch (ret) { case 0: return "Success"; case RPMA_E_NOSUPP: return "Not supported"; case RPMA_E_PROVIDER: return "Provider error occurred"; case RPMA_E_NOMEM: return "Out of memory"; case RPMA_E_INVAL: return "Invalid argument"; case RPMA_E_NO_COMPLETION: return "No next completion available"; case RPMA_E_NO_EVENT: return "No next event available"; case RPMA_E_AGAIN: return "Temporary error, try again"; case RPMA_E_SHARED_CHANNEL: return "Completion channel is shared"; case RPMA_E_NOT_SHARED_CHNL: return "Completion channel is not shared"; case RPMA_E_UNKNOWN: default: return "Unknown error"; } } rpma-1.3.0/src/srq.c000066400000000000000000000056111443364775400142360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq.c -- librpma shared-RQ-related implementations */ #include #include #include "cq.h" #include "debug.h" #include "librpma.h" #include "log_internal.h" #include "peer.h" #include "mr.h" #include "srq_cfg.h" #include "srq.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif struct rpma_srq { struct ibv_srq *ibv_srq; struct rpma_cq *rcq; }; /* internal librpma API */ /* * rpma_srq_get_ibv_srq -- get the shared CQ member from the rpma_srq object * * ASSUMPTIONS * - srq != NULL */ struct ibv_srq * rpma_srq_get_ibv_srq(const struct rpma_srq *srq) { return srq->ibv_srq; } /* public librpma API */ /* * rpma_srq_new -- create a new shared RQ and a new shared receive CQ * if the size of the receive CQ in cfg is greater than 0 */ int rpma_srq_new(struct rpma_peer *peer, struct rpma_srq_cfg *cfg, struct rpma_srq **srq_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (peer == NULL || srq_ptr == NULL) return RPMA_E_INVAL; if (cfg == NULL) cfg = rpma_srq_cfg_default(); struct ibv_srq *ibv_srq = NULL; struct rpma_cq *rcq = NULL; int ret = rpma_peer_create_srq(peer, cfg, &ibv_srq, &rcq); if (ret) return ret; RPMA_FAULT_INJECTION_GOTO(RPMA_E_NOMEM, err_rpma_rcq_delete); *srq_ptr = (struct rpma_srq *)malloc(sizeof(struct rpma_srq)); if (*srq_ptr == NULL) { ret = RPMA_E_NOMEM; goto err_rpma_rcq_delete; } (*srq_ptr)->ibv_srq = ibv_srq; (*srq_ptr)->rcq = rcq; return 0; err_rpma_rcq_delete: (void) rpma_cq_delete(&rcq); (void) ibv_destroy_srq(ibv_srq); return ret; } /* * rpma_srq_delete -- delete the shared RQ and the shared receive CQ */ int rpma_srq_delete(struct rpma_srq **srq_ptr) { RPMA_DEBUG_TRACE; if (srq_ptr == NULL) return RPMA_E_INVAL; struct rpma_srq *srq = *srq_ptr; int ret = 0; if (srq == NULL) return ret; ret = rpma_cq_delete(&srq->rcq); if (srq->ibv_srq) { errno = ibv_destroy_srq(srq->ibv_srq); if (!ret && errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_destroy_srq()"); ret = RPMA_E_PROVIDER; } } free(srq); *srq_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); return ret; } /* * rpma_srq_recv -- initiate the receive operation in shared RQ */ int rpma_srq_recv(struct rpma_srq *srq, struct rpma_mr_local *dst, size_t offset, 
size_t len, const void *op_context) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (srq == NULL || (dst == NULL && (offset != 0 || len != 0))) return RPMA_E_INVAL; return rpma_mr_srq_recv(srq->ibv_srq, dst, offset, len, op_context); } /* * rpma_srq_get_rcq -- get the receive CQ from the shared RQ object */ int rpma_srq_get_rcq(const struct rpma_srq *srq, struct rpma_cq **rcq_ptr) { RPMA_DEBUG_TRACE; if (srq == NULL || rcq_ptr == NULL) return RPMA_E_INVAL; *rcq_ptr = srq->rcq; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } rpma-1.3.0/src/srq.h000066400000000000000000000006061443364775400142420ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * srq.h -- librpma shared-RQ-related internal definitions */ #ifndef LIBRPMA_SRQ_H #define LIBRPMA_SRQ_H #include #include "librpma.h" /* * ERRORS * rpma_srq_get_ibv_srq() cannot fail. */ struct ibv_srq *rpma_srq_get_ibv_srq(const struct rpma_srq *srq); #endif /* LIBRPMA_SRQ_H */ rpma-1.3.0/src/srq_cfg.c000066400000000000000000000104011443364775400150460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* Copyright 2022, Intel Corporation */ /* * srq_cfg.c -- librpma shared-RQ-configuration-related implementations */ #include #include #ifdef ATOMIC_OPERATIONS_SUPPORTED #include #endif /* ATOMIC_OPERATIONS_SUPPORTED */ #include "common.h" #include "debug.h" #include "librpma.h" #ifdef TEST_MOCK_ALLOC #include "cmocka_alloc.h" #endif #define RPMA_DEFAULT_SRQ_SIZE 20 struct rpma_srq_cfg { #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic uint32_t rq_size; _Atomic uint32_t rcq_size; #else uint32_t rq_size; uint32_t rcq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ }; static struct rpma_srq_cfg Srq_cfg_default = { .rq_size = RPMA_DEFAULT_SRQ_SIZE, .rcq_size = RPMA_DEFAULT_SRQ_SIZE }; /* internal librpma API */ /* * rpma_srq_cfg_default -- return pointer to default share RQ configuration * object */ struct rpma_srq_cfg * rpma_srq_cfg_default() { RPMA_DEBUG_TRACE; return &Srq_cfg_default; } /* * rpma_srq_cfg_get_rcqe -- ibv_create_cq(..., int cqe, ...) compatible variant * of rpma_srq_cfg_get_rcq_size(). Round down the rcq_size when it is too big * for storing into an int type of value. Convert otherwise. 
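 *
 * A worked example (assuming CLIP_TO_INT() from common.h saturates at
 * INT_MAX): rcq_size == 3000000000 does not fit into an int, so *rcqe
 * becomes 2147483647 (INT_MAX), whereas rcq_size == 1024 is converted
 * to 1024 unchanged.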
*/ void rpma_srq_cfg_get_rcqe(const struct rpma_srq_cfg *cfg, int *rcqe) { RPMA_DEBUG_TRACE; uint32_t rcq_size = 0; (void) rpma_srq_cfg_get_rcq_size(cfg, &rcq_size); *rcqe = CLIP_TO_INT(rcq_size); } /* public librpma API */ /* * rpma_srq_cfg_new -- create a new shared RQ configuration */ int rpma_srq_cfg_new(struct rpma_srq_cfg **cfg_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg_ptr == NULL) return RPMA_E_INVAL; RPMA_FAULT_INJECTION(RPMA_E_NOMEM, {}); *cfg_ptr = malloc(sizeof(struct rpma_srq_cfg)); if (*cfg_ptr == NULL) return RPMA_E_NOMEM; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_init(&(*cfg_ptr)->rq_size, atomic_load_explicit(&Srq_cfg_default.rq_size, __ATOMIC_SEQ_CST)); atomic_init(&(*cfg_ptr)->rcq_size, atomic_load_explicit(&Srq_cfg_default.rcq_size, __ATOMIC_SEQ_CST)); #else memcpy(*cfg_ptr, &Srq_cfg_default, sizeof(struct rpma_srq_cfg)); #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_srq_cfg_delete -- delete the shared RQ configuration */ int rpma_srq_cfg_delete(struct rpma_srq_cfg **cfg_ptr) { RPMA_DEBUG_TRACE; if (cfg_ptr == NULL) return RPMA_E_INVAL; if (*cfg_ptr == NULL) return 0; free(*cfg_ptr); *cfg_ptr = NULL; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_srq_cfg_set_rq_size -- set shared RQ size */ int rpma_srq_cfg_set_rq_size(struct rpma_srq_cfg *cfg, uint32_t rq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->rq_size, rq_size, __ATOMIC_SEQ_CST); #else cfg->rq_size = rq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_srq_cfg_get_rq_size -- get shared RQ size */ int rpma_srq_cfg_get_rq_size(const struct rpma_srq_cfg *cfg, uint32_t *rq_size) { RPMA_DEBUG_TRACE; if (cfg == NULL || rq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *rq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->rq_size, __ATOMIC_SEQ_CST); #else *rq_size = cfg->rq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } /* * rpma_srq_cfg_set_rcq_size -- set shared receive CQ size */ int rpma_srq_cfg_set_rcq_size(struct rpma_srq_cfg *cfg, uint32_t rcq_size) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (cfg == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED atomic_store_explicit(&cfg->rcq_size, rcq_size, __ATOMIC_SEQ_CST); #else cfg->rcq_size = rcq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ return 0; } /* * rpma_srq_cfg_get_rcq_size -- get shared receive CQ size */ int rpma_srq_cfg_get_rcq_size(const struct rpma_srq_cfg *cfg, uint32_t *rcq_size) { RPMA_DEBUG_TRACE; if (cfg == NULL || rcq_size == NULL) return RPMA_E_INVAL; #ifdef ATOMIC_OPERATIONS_SUPPORTED *rcq_size = atomic_load_explicit((_Atomic uint32_t *)&cfg->rcq_size, __ATOMIC_SEQ_CST); #else *rcq_size = cfg->rcq_size; #endif /* ATOMIC_OPERATIONS_SUPPORTED */ RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); return 0; } rpma-1.3.0/src/srq_cfg.h000066400000000000000000000010421443364775400150540ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * srq_cfg.h -- librpma shared-RQ-configuration-related internal definitions */ #ifndef LIBRPMA_SRQ_CFG_H #define LIBRPMA_SRQ_CFG_H #include "librpma.h" /* * ERRORS * rpma_srq_cfg_default() cannot fail. */ struct rpma_srq_cfg *rpma_srq_cfg_default(); /* * ERRORS * rpma_srq_cfg_get_rcqe() cannot fail. 
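 *
 * Illustrative call site (a sketch based on rpma_peer_create_srq() in
 * peer.c, not a verbatim quote):
 *
 *   int rcqe;
 *   (void) rpma_srq_cfg_get_rcqe(cfg, &rcqe);
 *   if (rcqe)
 *           ret = rpma_cq_new(peer->pd->context, rcqe, NULL, &rcq);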
* * ASSUMPTIONS * cfg != NULL && rcqe != NULL */ void rpma_srq_cfg_get_rcqe(const struct rpma_srq_cfg *cfg, int *rcqe); #endif /* LIBRPMA_SRQ_CFG_H */ rpma-1.3.0/src/utils.c000066400000000000000000000122501443364775400145660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * utils.c -- generic helper functions for librpma */ #include #include "librpma.h" #include "debug.h" #include "log_internal.h" #include "info.h" #include "utils.h" /* internal librpma API */ /* * rpma_utils_ibv_context_is_atomic_write_capable -- query the extended device * context's capabilities and check if kernel supports native atomic write. */ int rpma_utils_ibv_context_is_atomic_write_capable(struct ibv_context *ibv_ctx, int *is_atomic_write_capable) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); *is_atomic_write_capable = 0; #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED /* query an RDMA device's attributes */ struct ibv_device_attr_ex attr = {{{0}}}; errno = ibv_query_device_ex(ibv_ctx, NULL /* input */, &attr); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_query_device_ex(attr={0})"); return RPMA_E_PROVIDER; } /* check whether native atomic write is supported in kernel */ if (attr.device_cap_flags_ex & IB_UVERBS_DEVICE_ATOMIC_WRITE) *is_atomic_write_capable = 1; #endif return 0; } /* * rpma_utils_ibv_context_is_flush_capable -- query the extended device * context's capabilities and check if kernel supports both of native flush * types (Global visibility and Persistence). */ int rpma_utils_ibv_context_is_flush_capable(struct ibv_context *ibv_ctx, int *is_flush_capable) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); *is_flush_capable = 0; #ifdef NATIVE_FLUSH_SUPPORTED /* query an RDMA device's attributes */ struct ibv_device_attr_ex attr = {{{0}}}; errno = ibv_query_device_ex(ibv_ctx, NULL /* input */, &attr); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_query_device_ex(attr={0})"); return RPMA_E_PROVIDER; } /* check whether both global visibility and persistence are supported in kernel */ if ((attr.device_cap_flags_ex & IB_UVERBS_DEVICE_FLUSH_GLOBAL) && (attr.device_cap_flags_ex & IB_UVERBS_DEVICE_FLUSH_PERSISTENT)) *is_flush_capable = 1; #endif return 0; } /* public librpma API */ /* * rpma_utils_get_ibv_context -- obtain an RDMA device context by IP address */ int rpma_utils_get_ibv_context(const char *addr, enum rpma_util_ibv_context_type type, struct ibv_context **ibv_ctx_ptr) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_INVAL, {}); if (addr == NULL || ibv_ctx_ptr == NULL) return RPMA_E_INVAL; enum rpma_info_side side; switch (type) { case RPMA_UTIL_IBV_CONTEXT_LOCAL: side = RPMA_INFO_PASSIVE; break; case RPMA_UTIL_IBV_CONTEXT_REMOTE: side = RPMA_INFO_ACTIVE; break; default: return RPMA_E_INVAL; } struct rpma_info *info; int ret = rpma_info_new(addr, NULL /* port */, side, &info); if (ret) return ret; struct rdma_cm_id *temp_id; RPMA_FAULT_INJECTION_GOTO(RPMA_E_PROVIDER, err_info_delete); ret = rdma_create_id(NULL, &temp_id, NULL, RDMA_PS_TCP); if (ret) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "rdma_create_id()"); ret = RPMA_E_PROVIDER; goto err_info_delete; } if (side == RPMA_INFO_PASSIVE) { ret = rpma_info_bind_addr(info, temp_id); if (ret) goto err_destroy_id; } else { ret = rpma_info_resolve_addr(info, temp_id, RPMA_DEFAULT_TIMEOUT_MS); if (ret) goto err_destroy_id; } /* obtain the device */ *ibv_ctx_ptr = temp_id->verbs; err_destroy_id: (void) 
rdma_destroy_id(temp_id); err_info_delete: (void) rpma_info_delete(&info); return ret; } /* * rpma_utils_ibv_context_is_odp_capable -- query the extended device context's capabilities and * check if it supports On-Demand Paging */ int rpma_utils_ibv_context_is_odp_capable(struct ibv_context *ibv_ctx, int *is_odp_capable) { RPMA_DEBUG_TRACE; RPMA_FAULT_INJECTION(RPMA_E_PROVIDER, {}); if (ibv_ctx == NULL || is_odp_capable == NULL) return RPMA_E_INVAL; *is_odp_capable = 0; #ifdef ON_DEMAND_PAGING_SUPPORTED /* query an RDMA device's attributes */ struct ibv_device_attr_ex attr = {{{0}}}; errno = ibv_query_device_ex(ibv_ctx, NULL /* input */, &attr); if (errno) { RPMA_LOG_ERROR_WITH_ERRNO(errno, "ibv_query_device_ex(attr={0})"); return RPMA_E_PROVIDER; } /* * Check whether On-Demand Paging is supported for all required types of operations. */ struct ibv_odp_caps *odp_caps = &attr.odp_caps; if (odp_caps->general_caps & IBV_ODP_SUPPORT) { /* flags for the Reliable Connected transport type */ uint32_t rc_odp_caps = odp_caps->per_transport_caps.rc_odp_caps; if ((rc_odp_caps & IBV_ODP_SUPPORT_WRITE) && (rc_odp_caps & IBV_ODP_SUPPORT_READ)) { *is_odp_capable = 1; } } #endif return 0; } /* * rpma_utils_conn_event_2str -- return const string representation of RPMA_CONN_* enums */ const char * rpma_utils_conn_event_2str(enum rpma_conn_event conn_event) { switch (conn_event) { case RPMA_CONN_UNDEFINED: return "Undefined connection event"; case RPMA_CONN_ESTABLISHED: return "Connection established"; case RPMA_CONN_CLOSED: return "Connection closed"; case RPMA_CONN_LOST: return "Connection lost"; case RPMA_CONN_REJECTED: return "Connection rejected"; case RPMA_CONN_UNREACHABLE: return "Connection unreachable"; default: return "Unsupported connection event"; } } rpma-1.3.0/src/utils.h000066400000000000000000000016371443364775400146020ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * utils.h -- the internal definitions of generic helper functions */ #ifndef LIBRPMA_UTILS_H #define LIBRPMA_UTILS_H #include "librpma.h" /* * ERRORS * rpma_utils_ibv_context_is_atomic_write_capable() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_query_device_ex() failed * * ASSUMPTIONS * ibv_ctx != NULL && is_atomic_write_capable != NULL */ int rpma_utils_ibv_context_is_atomic_write_capable(struct ibv_context *ibv_ctx, int *is_atomic_write_capable); /* * ERRORS * rpma_utils_ibv_context_is_flush_capable() can fail with the following error: * * - RPMA_E_PROVIDER - ibv_query_device_ex() failed * * ASSUMPTIONS * ibv_ctx != NULL && is_flush_capable != NULL */ int rpma_utils_ibv_context_is_flush_capable(struct ibv_context *ibv_ctx, int *is_flush_capable); #endif /* LIBRPMA_UTILS_H */ rpma-1.3.0/src/valgrind/000077500000000000000000000000001443364775400150705ustar00rootroot00000000000000rpma-1.3.0/src/valgrind/valgrind_internal.h000066400000000000000000000217211443364775400207460ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2015-2020, Intel Corporation */ /* * valgrind_internal.h -- internal definitions for valgrind macros */ #ifndef RPMA_VALGRIND_INTERNAL_H #define RPMA_VALGRIND_INTERNAL_H 1 #if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \ VG_DRD_ENABLED #define ANY_VG_TOOL_ENABLED 1 #else #define ANY_VG_TOOL_ENABLED 0 #endif #if ANY_VG_TOOL_ENABLED extern unsigned _On_valgrind; #define On_valgrind __builtin_expect(_On_valgrind, 0) #include "valgrind/valgrind.h" #else #define 
On_valgrind (0) #endif #if VG_HELGRIND_ENABLED #include "valgrind/helgrind.h" #endif #if VG_DRD_ENABLED #include "valgrind/drd.h" #endif #if VG_HELGRIND_ENABLED || VG_DRD_ENABLED #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\ if (On_valgrind) \ ANNOTATE_HAPPENS_BEFORE((obj));\ } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\ if (On_valgrind) \ ANNOTATE_HAPPENS_AFTER((obj));\ } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ if (On_valgrind) \ ANNOTATE_NEW_MEMORY((addr), (size));\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_READS_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_READS_END();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_WRITES_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\ if (On_valgrind) \ ANNOTATE_IGNORE_WRITES_END();\ } while (0) /* Supported by both helgrind and drd. */ #define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\ if (On_valgrind) \ VALGRIND_HG_DISABLE_CHECKING((addr), (size));\ } while (0) #else #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ (void) (addr);\ (void) (size);\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0) #define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\ (void) (addr);\ (void) (size);\ } while (0) #endif #if VG_PMEMCHECK_ENABLED #include "valgrind/pmemcheck.h" #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ if (On_valgrind)\ VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \ (offset));\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {\ if (On_valgrind)\ VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\ } while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ } while (0) #define VALGRIND_DO_FENCE do {\ if (On_valgrind)\ VALGRIND_PMC_DO_FENCE;\ } while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ if (On_valgrind) {\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ VALGRIND_PMC_DO_FENCE;\ }\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_SET_CLEAN(addr, len);\ } while (0) #define VALGRIND_WRITE_STATS do {\ if (On_valgrind)\ VALGRIND_PMC_WRITE_STATS;\ } while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ if (On_valgrind)\ VALGRIND_PMC_EMIT_LOG((emit_log));\ } while (0) #define VALGRIND_START_TX do {\ if (On_valgrind)\ VALGRIND_PMC_START_TX;\ } while (0) #define VALGRIND_START_TX_N(txn) do {\ if (On_valgrind)\ VALGRIND_PMC_START_TX_N(txn);\ } while (0) #define VALGRIND_END_TX do {\ if (On_valgrind)\ VALGRIND_PMC_END_TX;\ } while (0) #define VALGRIND_END_TX_N(txn) do {\ if (On_valgrind)\ VALGRIND_PMC_END_TX_N(txn);\ } while 
(0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_TX(addr, len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ if (On_valgrind)\ VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\ } while (0) #else #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ (void) (desc);\ (void) (base_addr);\ (void) (size);\ (void) (offset);\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_DO_FENCE do {} while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_WRITE_STATS do {} while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ (void) (emit_log);\ } while (0) #define VALGRIND_START_TX do {} while (0) #define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0) #define VALGRIND_END_TX do {} while (0) #define VALGRIND_END_TX_N(txn) do {\ (void) (txn);\ } while (0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #endif #if VG_MEMCHECK_ENABLED #include "valgrind/memcheck.h" #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_DISABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_ENABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\ if (On_valgrind)\ VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\ } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\ if (On_valgrind)\ VALGRIND_DESTROY_MEMPOOL(heap);\ } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\ } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_FREE(heap, addr);\ } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\ if (On_valgrind)\ VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\ } while (0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\ if (On_valgrind)\ VALGRIND_MAKE_MEM_DEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\ if (On_valgrind)\ VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\ if (On_valgrind)\ 
VALGRIND_MAKE_MEM_NOACCESS(addr, len);\ } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\ if (On_valgrind)\ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\ } while (0) #else #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\ do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap)\ do { (void) (heap); } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\ do { (void) (heap); (void) (addr); (void) (size); } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\ do { (void) (heap); (void) (addr); } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\ do {\ (void) (heap); (void) (addrA); (void) (addrB); (void) (size);\ } while (0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\ do { (void) (addr); (void) (len); } while (0) #endif #endif rpma-1.3.0/tests/000077500000000000000000000000001443364775400136355ustar00rootroot00000000000000rpma-1.3.0/tests/CMakeLists.txt000066400000000000000000000045731443364775400164060ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # include(cmake/ctest_helpers.cmake) add_custom_target(tests) add_flag(-Wall) find_packages() add_cstyle(tests-all ${CMAKE_CURRENT_SOURCE_DIR}/*.[ch] ${CMAKE_CURRENT_SOURCE_DIR}/*/*/*.[ch]) add_check_whitespace(tests-all ${CMAKE_CURRENT_SOURCE_DIR}/*.[ch] ${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt ${CMAKE_CURRENT_SOURCE_DIR}/*/CMakeLists.txt ${CMAKE_CURRENT_SOURCE_DIR}/*/*/*.[ch] ${CMAKE_CURRENT_SOURCE_DIR}/*/*/CMakeLists.txt) if(TESTS_RDMA_CONNECTION) if("$ENV{RPMA_TESTING_IP}" STREQUAL "") message(WARNING "\nWARNING: The RPMA_TESTING_IP environment variable is not set! 
" "It should contain an IP address of a configured RDMA-capable network interface.\n" "There are possible two alternative actions:\n" "a) Run 'make config_softroce' to configure SoftRoCE" " and to get the IP of a SoftRoCE-configured network interface" " and set the RPMA_TESTING_IP environment variable to silent this message.\n" "or:\n" "b) Set the TESTS_RDMA_CONNECTION cmake variable to OFF in order to disable tests" " that require a configured RDMA-capable network interface.") elseif("$ENV{RPMA_TESTING_IP}" STREQUAL "127.0.0.1") message(FATAL_ERROR "RPMA_TESTING_IP cannot be set to '127.0.0.1' " "(use an IP address of a configured RDMA-capable network interface instead)") else() message(STATUS "IP address used as a configured RDMA-capable network interface " "(defined by the RPMA_TESTING_IP environment variable): $ENV{RPMA_TESTING_IP}") endif() else() message(STATUS "NOTICE: all multi-threaded and integration tests will be disabled " "because TESTS_RDMA_CONNECTION is OFF" "(they require a configured RDMA-capable network interface)") endif() # set NPROC to number of processing units available execute_process(COMMAND nproc OUTPUT_VARIABLE NPROC WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_QUIET) message(STATUS "Number of processing units available: ${NPROC}") math(EXPR NPROCdiv2 "${NPROC} / 2") # We assume 16 threads is maximum for drd and helgrind if(NPROCdiv2 LESS 16) set(MAX_THREADS ${NPROCdiv2}) else() set(MAX_THREADS 16) endif() message(STATUS "Maximum number of threads for drd and helgrind: ${MAX_THREADS}") add_subdirectory(unit) if(TESTS_RDMA_CONNECTION AND VALGRIND_FOUND) add_subdirectory(multithreaded) endif() rpma-1.3.0/tests/cmake/000077500000000000000000000000001443364775400147155ustar00rootroot00000000000000rpma-1.3.0/tests/cmake/ctest_helpers.cmake000066400000000000000000000152571443364775400205750ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # # # ctest_helpers.cmake - helper functions for tests/CMakeLists.txt # set(TEST_ROOT_DIR ${PROJECT_SOURCE_DIR}/tests) set(TEST_UNIT_COMMON_DIR ${TEST_ROOT_DIR}/unit/common) set(TEST_MT_COMMON_DIR ${TEST_ROOT_DIR}/multithreaded/common) set(GLOBAL_TEST_ARGS -DPERL_EXECUTABLE=${PERL_EXECUTABLE} -DMATCH_SCRIPT=${PROJECT_SOURCE_DIR}/tests/match -DPARENT_DIR=${TEST_DIR} -DTESTS_USE_FORCED_PMEM=${TESTS_USE_FORCED_PMEM} -DTEST_ROOT_DIR=${TEST_ROOT_DIR}) if(TESTS_VERBOSE_OUTPUT) set(GLOBAL_TEST_ARGS ${GLOBAL_TEST_ARGS} --trace-expand) endif() set(INCLUDE_DIRS .. .) include_directories(${INCLUDE_DIRS}) link_directories(${LIBS_DIRS}) function(find_gdb) execute_process(COMMAND gdb --help RESULT_VARIABLE GDB_RET OUTPUT_QUIET ERROR_QUIET) if(GDB_RET) set(GDB_FOUND 0 CACHE INTERNAL "") message(WARNING "GDB NOT found, some tests will be skipped") else() set(GDB_FOUND 1 CACHE INTERNAL "") endif() endfunction() function(find_packages) pkg_check_modules(CMOCKA REQUIRED cmocka) if(NOT CMOCKA_FOUND) message(FATAL_ERROR "Cmocka not found. Cmocka is required to run tests.") endif() endfunction() # Function to build test with custom build options (e.g. passing defines) # Example: build_test_ext(NAME ... SRC_FILES ....c BUILD_OPTIONS -D...) 
function(build_test_lib_ext) set(oneValueArgs NAME) set(multiValueArgs SRC_FILES BUILD_OPTIONS) cmake_parse_arguments(TEST "" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) build_test_lib(${TEST_NAME} ${TEST_SRC_FILES}) target_compile_definitions(${TEST_NAME} PRIVATE ${TEST_BUILD_OPTIONS}) endfunction() function(build_test_lib name) set(srcs ${ARGN}) prepend(srcs ${CMAKE_CURRENT_SOURCE_DIR} ${srcs}) add_executable(${name} ${srcs}) target_link_libraries(${name} rpma cmocka) target_include_directories(${name} PRIVATE ${LIBRPMA_INCLUDE_DIRS}) add_dependencies(tests ${name}) endfunction() function(build_test_src) set(options UNIT) set(oneValueArgs NAME) set(multiValueArgs SRCS) cmake_parse_arguments(TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_executable(${TEST_NAME} ${TEST_SRCS}) target_include_directories(${TEST_NAME} PRIVATE ${LIBRPMA_INCLUDE_DIRS} ${LIBRPMA_SOURCE_DIR}) if(TEST_UNIT) target_include_directories(${TEST_NAME} PRIVATE ${TEST_UNIT_COMMON_DIR}) endif() # do not link with the rpma library target_link_libraries(${TEST_NAME} cmocka) add_dependencies(tests ${TEST_NAME}) endfunction() set(vg_tracers memcheck helgrind drd pmemcheck) # Configures testcase ${name} ${testcase} using tracer ${tracer}, cmake_script is used to run test function(add_testcase name tracer testcase cmake_script) set(executable ${name}) add_test(NAME ${executable}_${testcase}_${tracer} COMMAND ${CMAKE_COMMAND} ${GLOBAL_TEST_ARGS} -DTEST_NAME=${executable}_${testcase}_${tracer} -DTESTCASE=${testcase} -DSRC_DIR=${CMAKE_CURRENT_SOURCE_DIR} -DBIN_DIR=${CMAKE_CURRENT_BINARY_DIR}/${executable}_${testcase}_${tracer} -DTEST_EXECUTABLE=$ -DTRACER=${tracer} -DLONG_TESTS=${LONG_TESTS} -DMAX_THREADS=${MAX_THREADS} -DVALGRIND_S_OPTION=${VALGRIND_S_OPTION} -P ${cmake_script}) set_tests_properties(${name}_${testcase}_${tracer} PROPERTIES ENVIRONMENT "LC_ALL=C;PATH=$ENV{PATH};" FAIL_REGULAR_EXPRESSION Sanitizer) if (${tracer} STREQUAL pmemcheck) # XXX: if we use FATAL_ERROR in test.cmake - pmemcheck passes anyway set_tests_properties(${name}_${testcase}_${tracer} PROPERTIES FAIL_REGULAR_EXPRESSION "CMake Error") endif() endfunction() function(skip_test name message) add_test(NAME ${name}_${message} COMMAND ${CMAKE_COMMAND} -P ${TEST_ROOT_DIR}/true.cmake) set_tests_properties(${name}_${message} PROPERTIES COST 0) endfunction() # adds testcase with name, tracer, and cmake_script responsible for running it function(add_test_common name tracer testcase cmake_script) if(${tracer} STREQUAL "") set(tracer none) endif() if (${tracer} IN_LIST vg_tracers) if (NOT VALGRIND_FOUND) skip_test(${name}_${testcase}_${tracer} "SKIPPED_BECAUSE_OF_MISSING_VALGRIND") return() endif() if (DEBUG_USE_ASAN OR DEBUG_USE_UBSAN) skip_test(${name}_${testcase}_${tracer} "SKIPPED_BECAUSE_SANITIZER_USED") return() endif() endif() if (${tracer} STREQUAL "pmemcheck" AND NOT VALGRIND_PMEMCHECK_FOUND) skip_test(${name}_${testcase}_${tracer} "SKIPPED_BECAUSE_OF_MISSING_PMEMCHECK") return() endif() # if test was not build if (NOT TARGET ${name}) return() endif() add_testcase(${name} ${tracer} ${testcase} ${cmake_script}) endfunction() function(add_test_generic) set(options GROUP_SCRIPT) set(oneValueArgs NAME SCRIPT CASE) set(multiValueArgs TRACERS) cmake_parse_arguments(TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) if("${TEST_CASE}" STREQUAL "") set(STR_TEST_CASE "") else() set(STR_TEST_CASE "_${TEST_CASE}") endif() if(NOT "${TEST_SCRIPT}" STREQUAL "") # SCRIPT is set set(cmake_script 
${CMAKE_CURRENT_SOURCE_DIR}/${TEST_SCRIPT}) elseif(TEST_GROUP_SCRIPT) # GROUP_SCRIPT is set set(cmake_script ${CMAKE_CURRENT_SOURCE_DIR}/../run_group${STR_TEST_CASE}.cmake) elseif(NOT "${TEST_CASE}" STREQUAL "") # CASE is set set(cmake_script ${CMAKE_CURRENT_SOURCE_DIR}/${TEST_NAME}${STR_TEST_CASE}.cmake) else() # none of: SCRIPT nor GROUP_SCRIPT nor CASE is set set(cmake_script ${CMAKE_CURRENT_SOURCE_DIR}/../../cmake/run_default.cmake) endif() if("${TEST_CASE}" STREQUAL "") set(TEST_CASE "0") # TEST_CASE is required by add_test_common() endif() if("${TEST_TRACERS}" STREQUAL "") add_test_common(${TEST_NAME} none ${TEST_CASE} ${cmake_script}) else() foreach(tracer ${TEST_TRACERS}) add_test_common(${TEST_NAME} ${tracer} ${TEST_CASE} ${cmake_script}) endforeach() endif() endfunction() function(add_multithreaded) set(options USE_LIBIBVERBS) set(oneValueArgs NAME BIN) set(multiValueArgs SRCS) cmake_parse_arguments(MULTITHREADED "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) set(target mtt-${MULTITHREADED_NAME}-${MULTITHREADED_BIN}) prepend(srcs ${CMAKE_CURRENT_SOURCE_DIR} ${srcs}) add_executable(${target} ${TEST_MT_COMMON_DIR}/mtt.c ${MULTITHREADED_SRCS}) target_include_directories(${target} PRIVATE ${TEST_MT_COMMON_DIR} ${LIBRPMA_INCLUDE_DIRS}) set_target_properties(${target} PROPERTIES OUTPUT_NAME ${MULTITHREADED_BIN}) target_link_libraries(${target} ${LIBRPMA_LIBRARIES} pthread) if(MULTITHREADED_USE_LIBIBVERBS) target_include_directories(${target} PRIVATE ${LIBIBVERBS_INCLUDE_DIRS}) target_link_libraries(${target} ${LIBIBVERBS_LIBRARIES}) endif() add_test_generic(NAME ${target} GROUP_SCRIPT TRACERS none memcheck drd helgrind) endfunction() rpma-1.3.0/tests/cmake/helpers.cmake000066400000000000000000000220121443364775400173560ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # set(DIR ${PARENT_DIR}/${TEST_NAME}) function(setup) execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${PARENT_DIR}/${TEST_NAME}) execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${PARENT_DIR}/${TEST_NAME}) execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${BIN_DIR}) execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${BIN_DIR}) endfunction() function(print_logs) message(STATUS "Test ${TEST_NAME}:") if(EXISTS ${BIN_DIR}/${TEST_NAME}.out) file(READ ${BIN_DIR}/${TEST_NAME}.out OUT) message(STATUS "Stdout:\n${OUT}") endif() if(EXISTS ${BIN_DIR}/${TEST_NAME}.err) file(READ ${BIN_DIR}/${TEST_NAME}.err ERR) message(STATUS "Stderr:\n${ERR}") endif() endfunction() # Performs cleanup and log matching. function(finish) print_logs() if(EXISTS ${SRC_DIR}/${TEST_NAME}.err.match) match(${BIN_DIR}/${TEST_NAME}.err ${SRC_DIR}/${TEST_NAME}.err.match) endif() if(EXISTS ${SRC_DIR}/${TEST_NAME}.out.match) match(${BIN_DIR}/${TEST_NAME}.out ${SRC_DIR}/${TEST_NAME}.out.match) endif() execute_process(COMMAND ${CMAKE_COMMAND} -E remove_directory ${PARENT_DIR}/${TEST_NAME}) endfunction() # Verifies ${log_file} matches ${match_file} using "match". 
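# For reference, finish() above already uses it as follows (no new paths are
# introduced here):
#
#   match(${BIN_DIR}/${TEST_NAME}.err ${SRC_DIR}/${TEST_NAME}.err.match)
#
# and a mismatch ends the test with a FATAL_ERROR.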
function(match log_file match_file) execute_process(COMMAND ${PERL_EXECUTABLE} ${MATCH_SCRIPT} -o ${log_file} ${match_file} RESULT_VARIABLE MATCH_ERROR) if(MATCH_ERROR) message(FATAL_ERROR "Log does not match: ${MATCH_ERROR}") endif() endfunction() # Verifies file exists function(check_file_exists file) if(NOT EXISTS ${file}) message(FATAL_ERROR "${file} doesn't exist") endif() endfunction() # Verifies file doesn't exist function(check_file_doesnt_exist file) if(EXISTS ${file}) message(FATAL_ERROR "${file} exists") endif() endfunction() # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=810295 # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=780173 # https://bugs.kde.org/show_bug.cgi?id=303877 # # valgrind issues an unsuppressable warning when exceeding # the brk segment, causing matching failures. We can safely # ignore it because malloc() will fallback to mmap() anyway. # # list of ignored warnings should match the list provided by PMDK: # https://github.com/pmem/pmdk/blob/main/src/test/unittest/unittest.sh function(valgrind_ignore_warnings valgrind_log) execute_process(COMMAND bash "-c" "cat ${valgrind_log} | grep -v \ -e \"WARNING: Serious error when reading debug info\" \ -e \"When reading debug info from \" \ -e \"Ignoring non-Dwarf2/3/4 block in .debug_info\" \ -e \"Last block truncated in .debug_info; ignoring\" \ -e \"parse_CU_Header: is neither DWARF2 nor DWARF3 nor DWARF4\" \ -e \"brk segment overflow\" \ -e \"see section Limitations in user manual\" \ -e \"Warning: set address range perms: large range\"\ -e \"further instances of this message will not be shown\"\ > ${valgrind_log}.tmp mv ${valgrind_log}.tmp ${valgrind_log}") endfunction() function(execute_common expect_success output_file name) if(TESTS_USE_FORCED_PMEM) set(ENV{PMEM_IS_PMEM_FORCE} 1) endif() if(${TRACER} STREQUAL pmemcheck) if(TESTS_USE_FORCED_PMEM) # pmemcheck runs really slow with pmem, disable it set(ENV{PMEM_IS_PMEM_FORCE} 0) endif() set(TRACE valgrind --error-exitcode=99 --tool=pmemcheck) set(ENV{LIBRPMA_TRACER_PMEMCHECK} 1) elseif(${TRACER} STREQUAL memcheck) set(TRACE valgrind --error-exitcode=99 --tool=memcheck --leak-check=full ${VALGRIND_S_OPTION} --gen-suppressions=all --suppressions=${TEST_ROOT_DIR}/memcheck-libibverbs-librdmacm.supp) set(ENV{LIBRPMA_TRACER_MEMCHECK} 1) elseif(${TRACER} STREQUAL helgrind) set(TRACE valgrind --error-exitcode=99 --tool=helgrind ${VALGRIND_S_OPTION} --gen-suppressions=all --suppressions=${TEST_ROOT_DIR}/helgrind.supp) set(ENV{LIBRPMA_TRACER_HELGRIND} 1) elseif(${TRACER} STREQUAL drd) set(TRACE valgrind --error-exitcode=99 --tool=drd ${VALGRIND_S_OPTION} --gen-suppressions=all --suppressions=${TEST_ROOT_DIR}/drd.supp) set(ENV{LIBRPMA_TRACER_DRD} 1) elseif(${TRACER} STREQUAL gdb) set(TRACE gdb --batch --command=${GDB_BATCH_FILE} --args) set(ENV{LIBRPMA_TRACER_GDB} 1) elseif(${TRACER} MATCHES "none.*") # nothing else() message(FATAL_ERROR "Unknown tracer '${TRACER}'") endif() if (NOT $ENV{CGDB}) set(TRACE timeout -s SIGALRM -k 200s 180s ${TRACE}) endif() string(REPLACE ";" " " TRACE_STR "${TRACE}") message(STATUS "Executing: ${TRACE_STR} ${name} ${ARGN}") set(cmd ${TRACE} ${name} ${ARGN}) if($ENV{CGDB}) find_program(KONSOLE NAMES konsole) find_program(GNOME_TERMINAL NAMES gnome-terminal) find_program(CGDB NAMES cgdb) if (NOT KONSOLE AND NOT GNOME_TERMINAL) message(FATAL_ERROR "konsole or gnome-terminal not found.") elseif (NOT CGDB) message(FATAL_ERROR "cdgb not found.") elseif(NOT (${TRACER} STREQUAL none)) message(FATAL_ERROR "Cannot use cgdb with ${TRACER}") else() 
if (KONSOLE) set(cmd konsole -e cgdb --args ${cmd}) elseif(GNOME_TERMINAL) set(cmd gnome-terminal --tab --active --wait -- cgdb --args ${cmd}) endif() endif() endif() if(${output_file} STREQUAL none) execute_process(COMMAND ${cmd} OUTPUT_QUIET RESULT_VARIABLE res) else() execute_process(COMMAND ${cmd} RESULT_VARIABLE res OUTPUT_FILE ${BIN_DIR}/${TEST_NAME}.out ERROR_FILE ${BIN_DIR}/${TEST_NAME}.err) endif() print_logs() # memcheck and pmemcheck match files should follow name pattern: # testname_testcasenr_memcheck/pmemcheck.err.match # If they do exist, ignore test result - it will be verified during # log matching in finish() function. if(EXISTS ${SRC_DIR}/${TEST_NAME}.err.match) valgrind_ignore_warnings(${BIN_DIR}/${TEST_NAME}.err) # pmemcheck is a special snowflake and it doesn't set exit code when # it detects an error, so we have to look at its output if match file # was not found. else() if(${TRACER} STREQUAL pmemcheck) if(NOT EXISTS ${BIN_DIR}/${TEST_NAME}.err) message(FATAL_ERROR "${TEST_NAME}.err not found.") endif() file(READ ${BIN_DIR}/${TEST_NAME}.err PMEMCHECK_ERR) message(STATUS "Stderr:\n${PMEMCHECK_ERR}\nEnd of stderr") if(NOT PMEMCHECK_ERR MATCHES "ERROR SUMMARY: 0") message(FATAL_ERROR "${TRACE} ${name} ${ARGN} failed: ${res}") endif() endif() if(res AND expect_success) message(FATAL_ERROR "${TRACE} ${name} ${ARGN} failed: ${res}") endif() if(NOT res AND NOT expect_success) message(FATAL_ERROR "${TRACE} ${name} ${ARGN} unexpectedly succeeded: ${res}") endif() endif() if(${TRACER} STREQUAL pmemcheck) unset(ENV{LIBRPMA_TRACER_PMEMCHECK}) elseif(${TRACER} STREQUAL memcheck) unset(ENV{LIBRPMA_TRACER_MEMCHECK}) elseif(${TRACER} STREQUAL helgrind) unset(ENV{LIBRPMA_TRACER_HELGRIND}) elseif(${TRACER} STREQUAL drd) unset(ENV{LIBRPMA_TRACER_DRD}) elseif(${TRACER} STREQUAL gdb) unset(ENV{LIBRPMA_TRACER_GDB}) endif() if(TESTS_USE_FORCED_PMEM) unset(ENV{PMEM_IS_PMEM_FORCE}) endif() endfunction() function(check_target name) if(NOT EXISTS ${name}) message(FATAL_ERROR "Tests were not found! If not built, run make first.") endif() endfunction() # Generic command executor which handles failures and prints command output # to specified file. function(execute_with_output out name) check_target(${name}) execute_common(true ${out} ${name} ${ARGN}) endfunction() # Generic command executor which handles failures but ignores output. function(execute_ignore_output name) check_target(${name}) execute_common(true none ${name} ${ARGN}) endfunction() # Executes test command ${name} and verifies its status. # First argument of the command is test directory name. # Optional function arguments are passed as consecutive arguments to # the command. function(execute name) check_target(${name}) execute_common(true ${TRACER}_${TESTCASE} ${name} ${ARGN}) endfunction() # Executes test command ${name} under GDB. # First argument of the command is a gdb batch file. # Second argument of the command is the test command. # Optional function arguments are passed as consecutive arguments to # the command. 
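# A hedged usage sketch; the gdb batch-file name and the extra argument below
# are hypothetical:
#
#   crash_with_gdb(${SRC_DIR}/trigger_crash.gdb ${TEST_EXECUTABLE} some_arg)
#
# The function temporarily sets TRACER to gdb, runs the command through
# execute_common() and then restores the previous tracer.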
function(crash_with_gdb gdb_batch_file name) check_target(${name}) set(PREV_TRACER ${TRACER}) set(TRACER gdb) set(GDB_BATCH_FILE ${gdb_batch_file}) execute_common(true ${TRACER}_${TESTCASE} ${name} ${ARGN}) set(TRACER ${PREV_TRACER}) endfunction() rpma-1.3.0/tests/cmake/run_default.cmake000066400000000000000000000002571443364775400202330ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # include(${SRC_DIR}/../../cmake/helpers.cmake) setup() execute(${TEST_EXECUTABLE}) finish() rpma-1.3.0/tests/cmocka_headers.h000066400000000000000000000005041443364775400167350ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * cmocka_headers.h -- include all headers required by cmocka */ #ifndef LIBRPMA_CMOCKA_H #define LIBRPMA_CMOCKA_H 1 #include #include #include #include #endif /* LIBRPMA_CMOCKA_H */ rpma-1.3.0/tests/drd.supp000066400000000000000000000244201443364775400153210ustar00rootroot00000000000000# # The following suppressions are suited for debug build # of librpma on Ubuntu 22.04 used in the CircleCI. # They may not work for any other OS, OS version, # rdma-core version and for the release build. # # # All suppressions should follow the following format: # # Assessment: (XXX - to mark assessment that is not done yet) # # This suppression indicates a lack of MT safety. # # OS: a version of OS # OFED: a version of OFED if used # libibverbs: a version of libibverbs # librdmacm: a version of librdmacm # rdma-core: a version of the 'rdma-core' package if installed # # Occurs in traces of: # - a function name of public API of librpma # # There are 3 types of suppressions: # # 1) general one (on an external API) # { # Conflicting store of size 4 # drd:ConflictingAccess # ... # fun:ibv_dontfork_range # ... # } # # 2) explicit librpma API call # { # Conflicting store of size 4 # drd:ConflictingAccess # ... # fun:rpma_conn_cfg_set_timeout # ... # } # # 3) detailed external one (on an external API) # { # Conflicting store of size 4 # drd:ConflictingAccess # ... # fun:funA # fun:funB # fun:funC # fun:function_call_used directly form_public_API # ... # } # # # Assessment: this suppression indicates a lack of MT safety. # # syslog(3) has the "MT-Safe env locale" attributes(7) # and therefore it can be considered as not MT-safe by valgrind. # This issue can cause that logs of syslog(3) can be corrupted, # but it does not affect the MT-safety of the librpma library. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # It can occur in traces of all functions of librpma API. # { syslog(): race while reading the name of time zone drd:ConflictingAccess ... fun:__vsyslog_internal fun:*syslog* fun:rpma_log_default_function ... } # # Assessment: these 2 suppressions indicate a lack of MT safety. # # rpma_log_default_function() calls rpma_get_timestamp_prefix() # which calls localtime_r(3). # localtime_r(3) has the "MT-Safe env locale" attributes(7) # and therefore it is considered as not MT-safe by valgrind. # This issue can cause that logs of rpma_log_default_function() can be corrupted, # but it does not affect the MT-safety of the librpma library. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # They can occur in traces of all functions of librpma API. # { Race while reading the name of time zone ("GMT"). drd:ConflictingAccess ... 
fun:__tz_convert fun:rpma_get_timestamp_prefix fun:rpma_log_default_function ... } { Race while reading the name of time zone ("GMT") - non-existing code path (unknown Valgrind issue on CircleCI) drd:ConflictingAccess ... fun:__tz_convert fun:rpma_log_default_function ... } # # Assessment: this suppression does not indicate a lack of MT safety. # # cma_dev_cnt was a global counter of elements populating the global array of # devices detected by librdmacm. It was used as an indicator of already done # initialization. It was checked before locking a mutex required to make any # changes to the global array and changing the counter itself as follows: # # static int cma_dev_cnt; # ... # int ucma_init(void) # { # if (cma_dev_cnt) # return 0; # # pthread_mutex_lock(&mut); # if (cma_dev_cnt) { # pthread_mutex_unlock(&mut); # return 0; # } # ... # cma_dev_cnt = dev_cnt; # ... # pthread_mutex_unlock(&mut); # ... # } # # But having a race, in this case, should do no harm since the counter is also # checked after locking the mutex. So, even writing a new value to the counter # even if it will be torn and read partially by another thread it won't lead # to abnormal behaviour. # # Note: This issue is no longer the case for rdma-core >= v30.0 since there # instead of a global array is a global list. But the logic seems to be # the same: a quick check (a comparison) followed by the mutex lock and # recheck of the condition so the final assessment should be the same. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Conflicting store/load of size 4 drd:ConflictingAccess ... fun:ucma_init ... fun:rdma_getaddrinfo ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # - rpma_ep_listen # { Conflicting store of size 8 drd:ConflictingAccess fun:idm_set fun:ucma_insert_id fun:rdma_create_id2.part.0 ... } # # Assessment: these 2 suppressions indicate a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # - rpma_ep_shutdown # { Conflicting load of size 8 drd:ConflictingAccess fun:idm_clear fun:ucma_remove_id fun:ucma_free_id fun:rdma_destroy_id ... } { Conflicting load of size 8 drd:ConflictingAccess fun:UnknownInlinedFun fun:UnknownInlinedFun fun:ucma_free_id fun:rdma_destroy_id ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_delete # { rpma_conn_req_delete drd:ConflictingAccess obj:*librdmacm.so* fun:rdma_destroy_id ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_mr_reg # - rpma_conn_req_new # { Conflicting store of size 4 drd:ConflictingAccess fun:ibv_dontfork_range ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_mr_dereg # - rpma_conn_req_delete # { Conflicting store of size 4 drd:ConflictingAccess fun:ibv_dofork_range ... } # # Assessment: this suppression does NOT indicate a lack of MT safety. 
# It is a SoftRoCE specific mutex error (Mutex not locked). # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_conn_req_new # - rpma_ep_next_conn_req # { SoftRoCE specific mutex error (Mutex not locked) drd:MutexErr fun:pthread_spin_init@* fun:rxe_create_qp fun:UnknownInlinedFun fun:rdma_create_qp_ex ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: Ubuntu 22.04 # OFED: MLNX_OFED 5.6-2.0.9.0 # libibverbs: 1.14.40.0 # librdmacm: 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_new # - rpma_ep_listen # { Conflicting store of size 8 drd:ConflictingAccess fun:UnknownInlinedFun fun:UnknownInlinedFun fun:rdma_create_id2.part.0 ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_new # - rpma_cq_wait # { Conflicting store of size 8 drd:ConflictingAccess ... fun:ibv_req_notify_cq ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_new # { Conflicting Access drd:ConflictingAccess obj:*librdmacm.so* fun:rpma_conn_req_new ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_ep_listen # { Conflicting Access drd:ConflictingAccess obj:*librdmacm.so* fun:rpma_ep_listen ... } # # Assessment: these 5 suppressions indicate a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # They all occur in traces of: # - rpma_utils_get_ibv_context # { Conflicting Access drd:ConflictingAccess ... obj:*librdmacm.so* fun:rdma_getaddrinfo fun:rpma_info_new fun:rpma_utils_get_ibv_context ... } { Conflicting Access drd:ConflictingAccess ... fun:rdma_create_event_channel obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Conflicting Access drd:ConflictingAccess obj:*librdmacm.so* fun:rdma_create_id fun:rpma_utils_get_ibv_context ... } { Conflicting Access drd:ConflictingAccess obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Conflicting Access drd:ConflictingAccess fun:rdma_bind_addr fun:rpma_info_bind_addr fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicates a lack of MT safety when SoftRoCE is used. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs - 1.14.39.0 # librdmacm - 1.3.39.0 # # Occurs in traces of: # - rpma_conn_req_new # - rpma_ep_next_conn_req # { General suppression related to SoftRoCE issue with mutex drd:MutexErr ... obj:*librxe-rdmav34.so* ... fun:rdma_create_qp_ex fun:rpma_peer_setup_qp ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_srq_delete # { Conflicting Access drd:ConflictingAccess ... fun:rpma_srq_delete ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_srq_new # { Conflicting Access drd:ConflictingAccess ... fun:rpma_srq_new ... 
} # # Assessment: these 2 suppressions indicate a lack of MT safety when SoftRoCE is used. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs - 1.14.39.0 # librdmacm - 1.3.39.0 # # Occurs in traces of: # - rpma_srq_new # { Conditional Error drd:CondErr ... fun:ibv_create_srq* fun:rpma_peer_create_srq fun:rpma_srq_new ... } { General suppression related to SoftRoCE issue with mutex drd:MutexErr ... fun:pthread_mutex_init@* ... fun:rpma_srq_new ... } rpma-1.3.0/tests/helgrind.supp000066400000000000000000000216151443364775400163470ustar00rootroot00000000000000# # The following suppressions are suited for debug build # of librpma on Ubuntu 22.04 used in the CircleCI. # They may not work for any other OS, OS version, # rdma-core version and for the release build. # # # All suppressions should follow the following format: # # Assessment: (XXX - to mark assessment that is not done yet) # # This suppression indicates a lack of MT safety. # # OS: a version of OS # OFED: a version of OFED if used # libibverbs: a version of libibverbs # librdmacm: a version of librdmacm # rdma-core: a version of the 'rdma-core' package if installed # # Occurs in traces of: # - a function name of public API of librpma # # There are 3 types of suppressions: # # 1) general one (on an external API) # { # Possible data race during write of size 4 # Helgrind:Race # ... # fun:ibv_dontfork_range # ... # } # # 2) explicit librpma API call # { # Possible data race during write of size 4 # Helgrind:Race # ... # fun:rpma_conn_cfg_set_timeout # ... # } # # 3) detailed external one (on an external API) # { # Possible data race during write of size 4 # Helgrind:Race # ... # fun:funA # fun:funB # fun:funC # fun:function_call_used directly form_public_API # ... # } # # # Assessment: this suppression indicates a lack of MT safety. # # syslog(3) has the "MT-Safe env locale" attributes(7) # and therefore it can be considered as not MT-safe by valgrind. # This issue can cause that logs of syslog(3) can be corrupted, # but it does not affect the MT-safety of the librpma library. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # It can occur in traces of all functions of librpma API. # { syslog(): race while reading the name of time zone Helgrind:Race ... fun:__vsyslog_internal fun:*syslog* fun:rpma_log_default_function ... } # # Assessment: these 2 suppressions indicate a lack of MT safety. # # rpma_log_default_function() calls rpma_get_timestamp_prefix() # which calls localtime_r(3). # localtime_r(3) has the "MT-Safe env locale" attributes(7) # and therefore it is considered as not MT-safe by valgrind. # This issue can cause that logs of rpma_log_default_function() can be corrupted, # but it does not affect the MT-safety of the librpma library. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # They can occur in traces of all functions of librpma API. # { Race while reading the name of time zone ("GMT"). Helgrind:Race ... fun:__tz_convert fun:rpma_get_timestamp_prefix fun:rpma_log_default_function ... } { Race while reading the name of time zone ("GMT") - non-existing code path (unknown Valgrind issue on CircleCI) Helgrind:Race ... fun:__tz_convert fun:rpma_log_default_function ... } # # Assessment: this suppression does not indicate a lack of MT safety. # # For details please see drd.supp. 
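# (In short: ucma_init() re-checks the cma_dev_cnt counter after taking the
# mutex, so the racy fast-path read reported here should be harmless.)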
# # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Possible data race during write of size 4 Helgrind:Race ... fun:ucma_init ... fun:rdma_getaddrinfo ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # - rpma_ep_listen { Possible data race during read of size 8 Helgrind:Race # fun:idm_set or fun:UnknownInlinedFun ... fun:ucma_insert_id fun:rdma_create_id2.part.0 ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # - rpma_ep_shutdown # - rpma_conn_req_delete { Possible data race during read of size 8 Helgrind:Race fun:idm_clear fun:ucma_remove_id fun:ucma_free_id fun:rdma_destroy_id ... } { Possible data race during read of size 8 Helgrind:Race fun:UnknownInlinedFun fun:UnknownInlinedFun fun:ucma_free_id fun:rdma_destroy_id ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_delete # { rpma_conn_req_delete Helgrind:Race obj:*librdmacm.so* fun:rdma_destroy_id ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Possible data race during read of size 8 Helgrind:Race # fun:open_cdev or none ... fun:rdma_create_event_channel fun:ucma_alloc_id fun:rdma_create_id2.part.0 ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Possible data race during read of size 4 Helgrind:Race # fun:ucma_get_device or none ... fun:rdma_bind_addr fun:rpma_info_bind_addr fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_mr_reg # - rpma_conn_req_new # { Possible data race during read of size 4 Helgrind:Race fun:ibv_dontfork_range ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_mr_dereg # - rpma_conn_req_delete # { Possible data race during read of size 4 Helgrind:Race fun:ibv_dofork_range ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS: Ubuntu 22.04 # OFED: MLNX_OFED 5.6-2.0.9.0 # libibverbs: 1.14.40.0 # librdmacm: 1.3.40.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # - rpma_conn_req_new # - rpma_ep_listen # { Possible data race during write of size 8 Helgrind:Race fun:UnknownInlinedFun fun:UnknownInlinedFun fun:rdma_create_id2.part.0 ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_new # - rpma_cq_wait # { Possible data race during write of size 8 Helgrind:Race ... fun:ibv_req_notify_cq ... 
} # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_conn_req_new # { Possible data race Helgrind:Race obj:*librdmacm.so* fun:rpma_conn_req_new ... } # # Assessment: this suppression indicates a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # Occurs in traces of: # - rpma_ep_listen # { Possible data race Helgrind:Race obj:*librdmacm.so* fun:rpma_ep_listen ... } # # Assessment: these 6 suppressions indicate a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # They occur in traces of: # - rpma_utils_get_ibv_context # { Possible data race Helgrind:Race obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Possible data race Helgrind:Race fun:rdma_create_event_channel obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Possible data race Helgrind:Race obj:*librdmacm.so* fun:rdma_create_event_channel obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Possible data race Helgrind:Race fun:strlen fun:__vfprintf_internal fun:__vasprintf_internal fun:__asprintf_chk fun:rdma_create_event_channel obj:*librdmacm.so* fun:rpma_utils_get_ibv_context ... } { Possible data race Helgrind:Race ... obj:*librdmacm.so* fun:rdma_getaddrinfo fun:rpma_info_new fun:rpma_utils_get_ibv_context ... } { Possible data race Helgrind:Race obj:*librdmacm.so* fun:rdma_create_id fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicate a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # They occur in traces of: # - rpma_srq_delete # { Possible data race Helgrind:Race ... fun:rpma_srq_delete ... } # # Assessment: this suppression indicate a lack of MT safety. # # OS - Ubuntu 22.04 # OFED - MLNX_OFED 5.6-2.0.9.0 # libibverbs - 1.14.40.0 # librdmacm - 1.3.40.0 # # They occur in traces of: # - rpma_srq_new # { Possible data race Helgrind:Race ... fun:ibv_create_srq* fun:rpma_peer_create_srq fun:rpma_srq_new ... } rpma-1.3.0/tests/match000077500000000000000000000163131443364775400146630ustar00rootroot00000000000000#!/usr/bin/env perl # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2014-2020, Intel Corporation # # # match -- compare an output file with expected results # # usage: match [-adoqv] [match-file]... # # this script compares the output from a test run, stored in a file, with # the expected output. comparison is done line-by-line until either all # lines compare correctly (exit code 0) or a miscompare is found (exit # code nonzero). # # expected output is stored in a ".match" file, which contains a copy of # the expected output with embedded tokens for things that should not be # exact matches. the supported tokens are: # # $(N) an integer (i.e. 
one or more decimal digits) # $(NC) one or more decimal digits with comma separators # $(FP) a floating point number # $(S) ascii string # $(X) hex number # $(XX) hex number prefixed with 0x # $(W) whitespace # $(nW) non-whitespace # $(*) any string # $(DD) output of a "dd" run # $(OPT) line is optional (may be missing, matched if found) # $(OPX) ends a contiguous list of $(OPT)...$(OPX) lines, at least # one of which must match # ${string1|string2} string1 OR string2 # # Additionally, if any "X.ignore" file exists, strings or phrases found per # line in the file will be ignored if found as a substring in the # corresponding output file (making it easy to skip entire output lines). # # arguments are: # # -a find all files of the form "X.match" in the current # directory and match them again the corresponding file "X". # # -o custom output filename - only one match file can be given # # -d debug -- show lots of debug output # # -q don't print log files on mismatch # # -v verbose -- show every line as it is being matched # use strict; use Getopt::Std; use Encode; use v5.10; select STDERR; binmode(STDOUT, ":utf8"); binmode(STDERR, ":utf8"); my $Me = $0; $Me =~ s,.*/,,; our ($opt_a, $opt_d, $opt_q, $opt_v, $opt_o); $SIG{HUP} = $SIG{INT} = $SIG{TERM} = $SIG{__DIE__} = sub { die @_ if $^S; my $errstr = shift; die "FAIL: $Me: $errstr"; }; sub usage { my $msg = shift; warn "$Me: $msg\n" if $msg; warn "Usage: $Me [-adqv] [match-file]...\n"; warn " or: $Me [-dqv] -o output-file match-file...\n"; exit 1; } getopts('adoqv') or usage; my %match2file; if ($opt_a) { usage("-a and filename arguments are mutually exclusive") if $#ARGV != -1; opendir(DIR, '.') or die "opendir: .: $!\n"; my @matchfiles = grep { /(.*)\.match$/ && -f $1 } readdir(DIR); closedir(DIR); die "no files found to process\n" unless @matchfiles; foreach my $mfile (@matchfiles) { die "$mfile: $!\n" unless open(F, $mfile); close(F); my $ofile = $mfile; $ofile =~ s/\.match$//; die "$mfile found but cannot open $ofile: $!\n" unless open(F, $ofile); close(F); $match2file{$mfile} = $ofile; } } elsif ($opt_o) { usage("-o argument requires two paths") if $#ARGV != 1; $match2file{$ARGV[1]} = $ARGV[0]; } else { usage("no match-file arguments found") if $#ARGV == -1; # to improve the failure case, check all filename args exist and # are provided in pairs now, before going through and processing them foreach my $mfile (@ARGV) { my $ofile = $mfile; usage("$mfile: not a .match file") unless $ofile =~ s/\.match$//; usage("$mfile: $!") unless open(F, $mfile); close(F); usage("$ofile: $!") unless open(F, $ofile); close(F); $match2file{$mfile} = $ofile; } } my $mfile; my $ofile; my $ifile; print "Files to be processed:\n" if $opt_v; foreach $mfile (sort keys %match2file) { $ofile = $match2file{$mfile}; $ifile = $ofile . ".ignore"; $ifile = undef unless (-f $ifile); if ($opt_v) { print " match-file \"$mfile\" output-file \"$ofile\""; if ($ifile) { print " ignore-file $ifile\n"; } else { print "\n"; } } match($mfile, $ofile, $ifile); } exit 0; # # strip_it - user can optionally ignore lines from files that contain # any number of substrings listed in a file called "X.ignore" where X # is the name of the output file. 
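# For instance (the file names below are illustrative only), if "foo.ignore"
# contains the single line:
#
#   some noisy debug banner
#
# then every line of the output file "foo" containing that substring is
# dropped before "foo" is compared against "foo.match".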
# sub strip_it { my ($ifile, $file, $input) = @_; # if there is no ignore file just return unaltered input return $input unless $ifile; my @lines_in = split /^/, $input; my $output; my $line_in; my @i_file = split /^/, snarf($ifile); my $i_line; my $ignore_it = 0; foreach $line_in (@lines_in) { my @i_lines = @i_file; foreach $i_line (@i_lines) { chop($i_line); if (index($line_in, $i_line) != -1) { $ignore_it = 1; if ($opt_v) { print "Ignoring (from $file): $line_in"; } } } if ($ignore_it == 0) { $output .= $line_in; } $ignore_it = 0; } return $output; } # # match -- process a match-file, output-file pair # sub match { my ($mfile, $ofile, $ifile) = @_; my $pat; my $output = snarf($ofile); $output = strip_it($ifile, $ofile, $output); my $all_lines = $output; my $line_pat = 0; my $line_out = 0; my $opt = 0; my $opx = 0; my $opt_found = 0; my $fstr = snarf($mfile); $fstr = strip_it($ifile, $mfile, $fstr); for (split /^/, $fstr) { $pat = $_; $line_pat++; $line_out++; s/([*+?|{}.\\^\$\[()])/\\$1/g; s/\\\$\\\(FP\\\)/[-+]?\\d*\\.?\\d+([eE][-+]?\\d+)?/g; s/\\\$\\\(N\\\)/[-+]?\\d+/g; s/\\\$\\\(NC\\\)/[-+]?\\d+(,[0-9]+)*/g; s/\\\$\\\(\\\*\\\)/\\p{Print}*/g; s/\\\$\\\(S\\\)/\\P{IsC}+/g; s/\\\$\\\(X\\\)/\\p{XPosixXDigit}+/g; s/\\\$\\\(XX\\\)/0x\\p{XPosixXDigit}+/g; s/\\\$\\\(W\\\)/\\p{Blank}*/g; s/\\\$\\\(nW\\\)/\\p{Graph}*/g; s/\\\$\\\{([^|]*)\\\|([^|]*)\\\}/($1|$2)/g; s/\\\$\\\(DD\\\)/\\d+\\+\\d+ records in\n\\d+\\+\\d+ records out\n\\d+ bytes \\\(\\d+ .B\\\) copied, [.0-9e-]+[^,]*, [.0-9]+ .B.s/g; if (s/\\\$\\\(OPT\\\)//) { $opt = 1; } elsif (s/\\\$\\\(OPX\\\)//) { $opx = 1; } else { $opt_found = 0; } if ($opt_v) { my @lines = split /\n/, $output; my $line; if (@lines) { $line = $lines[0]; } else { $line = "[EOF]"; } printf("%s:%-3d %s%s:%-3d %s\n", $mfile, $line_pat, $pat, $ofile, $line_out, $line); } print " => /$_/\n" if $opt_d; print " [$output]\n" if $opt_d; unless ($output =~ s/^$_//) { if ($opt || ($opx && $opt_found)) { printf("%s:%-3d [skipping optional line]\n", $ofile, $line_out) if $opt_v; $line_out--; $opt = 0; } else { if (!$opt_v) { if ($opt_q) { print "[MATCHING FAILED]\n"; } else { print "[MATCHING FAILED, COMPLETE FILE ($ofile) BELOW]\n$all_lines\n[EOF]\n"; } $opt_v = 1; match($mfile, $ofile); } die "$mfile:$line_pat did not match pattern\n"; } } elsif ($opt) { $opt_found = 1; } $opx = 0; } if ($output ne '') { if (!$opt_v) { if ($opt_q) { print "[MATCHING FAILED]\n"; } else { print "[MATCHING FAILED, COMPLETE FILE ($ofile) BELOW]\n$all_lines\n[EOF]\n"; } } # make it a little more print-friendly... $output =~ s/\n/\\n/g; die "line $line_pat: unexpected output: \"$output\"\n"; } } # # snarf -- slurp an entire file into memory # sub snarf { my ($file) = @_; my $fh; open($fh, '<', $file) or die "$file $!\n"; local $/; $_ = <$fh>; close $fh; # check known encodings or die my $decoded; my @encodings = ("UTF-8", "UTF-16", "UTF-16LE", "UTF-16BE"); foreach my $enc (@encodings) { eval { $decoded = decode( $enc, $_, Encode::FB_CROAK ) }; if (!$@) { $decoded =~ s/\R/\n/g; return $decoded; } } die "$Me: ERROR: Unknown file encoding"; } rpma-1.3.0/tests/memcheck-libibverbs-librdmacm.supp000066400000000000000000000124201443364775400223720ustar00rootroot00000000000000# # The following suppressions are suited for debug build # of librpma on Ubuntu 22.04 used in the CircleCI. # They may not work for any other OS, OS version, # rdma-core version and for the release build. 
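#
# Each entry below follows the same convention as drd.supp and helgrind.supp:
# an assessment, the OS and rdma-core library versions it was observed with,
# the librpma calls it occurs under, and then the suppression itself, e.g.:
#
#   {
#      <short description>
#      Memcheck:Leak
#      match-leak-kinds: possible
#      fun:calloc
#      ...
#      fun:rpma_utils_get_ibv_context
#      ...
#   }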
# # # Assessment: this suppression indicates a memory leak # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Verbs device, once it is opened, it is never closed. Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:try_driver fun:try_drivers fun:try_all_drivers fun:ibverbs_get_device_list fun:ibv_get_device_list* fun:ucma_init ... } # # Assessment: this suppression indicates a memory leak # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { Sysfs device, once it is allocated, is not freed. Memcheck:Leak match-leak-kinds: possible fun:calloc fun:find_sysfs_devs_nl_cb fun:nl_recvmsgs_report fun:nl_recvmsgs fun:rdmanl_get_devices fun:find_sysfs_devs_nl fun:ibverbs_get_device_list fun:ibv_get_device_list* fun:ucma_init ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:rdma_getaddrinfo fun:rpma_info_new fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_conn_req_new # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:rdma_create_qp_ex fun:rpma_peer_setup_qp fun:rpma_conn_req_new_from_id fun:rpma_conn_req_new ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_info_resolve_addr # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:verbs_open_device ... fun:rdma_get_cm_event ... fun:rpma_info_resolve_addr } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:malloc ... fun:rdma_get_cm_event ... fun:rpma_info_resolve_addr fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:verbs_open_device ... fun:rpma_info_bind_addr fun:rpma_utils_get_ibv_context ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_ep_next_conn_req # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:calloc ... fun:rdma_create_qp_ex fun:rpma_peer_setup_qp fun:rpma_conn_req_new_from_id fun:rpma_conn_req_new_from_cm_event fun:rpma_ep_next_conn_req ... } # # Assessment: this suppression indicates a memory leak # # OS: Ubuntu 22.04 # libibverbs, version 1.14.39.0 # librdmacm, version 1.3.39.0 # # Occurs in traces of: # - rpma_utils_get_ibv_context # { To be analysed later Memcheck:Leak match-leak-kinds: possible fun:malloc ... fun:rpma_info_bind_addr fun:rpma_utils_get_ibv_context ... 
} # # Assessment: this suppression indicates a memory leak # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of MTT framework. # { Memory leak in pthread_create Memcheck:Leak match-leak-kinds: possible ... fun:calloc fun:allocate_dtv fun:_dl_allocate_tls fun:allocate_stack fun:pthread_create@@GLIBC_* fun:mtt_run fun:main } # # Assessment: this suppression indicates that a conditional jump # or move depends on uninitialised value(s) # # OS: ubuntu-2204:2022.04.1 of CircleCI # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of # - rpma_srq_new # { Conditional jump or move depends on uninitialised value(s) Memcheck:Cond obj:/usr/lib/x86_64-linux-gnu/libibverbs.so* fun:ibv_cmd_create_srq obj:/usr/lib/x86_64-linux-gnu/libibverbs/librxe-rdma*.so fun:ibv_create_srq fun:rpma_peer_create_srq fun:rpma_srq_new ... } # # Assessment: this suppression indicates that a conditional jump # or move depends on uninitialised value(s) # # OS: Ubuntu 22.04 # libibverbs: 1.14.39.0 # librdmacm: 1.3.39.0 # # Occurs in traces of # - rpma_srq_new # { Conditional jump or move depends on uninitialised value(s) Memcheck:Cond fun:ibv_icmd_create_srq fun:ibv_cmd_create_srq ... fun:ibv_create_srq* fun:rpma_peer_create_srq fun:rpma_srq_new ... } rpma-1.3.0/tests/memcheck-libnl.supp000066400000000000000000000005021443364775400174150ustar00rootroot00000000000000{ The list of translations, once it is created, is not freed Memcheck:Leak match-leak-kinds: possible fun:calloc obj:*/lib*/libnl-*.so* } { The name of routing table, once it is created, is not freed Memcheck:Leak match-leak-kinds: possible fun:malloc fun:strdup obj:*/lib*/libnl-*.so* } rpma-1.3.0/tests/multithreaded/000077500000000000000000000000001443364775400164705ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/CMakeLists.txt000066400000000000000000000005011443364775400212240ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # add_subdirectory(conn_cfg) add_subdirectory(conn) add_subdirectory(ep) add_subdirectory(example) add_subdirectory(log) add_subdirectory(mr) add_subdirectory(peer) add_subdirectory(srq) add_subdirectory(srq_cfg) add_subdirectory(utils) rpma-1.3.0/tests/multithreaded/common/000077500000000000000000000000001443364775400177605ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/common/mtt.c000066400000000000000000000326431443364775400207400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * mtt.c -- a multithreaded tests' runner */ #include #include #include #include #include #include #include #include #include #include "mtt.h" #define TIMEOUT_SECONDS 60 static struct { /* mutex and conditional used to start all threads synchronously */ pthread_mutex_t mtx; pthread_cond_t cond; volatile unsigned threads_num_waiting; struct timespec timeout; } mtt_sync; struct mtt_thread_args { unsigned id; /* a thread id */ void *state; /* a thread-specific state */ struct mtt_test *test; struct mtt_result ret; /* a thread return object */ }; /* * call either init or fini function (func) using the provided test object * (test), the thread arguments, and the result object. 
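 *
 * For example, the call used in mtt_thread_main():
 *
 *	MTT_CALL_INIT_FINI(test, thread_init_func, ta, tr);
 *
 * expands to:
 *
 *	(test)->thread_init_func((ta)->id, (test)->prestate, &(ta)->state, (tr));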
*/ #define MTT_CALL_INIT_FINI(test, func, thread_args, result) \ (test)->func((thread_args)->id, (test)->prestate, \ &(thread_args)->state, (result)) /* * mtt_thread_main -- wait for the synchronization conditional and run the test */ static void * mtt_thread_main(void *arg) { struct mtt_thread_args *ta = (struct mtt_thread_args *)arg; struct mtt_test *test = ta->test; struct mtt_result *tr = &ta->ret; struct mtt_result tr_dummy = {0}; int result; if (test->thread_init_func) { MTT_CALL_INIT_FINI(test, thread_init_func, ta, tr); if (tr->ret) { /* unblock the main thread waiting for this one */ ++mtt_sync.threads_num_waiting; return tr; } } /* wait for all threads to start */ result = pthread_mutex_lock(&mtt_sync.mtx); if (result) { MTT_ERR(tr, "pthread_mutex_lock", result); return tr; } ++mtt_sync.threads_num_waiting; result = pthread_cond_timedwait(&mtt_sync.cond, &mtt_sync.mtx, &mtt_sync.timeout); if (result) { MTT_ERR(tr, "pthread_cond_timedwait", result); (void) pthread_mutex_unlock(&mtt_sync.mtx); goto err_thread_fini_func; } result = pthread_mutex_unlock(&mtt_sync.mtx); if (result) { MTT_ERR(tr, "pthread_mutex_unlock", result); goto err_thread_fini_func; } test->thread_func(ta->id, test->prestate, ta->state, tr); err_thread_fini_func: if (test->thread_fini_func) { /* * if the thread result is already non-zero provide tr_dummy * instead to avoid overwriting the result */ MTT_CALL_INIT_FINI(test, thread_fini_func, ta, (tr->ret ? &tr_dummy : tr)); } return tr; } /* print an error message for errors not related to any specific thread */ #define MTT_INTERNAL_ERR(fmt, ...) \ fprintf(stderr, "error: " fmt "\n", ##__VA_ARGS__) /* * mtt_threads_sync_unblock -- wait for threads to spawn and unblock all * threads synchronously */ static int mtt_threads_sync_unblock(unsigned threads_num) { int ret; int done = 0; do { ret = pthread_mutex_lock(&mtt_sync.mtx); if (ret) { MTT_INTERNAL_ERR("pthread_mutex_lock() failed: %s", strerror(ret)); return ret; } if (mtt_sync.threads_num_waiting == threads_num) { ret = pthread_cond_broadcast(&mtt_sync.cond); if (ret) { MTT_INTERNAL_ERR("pthread_cond_broadcast() failed: %s", strerror(ret)); } /* * If broadcasting has failed the waiting threads will * time out so the test will exit with appropriate * error messages. Nothing more can be done. 
*/ done = 1; } ret = pthread_mutex_unlock(&mtt_sync.mtx); if (ret) { MTT_INTERNAL_ERR("pthread_mutex_unlock() failed: %s", strerror(ret)); return ret; } } while (!done); return ret; } /* * mtt_init -- initialize the global state */ static int mtt_init() { int ret; ret = pthread_mutex_init(&mtt_sync.mtx, NULL); if (ret) { MTT_INTERNAL_ERR("pthread_mutex_init() failed: %s", strerror(ret)); return ret; } ret = pthread_cond_init(&mtt_sync.cond, NULL); if (ret) { MTT_INTERNAL_ERR("pthread_cond_init() failed: %s", strerror(ret)); (void) pthread_mutex_destroy(&mtt_sync.mtx); return ret; } ret = clock_gettime(CLOCK_REALTIME, &mtt_sync.timeout); if (ret) { MTT_INTERNAL_ERR("clock_gettime() failed: %s", strerror(errno)); (void) pthread_cond_destroy(&mtt_sync.cond); (void) pthread_mutex_destroy(&mtt_sync.mtx); return ret; } mtt_sync.timeout.tv_sec += TIMEOUT_SECONDS; mtt_sync.threads_num_waiting = 0; return 0; } /* * mtt_fini -- clean up the global state */ static int mtt_fini() { int ret; int result = 0; ret = pthread_mutex_destroy(&mtt_sync.mtx); if (ret) result = ret; ret = pthread_cond_destroy(&mtt_sync.cond); if (ret) result = ret; return result; } /* * mtt_parse_args -- process the command line arguments * RETURN -1 on error */ int mtt_parse_args(int argc, char *argv[], struct mtt_args *args) { if (argc < 3) { fprintf(stderr, "usage: %s []\n", argv[0]); return -1; } args->threads_num = strtoul(argv[1], NULL, 10); args->addr = argv[2]; if (argc > 3) args->port = strtoul(argv[3], NULL, 10); return 0; } /* * mtt_base_file_name -- find the first character of the file name in the full * file path by tracking the '/' characters. This allows extracting the file * name from the path in-situ and using the file name without introducing any * additional resources. */ const char * mtt_base_file_name(const char *file_name) { const char *base_file_name = strrchr(file_name, '/'); if (!base_file_name) base_file_name = file_name; else /* skip '/' */ base_file_name++; return base_file_name; } /* * mtt_malloc_aligned -- allocate an aligned chunk of memory */ void * mtt_malloc_aligned(size_t size, struct mtt_result *tr) { long pagesize = sysconf(_SC_PAGESIZE); if (pagesize < 0) { if (tr) MTT_ERR(tr, "sysconf", errno); else CHILD_ERR("[CHILD]", "sysconf", strerror(errno)); return NULL; } /* allocate a page size aligned local memory pool */ void *mem; int ret = posix_memalign(&mem, (size_t)pagesize, size); if (ret) { if (tr) MTT_ERR(tr, "posix_memalign", errno); else CHILD_ERR("[CHILD]", "posix_memalign", strerror(errno)); return NULL; } /* zero the allocated memory */ memset(mem, 0, size); return mem; } /* print an error message prepended with thread's number */ #define MTT_TEST_ERR(thread_num, fmt, ...) \ fprintf(stderr, "[thread #%d] " fmt "\n", \ thread_num, ##__VA_ARGS__) /* * mtt_start_child_process -- start the child process */ static int mtt_start_child_process(mtt_child_process_func child_process_func, void *prestate) { int pid; int ret; /* * The another side of the connection (server or client) * has to be started as a separate process using fork() * (not as a thread), because those tests are run under * valgrind and valgrind must not check MT-safety of * server vs. client, because they are always separate * processes. 
*/ switch ((pid = fork())) { case 0: /* this is the child */ ret = child_process_func(prestate); exit(ret); case -1: /* fork failed */ perror("fork"); return -1; } /* the parent returns a PID of the child process */ return pid; } /* * mtt_check_child_process -- check if the child process terminated */ static int mtt_check_child_process(int child_pid) { int status; if (child_pid && waitpid(child_pid, &status, WNOHANG) != 0) { if (WIFEXITED(status) && WEXITSTATUS(status)) { MTT_INTERNAL_ERR("child process failed with status %i", WEXITSTATUS(status)); } else { MTT_INTERNAL_ERR("child process has already exited with status 0"); } return -1; } return 0; } /* * mtt_run -- run the provided test using provided number of threads */ int mtt_run(struct mtt_test *test, unsigned threads_num) { pthread_t *threads; struct mtt_thread_args *threads_args; struct mtt_thread_args *ta; struct mtt_result *tr; struct mtt_result tr_local = {0}; unsigned threads_num_to_join = 0; unsigned threads_num_to_fini = 0; int child_pid = 0; int result = 0; unsigned i; int ret; /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); if (test->child_start == MTT_START_CHILD_BEFORE_PRESTATE_INIT_FUNC && test->child_process_func) { child_pid = mtt_start_child_process(test->child_process_func, test->child_prestate); if (child_pid == -1) { MTT_INTERNAL_ERR("starting the child process failed"); return -1; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_kill_child; } /* initialize the prestate */ if (test->prestate_init_func) { test->prestate_init_func(test->prestate, &tr_local); if (tr_local.ret) { MTT_INTERNAL_ERR("%s", tr_local.errmsg); result = tr_local.ret; if (child_pid) (void) mtt_check_child_process(child_pid); goto err_kill_child; } } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_prestate_fini_func; /* allocate threads and their arguments */ threads = calloc(threads_num, sizeof(pthread_t)); if (threads == NULL) { MTT_INTERNAL_ERR("calloc failed"); result = -1; goto err_prestate_fini_func; } threads_args = calloc(threads_num, sizeof(struct mtt_thread_args)); if (threads_args == NULL) { MTT_INTERNAL_ERR("calloc failed"); result = -1; goto err_free_threads; } if (test->child_start == MTT_START_CHILD_BEFORE_THREAD_SEQ_INIT_FUNC && test->child_process_func) { child_pid = mtt_start_child_process(test->child_process_func, test->child_prestate); if (child_pid == -1) { MTT_INTERNAL_ERR("starting the child process failed"); result = -1; goto err_free_threads_args; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_free_threads_args; } /* initialize threads' arguments */ for (i = 0; i < threads_num; i++) { ta = &threads_args[i]; ta->id = i; ta->test = test; if (test->thread_seq_init_func) { MTT_CALL_INIT_FINI(test, thread_seq_init_func, ta, &ta->ret); if (ta->ret.ret) { result = ta->ret.ret; MTT_TEST_ERR(ta->id, "%s", ta->ret.errmsg); goto err_thread_seq_fini_func; } } ++threads_num_to_fini; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_thread_seq_fini_func; if (test->child_start == MTT_START_CHILD_BEFORE_THREAD_INIT_FUNC && test->child_process_func) { child_pid = mtt_start_child_process(test->child_process_func, test->child_prestate); if (child_pid == -1) { MTT_INTERNAL_ERR("starting the child process failed"); result = -1; goto err_thread_seq_fini_func; } if (child_pid && (result = 
mtt_check_child_process(child_pid))) goto err_thread_seq_fini_func; } /* * The global initialization has to be as close as possible to spawning * threads since it also calculates an absolute timeout value common * for all threads. */ result = mtt_init(); if (result) goto err_thread_seq_fini_func; /* create threads */ for (i = 0; i < threads_num; i++) { result = pthread_create(&threads[i], NULL, mtt_thread_main, &threads_args[i]); if (result != 0) { MTT_TEST_ERR(i, "pthread_create() failed: %s", strerror(result)); break; } ++threads_num_to_join; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_mtt_fini; if (test->child_start == MTT_START_CHILD_BEFORE_THREAD_FUNC && test->child_process_func) { child_pid = mtt_start_child_process(test->child_process_func, test->child_prestate); if (child_pid == -1) { MTT_INTERNAL_ERR("starting the child process failed"); result = -1; goto err_mtt_fini; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_mtt_fini; } ret = mtt_threads_sync_unblock(threads_num_to_join); if (ret) result = ret; if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_mtt_fini; if (test->child_start == MTT_START_CHILD_BEFORE_JOINING_THREADS && test->child_process_func) { child_pid = mtt_start_child_process(test->child_process_func, test->child_prestate); if (child_pid == -1) { MTT_INTERNAL_ERR("starting the child process failed"); result = -1; goto err_mtt_fini; } if (child_pid && (result = mtt_check_child_process(child_pid))) goto err_mtt_fini; } /* wait for threads to join */ for (i = 0; i < threads_num_to_join; i++) { ret = pthread_join(threads[i], (void **)&tr); if (ret != 0) { MTT_TEST_ERR(i, "pthread_join() failed: %s", strerror(ret)); result = ret; } else if (tr == NULL) { MTT_TEST_ERR(i, "returned a NULL result"); result = -1; } else if (tr->ret != 0) { MTT_TEST_ERR(i, "%s", tr->errmsg); result = tr->ret; } } err_mtt_fini: ret = mtt_fini(); if (ret) result = ret; err_thread_seq_fini_func: /* clean up threads' arguments */ if (test->thread_seq_fini_func) { for (i = 0; i < threads_num_to_fini; i++) { ta = &threads_args[i]; MTT_CALL_INIT_FINI(test, thread_seq_fini_func, ta, &tr_local); if (tr_local.ret) { MTT_TEST_ERR(i, "%s", tr_local.errmsg); result = tr_local.ret; tr_local.ret = 0; } } } err_free_threads_args: free(threads_args); err_free_threads: free(threads); err_prestate_fini_func: /* clean up the prestate */ if (test->prestate_fini_func) { test->prestate_fini_func(test->prestate, &tr_local); if (tr_local.ret) { MTT_INTERNAL_ERR("%s", tr_local.errmsg); result = tr_local.ret; } } err_kill_child: if (child_pid) { if (child_pid == -1) { /* starting the child process failed */ return -1; } /* check if the child process is still running */ if (kill(child_pid, 0) == 0) { /* kill the child process */ if (kill(child_pid, SIGTERM)) (void) kill(child_pid, SIGKILL); } } return result; } rpma-1.3.0/tests/multithreaded/common/mtt.h000066400000000000000000000200351443364775400207350ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2021-2022, Intel Corporation */ /* * mtt.h -- a multithreaded tests' API * * For an example of how to use this API please see already existing * multithreaded tests especially the example test. 
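 *
 * Below is a minimal sketch of such a test. rpma_operation_under_test()
 * is a hypothetical placeholder for the librpma call being exercised;
 * real tests usually also provide prestate and per-thread init/fini
 * steps (see struct mtt_test below):
 *
 *	static void
 *	thread(unsigned id, void *prestate, void *state,
 *			struct mtt_result *result)
 *	{
 *		int ret = rpma_operation_under_test();
 *		if (ret)
 *			MTT_RPMA_ERR(result, "rpma_operation_under_test", ret);
 *	}
 *
 *	int
 *	main(int argc, char *argv[])
 *	{
 *		struct mtt_args args = {0};
 *
 *		if (mtt_parse_args(argc, argv, &args))
 *			return -1;
 *
 *		struct mtt_test test = {
 *			.prestate = NULL,
 *			.thread_func = thread
 *		};
 *
 *		return mtt_run(&test, args.threads_num);
 *	}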
*/ #ifndef MTT #define MTT #include #include #define KILOBYTE 1024 /* arguments coming from the command line */ struct mtt_args { unsigned threads_num; char *addr; unsigned port; }; int mtt_parse_args(int argc, char *argv[], struct mtt_args *args); #define MTT_PORT_STR_MAX 6 #define MTT_PORT_STR _mtt_port_str #define MTT_PORT_INIT char _mtt_port_str[MTT_PORT_STR_MAX] #define MTT_PORT_SET(base_port, thread_id) \ snprintf(MTT_PORT_STR, MTT_PORT_STR_MAX, "%u", (base_port) + (thread_id)) #define MTT_ERRMSG_MAX 512 /* a store for any thread error message and the return value */ struct mtt_result { int ret; char errmsg[MTT_ERRMSG_MAX]; }; /* * mtt_base_file_name -- extract the exact file name from a file name with path */ const char *mtt_base_file_name(const char *file_name); void *mtt_malloc_aligned(size_t size, struct mtt_result *tr); /* on child's error print the error message to stderr */ #define CHILD_ERR_MSG(child_name, msg) \ do { \ fprintf(stderr, "%s %s:%d %s() -> %s\n", \ child_name, mtt_base_file_name(__FILE__), \ __LINE__, __func__, msg); \ } while (0) #define CHILD_ERR(child_name, func, msg) \ do { \ fprintf(stderr, "%s %s:%d %s() -> %s() failed: %s\n", \ child_name, mtt_base_file_name(__FILE__), \ __LINE__, __func__, func, msg); \ } while (0) #define CHILD_RPMA_ERR(child_name, func, err) \ CHILD_ERR(child_name, func, rpma_err_2str(err)) #define SERVER_ERR_MSG(msg) CHILD_ERR_MSG("[SERVER]", msg) #define SERVER_RPMA_ERR(func, err) CHILD_RPMA_ERR("[SERVER]", func, err) /* on error populate the result and the error message */ #define MTT_ERR_MSG(result, msg, err, ...) \ do { \ if ((result) == NULL) \ break; \ char msg_buf[MTT_ERRMSG_MAX / 2]; \ snprintf(msg_buf, MTT_ERRMSG_MAX / 2 - 1, \ msg, ##__VA_ARGS__); \ (result)->ret = err; \ snprintf((result)->errmsg, MTT_ERRMSG_MAX - 1, \ "%s:%d %s() -> %s\n", \ mtt_base_file_name(__FILE__), __LINE__, __func__, \ msg_buf); \ } while (0) /* on error populate the result and the error string */ #define MTT_ERR(result, func, err) \ do { \ if ((result) == NULL) \ break; \ (result)->ret = err; \ snprintf((result)->errmsg, MTT_ERRMSG_MAX - 1, \ "%s:%d %s() -> %s() failed: %s\n", \ mtt_base_file_name(__FILE__), __LINE__, __func__, \ func, strerror(err)); \ } while (0) /* on librpma error populate the result and the error string */ #define MTT_RPMA_ERR(result, func, err) \ do { \ if ((result) == NULL) \ break; \ (result)->ret = err; \ snprintf((result)->errmsg, MTT_ERRMSG_MAX - 1, \ "%s:%d %s() -> %s() failed: %s\n", \ mtt_base_file_name(__FILE__), __LINE__, __func__, \ func, rpma_err_2str(err)); \ } while (0) /* * mtt_prestate_init_fini_func -- a function type used for initialization and * cleanup of prestate. Run once. * * Arguments: * - prestate - a pointer to the test-provided data. It is the only function * type in which the prestate is expected to be modified. * - result - the result. On error the test is responsible for providing * the error details (using e.g. MTT_ERR or MTT_RPMA_ERR macros). */ typedef void (*mtt_prestate_init_fini_func)(void *prestate, struct mtt_result *result); /* * mtt_thread_init_fini_func -- a function type used for all initialization and * cleanup steps * * Arguments: * - id - a thread identifier. It is constant for the whole life of * the thread including sequential initialization and sequential * cleanup. * - prestate - a pointer to the test-provided data passed to all threads in * all steps. It is shared in a non-thread-safe way. * - state_ptr - a pointer to thread-related data. 
The test can allocate and * store their specific data here at any point. Accessing it is * always thread-safe. Once the data is stored the test is also * responsible for freeing it. * - result - the result. On error the test is responsible for providing * the error details (using e.g. MTT_ERR or MTT_RPMA_ERR macros), * the test should not print anything to stdout nor stderr during * parallel steps of the test (thread_init_func, thread_func, * and thread_fini_func). */ typedef void (*mtt_thread_init_fini_func)(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result); /* * mtt_thread_func -- a function type used for the main execution step * * Arguments: * - id - a thread identifier. It is constant for the whole life of * the thread including sequential initialization and sequential * cleanup. * - prestate - a pointer to the test-provided data passed to all threads in all * steps. It is shared in a non-thread-safe way. * - state - a pointer to thread-related data. At this point, it is available * as long as it was prepared during one of the initialization * steps. Note it should not be freed during this step. For tips * on how to allocate/free the thread-related data please see * mtt_thread_init_fini_func. * - result - the result. On error the test is responsible for providing * the error details (using e.g. MTT_ERR or MTT_RPMA_ERR macros), * the test should not print anything to stdout nor stderr during * parallel steps of the test (thread_init_func, thread_func, * and thread_fini_func). */ typedef void (*mtt_thread_func)(unsigned id, void *prestate, void *state, struct mtt_result *result); /* * mtt_child_process_func -- a function type used for the child process * e.g. as another side of the connection * * Arguments: * - prestate - a pointer to the test-provided data. */ typedef int (*mtt_child_process_func)(void *prestate); /* * mtt_start_child - define a time when the child process is started */ enum mtt_start_child { MTT_START_CHILD_BEFORE_PRESTATE_INIT_FUNC, MTT_START_CHILD_BEFORE_THREAD_SEQ_INIT_FUNC, MTT_START_CHILD_BEFORE_THREAD_INIT_FUNC, MTT_START_CHILD_BEFORE_THREAD_FUNC, MTT_START_CHILD_BEFORE_JOINING_THREADS }; struct mtt_test { /* * a pointer to test-provided data passed on all initialization steps * (both sequential and parallel) and also on thread_func */ void *prestate; /* * A function called only once before the sequential initialization of * all threads. It is dedicated to initializing the prestate. */ mtt_prestate_init_fini_func prestate_init_func; /* * a function called for each of threads before spawning it (sequential) */ mtt_thread_init_fini_func thread_seq_init_func; /* * a function called at the beginning of each thread * (parallel but before synchronizing all threads) */ mtt_thread_init_fini_func thread_init_func; /* * a thread main function (parallel and after synchronizing all threads) */ mtt_thread_func thread_func; /* a function called at the end of each thread (parallel) */ mtt_thread_init_fini_func thread_fini_func; /* * a function called for each of threads after its termination * (sequential) */ mtt_thread_init_fini_func thread_seq_fini_func; /* * A function called only once after the sequential clean up of all * threads. It is dedicated to cleaning up the prestate. */ mtt_prestate_init_fini_func prestate_fini_func; /* * A function of the child process. * If it is not NULL, the child process is started * using fork() at the very beginning of the test. 
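	 *
	 * The exact moment of forking the child is selected with
	 * the child_start field (see enum mtt_start_child above).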
*/ mtt_child_process_func child_process_func; /* * A pointer to test-provided data passed to the child process function. */ void *child_prestate; /* * Set a time when the child process should be started. */ enum mtt_start_child child_start; }; int mtt_run(struct mtt_test *test, unsigned threads_num); #endif /* MTT */ rpma-1.3.0/tests/multithreaded/common/mtt_client_process.c000066400000000000000000000023661443364775400240330ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * mtt_client_process.c -- process spawning threads with clients for server-side MT tests */ #include #include "mtt.h" #include "mtt_connect.h" const char data[] = "Hello server!"; struct client_prestate { char *addr; unsigned port; }; /* * thread -- run a single client */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct client_prestate *pr = (struct client_prestate *)prestate; struct rpma_peer *peer = NULL; struct rpma_conn *conn = NULL; if (mtt_client_peer_new(result, pr->addr, &peer)) return; struct rpma_conn_private_data pdata; pdata.ptr = (void *)data; pdata.len = sizeof(data); if (mtt_client_connect(result, pr->addr, pr->port, peer, &conn, &pdata)) { mtt_client_peer_delete(result, &peer); return; } mtt_client_disconnect(result, &conn); mtt_client_peer_delete(result, &peer); } int client_main(char *addr, unsigned port, unsigned threads_num) { struct client_prestate client_prestate = {addr, port}; struct mtt_test test = { &client_prestate, NULL, NULL, NULL, thread, NULL, NULL, NULL, NULL, NULL }; return mtt_run(&test, threads_num); } rpma-1.3.0/tests/multithreaded/common/mtt_common-epoll.c000066400000000000000000000025431443364775400234150ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mtt_common-epoll.c -- common epoll functions for MT tests */ #include #include #include #include #include #include #include "mtt_common-epoll.h" /* * fd_set_nonblock -- set O_NONBLOCK flag for provided file descriptor */ int fd_set_nonblock(int fd) { int ret = fcntl(fd, F_GETFL); if (ret < 0) { perror("fcntl"); return errno; } int flags = ret | O_NONBLOCK; ret = fcntl(fd, F_SETFL, flags); if (ret < 0) { perror("fcntl"); return errno; } return 0; } /* * epoll_add -- add a custom event to the epoll */ int epoll_add(int epoll, int fd, void *arg, event_func func, struct custom_event *ce) { /* set O_NONBLOCK flag for the provided fd */ int ret = fd_set_nonblock(fd); if (ret) return -1; /* prepare a custom event structure */ ce->fd = fd; ce->arg = arg; ce->func = func; /* prepare an epoll event */ struct epoll_event event; event.events = EPOLLIN; event.data.ptr = ce; /* add the event to epoll */ if (epoll_ctl(epoll, EPOLL_CTL_ADD, fd, &event)) return errno; return 0; } /* * epoll_delete -- remove the custom event from the epoll */ void epoll_delete(int epoll, struct custom_event *ce) { (void) epoll_ctl(epoll, EPOLL_CTL_DEL, ce->fd, NULL); ce->fd = -1; } rpma-1.3.0/tests/multithreaded/common/mtt_common-epoll.h000066400000000000000000000011511443364775400234140ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * mtt_common-epoll.h -- common epoll functions declarations for MT tests */ #ifndef MTT_COMMON_EPOLL #define MTT_COMMON_EPOLL #define TIMEOUT_15S (15000) /* [msec] == 15s */ int fd_set_nonblock(int fd); struct custom_event; typedef void (*event_func)(struct custom_event *ce); struct 
custom_event { int fd; void *arg; event_func func; }; int epoll_add(int epoll, int fd, void *arg, event_func func, struct custom_event *ce); void epoll_delete(int epoll, struct custom_event *ce); #endif /* MTT_COMMON_EPOLL */ rpma-1.3.0/tests/multithreaded/common/mtt_connect.c000066400000000000000000000167741443364775400224600ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022-2023, Intel Corporation */ /* * mtt_connect.c -- common connection code of multithreaded tests */ #include #include #include #include "mtt.h" /* maximum number of client's connection retries */ #define MAX_CONN_RETRY 10 /* * mtt_server_listen -- start a listening endpoint at addr:port */ int mtt_server_listen(char *addr, unsigned port, struct rpma_peer **peer_ptr, struct rpma_ep **ep_ptr) { struct ibv_context *ibv_ctx = NULL; int ret; *peer_ptr = NULL; *ep_ptr = NULL; /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* lookup an ibv_context via the address */ ret = rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) { SERVER_RPMA_ERR("rpma_utils_get_ibv_context", ret); return ret; } /* create a new peer object */ ret = rpma_peer_new(ibv_ctx, peer_ptr); if (ret) { SERVER_RPMA_ERR("rpma_peer_new", ret); return ret; } MTT_PORT_INIT; MTT_PORT_SET(port, 0); /* start a listening endpoint at addr:port */ ret = rpma_ep_listen(*peer_ptr, addr, MTT_PORT_STR, ep_ptr); if (ret) { SERVER_RPMA_ERR("rpma_ep_listen", ret); /* delete the peer object */ (void) rpma_peer_delete(peer_ptr); return ret; } return 0; } /* * mtt_server_accept_connection -- wait for an incoming connection request, * accept it and wait for its establishment */ int mtt_server_accept_connection(struct rpma_ep *ep, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr) { struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret; /* receive an incoming connection request */ ret = rpma_ep_next_conn_req(ep, NULL, &req); if (ret) { SERVER_RPMA_ERR("rpma_ep_next_conn_req", ret); return ret; } /* * connect / accept the connection request and obtain the connection * object */ ret = rpma_conn_req_connect(&req, pdata, conn_ptr); if (ret) { SERVER_RPMA_ERR("rpma_conn_req_connect", ret); (void) rpma_conn_req_delete(&req); return ret; } /* wait for the connection to be established */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (ret) SERVER_RPMA_ERR("rpma_conn_next_event", ret); else if (conn_event != RPMA_CONN_ESTABLISHED) { SERVER_ERR_MSG("rpma_conn_next_event returned an unexpected event"); SERVER_ERR_MSG(rpma_utils_conn_event_2str(conn_event)); ret = -1; } if (ret) (void) rpma_conn_delete(conn_ptr); return ret; } /* * mtt_server_wait_for_conn_close_and_disconnect -- wait for RPMA_CONN_CLOSED, * disconnect and delete the connection structure */ void mtt_server_wait_for_conn_close_and_disconnect(struct rpma_conn **conn_ptr) { enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret = 0; /* wait for the connection to be closed */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (ret) SERVER_RPMA_ERR("rpma_conn_next_event", ret); else if (conn_event != RPMA_CONN_CLOSED) { SERVER_ERR_MSG("rpma_conn_next_event returned an unexpected event"); SERVER_ERR_MSG(rpma_utils_conn_event_2str(conn_event)); } ret = rpma_conn_disconnect(*conn_ptr); if (ret) SERVER_RPMA_ERR("rpma_conn_disconnect", ret); ret = 
rpma_conn_delete(conn_ptr); if (ret) SERVER_RPMA_ERR("rpma_conn_delete", ret); } /* * mtt_server_shutdown -- shutdown the endpoint and delete the peer object */ void mtt_server_shutdown(struct rpma_peer **peer_ptr, struct rpma_ep **ep_ptr) { /* shutdown the endpoint */ (void) rpma_ep_shutdown(ep_ptr); /* delete the peer object */ (void) rpma_peer_delete(peer_ptr); } /* * mtt_client_peer_new -- create a new peer */ int mtt_client_peer_new(struct mtt_result *tr, char *addr, struct rpma_peer **peer_ptr) { struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return -1; } ret = rpma_peer_new(ibv_ctx, peer_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return -1; } return 0; } /* * mtt_client_peer_delete -- delete the peer */ void mtt_client_peer_delete(struct mtt_result *tr, struct rpma_peer **peer_ptr) { int ret; ret = rpma_peer_delete(peer_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } /* * mtt_client_connect -- connect with the server and get the private data */ int mtt_client_connect(struct mtt_result *tr, char *addr, unsigned port, struct rpma_peer *peer, struct rpma_conn **conn_ptr, struct rpma_conn_private_data *pdata) { struct rpma_conn_req *req = NULL; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret; MTT_PORT_INIT; MTT_PORT_SET(port, 0); int retry = 0; do { /* create a connection request */ ret = rpma_conn_req_new(peer, addr, MTT_PORT_STR, NULL, &req); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_req_new", ret); goto err_conn_req_delete; } /* * Connect the connection request and obtain * the connection object. */ ret = rpma_conn_req_connect(&req, NULL, conn_ptr); if (ret) { (void) rpma_conn_req_delete(&req); MTT_RPMA_ERR(tr, "rpma_conn_req_connect", ret); goto err_conn_req_delete; } /* wait for the connection to establish */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_next_event", ret); goto err_conn_delete; } if (conn_event == RPMA_CONN_ESTABLISHED) break; if (conn_event != RPMA_CONN_REJECTED) { MTT_ERR_MSG(tr, "rpma_conn_next_event returned an unexpected event: %s", -1, rpma_utils_conn_event_2str(conn_event)); goto err_conn_delete; } retry++; if (retry == MAX_CONN_RETRY) { MTT_ERR_MSG(tr, "reached the maximum number of retries (%i), exiting ...", -1, MAX_CONN_RETRY); goto err_conn_delete; } /* received the RPMA_CONN_REJECTED event, retrying ... 
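		 * (clean up this attempt's connection object and wait a moment
		 * before the next try)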
*/ (void) rpma_conn_disconnect(*conn_ptr); (void) rpma_conn_delete(conn_ptr); sleep(1); } while (retry < MAX_CONN_RETRY); /* get the connection private data */ ret = rpma_conn_get_private_data(*conn_ptr, pdata); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_get_private_data", ret); goto err_conn_disconnect; } else if (pdata->ptr == NULL) { MTT_ERR_MSG(tr, "The server has not provided the connection's private data", -1); goto err_conn_disconnect; } return 0; err_conn_disconnect: (void) rpma_conn_disconnect(*conn_ptr); err_conn_delete: (void) rpma_conn_delete(conn_ptr); err_conn_req_delete: (void) rpma_conn_req_delete(&req); return -1; } /* * mtt_client_err_disconnect -- force disconnect and delete the peer object * in case of an error */ void mtt_client_err_disconnect(struct rpma_conn **conn_ptr) { (void) rpma_conn_disconnect(*conn_ptr); (void) rpma_conn_delete(conn_ptr); } /* * mtt_client_disconnect -- disconnect and delete the peer object */ void mtt_client_disconnect(struct mtt_result *tr, struct rpma_conn **conn_ptr) { enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret; ret = rpma_conn_disconnect(*conn_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_disconnect", ret); } else { /* wait for the connection to be closed */ ret = rpma_conn_next_event(*conn_ptr, &conn_event); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_next_event", ret); else if (conn_event != RPMA_CONN_CLOSED) MTT_ERR_MSG(tr, "rpma_conn_next_event returned an unexpected event", -1); } ret = rpma_conn_delete(conn_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_delete", ret); } rpma-1.3.0/tests/multithreaded/common/mtt_connect.h000066400000000000000000000026431443364775400224530ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * mtt_connect.h -- header for common connection code of multithreaded tests */ #ifndef MTT_CONNECT_H #define MTT_CONNECT_H #define DESCRIPTORS_MAX_SIZE 24 struct common_data { uint16_t data_offset; /* user data offset */ uint8_t mr_desc_size; /* size of mr_desc in descriptors[] */ uint8_t pcfg_desc_size; /* size of pcfg_desc in descriptors[] */ /* buffer containing mr_desc and pcfg_desc */ char descriptors[DESCRIPTORS_MAX_SIZE]; }; int mtt_server_listen(char *addr, unsigned port, struct rpma_peer **peer_ptr, struct rpma_ep **ep_ptr); int mtt_server_accept_connection(struct rpma_ep *ep, struct rpma_conn_private_data *pdata, struct rpma_conn **conn_ptr); void mtt_server_wait_for_conn_close_and_disconnect(struct rpma_conn **conn_ptr); void mtt_server_shutdown(struct rpma_peer **peer_ptr, struct rpma_ep **ep_ptr); int mtt_client_peer_new(struct mtt_result *tr, char *addr, struct rpma_peer **peer_ptr); void mtt_client_peer_delete(struct mtt_result *tr, struct rpma_peer **peer_ptr); int mtt_client_connect(struct mtt_result *tr, char *addr, unsigned port, struct rpma_peer *peer, struct rpma_conn **conn_ptr, struct rpma_conn_private_data *pdata); void mtt_client_err_disconnect(struct rpma_conn **conn_ptr); void mtt_client_disconnect(struct mtt_result *tr, struct rpma_conn **conn_ptr); #endif /* MTT_CONNECT_H */ rpma-1.3.0/tests/multithreaded/conn/000077500000000000000000000000001443364775400174255ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/conn/CMakeLists.txt000066400000000000000000000030401443364775400221620ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME conn BIN rpma_conn_apply_remote_peer_cfg SRCS 
rpma_conn_apply_remote_peer_cfg.c rpma_conn_common.c server_rpma_conn_common.c server_rpma_empty.c ../common/mtt_connect.c ../common/mtt_common-epoll.c) add_multithreaded(NAME conn BIN rpma_conn_get_private_data SRCS rpma_conn_get_private_data.c server_rpma_conn_get_private_data.c ../common/mtt_connect.c) add_multithreaded(NAME conn BIN rpma_conn_req_connect SRCS rpma_conn_req_connect.c ../common/mtt_client_process.c ../common/mtt_connect.c) add_multithreaded(NAME conn BIN rpma_conn_req_delete SRCS rpma_conn_req_delete.c) add_multithreaded(NAME conn BIN rpma_conn_req_new SRCS rpma_conn_req_new.c) add_multithreaded(NAME conn BIN rpma_read SRCS rpma_read.c rpma_conn_common.c server_rpma_conn_common.c server_rpma_empty.c ../common/mtt_connect.c ../common/mtt_common-epoll.c) add_multithreaded(NAME conn BIN rpma_send USE_LIBIBVERBS SRCS rpma_send.c rpma_conn_common.c server_rpma_conn_common.c server_rpma_send.c ../common/mtt_connect.c ../common/mtt_common-epoll.c) add_multithreaded(NAME conn BIN rpma_write SRCS rpma_write.c rpma_conn_common.c server_rpma_conn_common.c server_rpma_empty.c ../common/mtt_connect.c ../common/mtt_common-epoll.c) add_multithreaded(NAME conn BIN rpma_write_read SRCS rpma_write_read.c rpma_conn_common.c server_rpma_conn_common.c server_rpma_empty.c ../common/mtt_connect.c ../common/mtt_common-epoll.c) rpma-1.3.0/tests/multithreaded/conn/rpma_conn_apply_remote_peer_cfg.c000066400000000000000000000013351443364775400261610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_apply_remote_peer_cfg.c -- rpma_conn_apply_remote_peer_cfg multithreaded test */ #include #include "mtt.h" #include "rpma_conn_common.h" /* * thread -- main function of rpma_conn_apply_remote_peer_cfg multithreaded test */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct thread_state *ts = (struct thread_state *)state; /* apply remote peer configuration */ int ret = rpma_conn_apply_remote_peer_cfg(ts->conn, pr->pcfg); if (ret) { MTT_ERR_MSG(result, "rpma_conn_apply_remote_peer_cfg() failed", ret); return; } } rpma-1.3.0/tests/multithreaded/conn/rpma_conn_common.c000066400000000000000000000141201443364775400231130ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_common.c -- common implementations of multi-connection MT tests */ #include #include #include "mtt.h" #include "mtt_connect.h" #include "rpma_conn_common.h" /* the client's part */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; (void) mtt_client_peer_new(tr, pr->addr, &pr->peer); /* create peer configuration */ (void) rpma_peer_cfg_new(&pr->pcfg); /* set direct write to pmem */ (void) rpma_peer_cfg_set_direct_write_to_pmem(pr->pcfg, DIRECT_WRITE_TO_PMEM); } static void thread_seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; /* allocate a thread state */ struct thread_state *ts = mtt_malloc_aligned(sizeof(struct thread_state), tr); /* save the thread state */ *state_ptr = ts; if (ts == NULL) return; /* allocate a memory for local MR */ ts->mr_local_size = MAX_STR_LEN; ts->local_ptr = mtt_malloc_aligned(ts->mr_local_size, tr); if (ts->local_ptr == NULL) goto err_free; /* fill the source memory with an initial content for write tests */ memset(ts->local_ptr, 0, 
ts->mr_local_size); memcpy(ts->local_ptr, STRING_TO_WRITE_SEND, LEN_STRING_TO_WRITE_SEND); int ret = mtt_client_connect(tr, pr->addr, pr->port, pr->peer, &ts->conn, &ts->pdata); if (ret) goto err_free; /* register the memory */ ret = rpma_mr_reg(pr->peer, ts->local_ptr, ts->mr_local_size, RPMA_MR_USAGE_READ_DST | RPMA_MR_USAGE_WRITE_SRC, &ts->mr_local_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_reg", ret); goto err_conn_disconnect; }; /* * Create a remote memory registration structure from the received * descriptor. */ struct common_data *dst_data = ts->pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &ts->mr_remote_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_remote_from_descriptor", ret); goto err_mr_dereg; }; ret = rpma_mr_remote_get_size(ts->mr_remote_ptr, &ts->mr_remote_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_remote_get_size", ret); goto err_mr_remote_delete; }; /* get the connection's main CQ */ ret = rpma_conn_get_cq(ts->conn, &ts->cq); if (ret) goto err_mr_remote_delete; return; err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&ts->mr_remote_ptr); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&ts->mr_local_ptr); err_conn_disconnect: mtt_client_err_disconnect(&ts->conn); err_free: free(ts->local_ptr); free(ts); } /* * wait_and_validate_completion -- wait for the completion to be ready and validate it */ int wait_and_validate_completion(struct rpma_cq *cq, enum ibv_wc_opcode expected_opcode, uint64_t wr_id, struct mtt_result *result) { struct ibv_wc wc; /* wait for the completion to be ready */ int ret = rpma_cq_wait(cq); if (ret) { MTT_ERR_MSG(result, "rpma_cq_wait() failed", ret); return ret; } /* get a completion of the RDMA read */ ret = rpma_cq_get_wc(cq, 1, &wc, NULL); if (ret) { MTT_ERR_MSG(result, "rpma_cq_get_wc() failed", ret); return ret; } if (wc.status != IBV_WC_SUCCESS) { MTT_ERR_MSG(result, "completion status is different from IBV_WC_SUCCESS", -1); return -1; } if (wc.opcode != expected_opcode) { MTT_ERR_MSG(result, "unexpected wc.opcode value", -1); return -1; } if (wc.wr_id != wr_id) { MTT_ERR_MSG(result, "wrong work request ID", -1); return -1; } return 0; } /* * thread_seq_fini -- deregister and free the memory region, disconnect and delete the peer object */ static void thread_seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct thread_state *ts = (struct thread_state *)(*state_ptr); enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret; ret = rpma_conn_disconnect(ts->conn); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_disconnect", ret); } else { /* wait for the connection to be closed */ ret = rpma_conn_next_event(ts->conn, &conn_event); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_next_event", ret); else if (conn_event != RPMA_CONN_CLOSED) MTT_ERR_MSG(tr, "rpma_conn_next_event returned an unexpected event", -1); } ret = rpma_conn_delete(&ts->conn); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_delete", ret); /* delete the remote memory region's structure */ ret = rpma_mr_remote_delete(&ts->mr_remote_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_remote_delete", ret); /* deregister the memory region */ ret = rpma_mr_dereg(&ts->mr_local_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_dereg", ret); free(ts->local_ptr); free(ts); } /* * prestate_fini -- deregister and free the memory region, disconnect and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct 
prestate *)prestate; int ret; /* delete the peer configuration structure */ ret = rpma_peer_cfg_delete(&pr->pcfg); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_cfg_delete", ret); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } /* the server's part */ struct server_prestate { char *addr; unsigned port; /* the expected value of the private data */ struct rpma_conn_private_data pdata; }; /* * server_main -- the main function of the server */ int server_main(char *addr, unsigned port); /* * server_func -- the server function of this test */ int server_func(void *prestate) { struct server_prestate *pst = (struct server_prestate *)prestate; return server_main(pst->addr, pst->port); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate client_prestate = {args.addr, args.port}; struct server_prestate server_prestate = {args.addr, args.port}; struct mtt_test test = { &client_prestate, prestate_init, thread_seq_init, NULL, thread, NULL, thread_seq_fini, prestate_fini, server_func, &server_prestate }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn/rpma_conn_common.h000066400000000000000000000033611443364775400231250ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_common.h -- common definition of multi-connection MT tests */ #ifndef MTT_RPMA_RW_COMMON #define MTT_RPMA_RW_COMMON #define DIRECT_WRITE_TO_PMEM true #define MAX(a, b) ( \ { \ __typeof__ (a) _a = (a); \ __typeof__ (b) _b = (b); \ _a > _b ? _a : _b; \ }) #define STRING_TO_READ_RECV "This string was read/received using RDMA read/receive" #define STRING_TO_WRITE_SEND "This string was written/sent using RDMA write/send" #define LEN_STRING_TO_READ_RECV (strlen(STRING_TO_READ_RECV)) #define LEN_STRING_TO_WRITE_SEND (strlen(STRING_TO_WRITE_SEND)) #define MAX_STR_LEN (MAX(LEN_STRING_TO_READ_RECV, LEN_STRING_TO_WRITE_SEND)) #define WR_ID_READ ((uint64_t)0x1111111111111111) #define WR_ID_WRITE ((uint64_t)0x2222222222222222) #define WR_ID_SEND ((uint64_t)0x3333333333333333) #define WR_ID_RECV ((uint64_t)0x4444444444444444) struct thread_state { struct rpma_conn *conn; struct rpma_cq *cq; void *local_ptr; struct rpma_mr_local *mr_local_ptr; struct rpma_mr_remote *mr_remote_ptr; size_t mr_local_size; size_t mr_remote_size; /* the expected value of the private data */ struct rpma_conn_private_data pdata; }; struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_peer_cfg *pcfg; }; /* * thread - test-specific function implemented by each MT test separately */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result); /* * wait_and_validate_completion -- wait for the completion to be ready and validate it */ int wait_and_validate_completion(struct rpma_cq *cq, enum ibv_wc_opcode expected_opcode, uint64_t wr_id, struct mtt_result *result); #endif /* MTT_RPMA_RW_COMMON */ rpma-1.3.0/tests/multithreaded/conn/rpma_conn_get_private_data.c000066400000000000000000000055031443364775400251320ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_get_private_data.c -- rpma_conn_get_private_data multithreaded test */ #include #include #include "mtt.h" #include "mtt_connect.h" /* the client's part */ struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_conn *conn; /* the expected value of the 
private data */ struct rpma_conn_private_data pdata; }; /* * prestate_init -- connect with the server, get the private data * and save it in order to verify it later */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; if (mtt_client_peer_new(tr, pr->addr, &pr->peer)) return; if (mtt_client_connect(tr, pr->addr, pr->port, pr->peer, &pr->conn, &pr->pdata)) mtt_client_peer_delete(tr, &pr->peer); } /* * thread -- get and verify the private data */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct rpma_conn_private_data pdata; /* get a connection's private data */ int ret = rpma_conn_get_private_data(pr->conn, &pdata); if (ret) { MTT_RPMA_ERR(result, "rpma_conn_get_private_data", ret); return; } else if (pdata.ptr == NULL) { MTT_ERR_MSG(result, "The server has not provided the connection's private data", -1); return; } /* verify the length of the received private data */ if (pdata.len != pr->pdata.len) { MTT_ERR_MSG(result, "Wrong length of the private data", -1); return; } /* verify the content of the received private data */ if (memcmp(pdata.ptr, pr->pdata.ptr, pdata.len) != 0) MTT_ERR_MSG(result, "Wrong content of the private data", -1); } /* * prestate_fini -- disconnect and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; mtt_client_disconnect(tr, &pr->conn); mtt_client_peer_delete(tr, &pr->peer); } /* the server's part */ struct server_prestate { char *addr; unsigned port; }; /* * server_main -- the main function of the server */ int server_main(char *addr, unsigned port); /* * server_func -- the server function of this test */ int server_func(void *prestate) { struct server_prestate *pst = prestate; return server_main(pst->addr, pst->port); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate client_prestate = {args.addr, args.port}; struct server_prestate server_prestate = {args.addr, args.port}; struct mtt_test test = { &client_prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini, server_func, &server_prestate }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn/rpma_conn_req_connect.c000066400000000000000000000062411443364775400241300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_req_connect.c -- rpma_conn_req_connect multithreaded test */ #include #include #include "mtt.h" #include "mtt_connect.h" /* the server's part */ struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_ep *ep; }; struct state { struct rpma_conn_req *req; struct rpma_conn *conn; }; /* * prestate_init -- listen to the clients */ static void prestate_init(void *prestate, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; int ret = mtt_server_listen(pr->addr, pr->port, &pr->peer, &pr->ep); if (ret) MTT_RPMA_ERR(result, "mtt_server_listen", ret); } /* * seq_init -- allocate a state for the thread */ static void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(result, "calloc", errno); return; } *state_ptr = st; /* receive an incoming 
connection request */ int ret = rpma_ep_next_conn_req(pr->ep, NULL, &st->req); if (ret) MTT_RPMA_ERR(result, "rpma_ep_next_conn_req", ret); } /* * thread -- run rpma_conn_req_connect() */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct state *st = (struct state *)state; /* * connect / accept the connection request and obtain the connection object */ int ret = rpma_conn_req_connect(&st->req, NULL, &st->conn); if (ret) { MTT_RPMA_ERR(result, "rpma_conn_req_connect", ret); (void) rpma_conn_req_delete(&st->req); } } /* * seq_fini -- delete the connection */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct state *st = (struct state *)*state_ptr; int ret = rpma_conn_delete(&st->conn); if (ret) MTT_RPMA_ERR(result, "rpma_conn_delete", ret); free(st); *state_ptr = NULL; } /* * prestate_fini -- delete the endpoint and the peer object */ static void prestate_fini(void *prestate, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; mtt_server_shutdown(&pr->peer, &pr->ep); } /* the client's part */ struct client_prestate { char *addr; unsigned port; unsigned threads_num; }; /* * client_main -- the main function of the client */ int client_main(char *addr, unsigned port, unsigned threads_num); /* * client_func -- the client function of this test */ int client_func(void *prestate) { struct client_prestate *pst = prestate; return client_main(pst->addr, pst->port, pst->threads_num); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate server_prestate = {args.addr, args.port}; struct client_prestate client_prestate = {args.addr, args.port, args.threads_num}; struct mtt_test test = { &server_prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini, client_func, &client_prestate, MTT_START_CHILD_BEFORE_THREAD_SEQ_INIT_FUNC }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn/rpma_conn_req_delete.c000066400000000000000000000065731443364775400237510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_req_delete.c -- rpma_conn_req_delete multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_conn_cfg *cfg; }; struct state { struct rpma_conn_req *req; }; /* * prestate_init -- obtain an ibv_context for a remote IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * seq_init -- allocate a state and create a connection request */ void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_conn_req_new(pr->peer, pr->addr, MTT_PORT_STR, pr->cfg, &st->req); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_req_new", ret); *state_ptr = st; } /* * thread -- delete a connection request 
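 * (the request was created for this thread in seq_init(); deleting
 * the requests from many threads in parallel is the operation under test)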
*/ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct state *st = (struct state *)state; int ret = rpma_conn_req_delete(&st->req); if (ret) MTT_RPMA_ERR(result, "rpma_conn_req_delete", ret); } /* * seq_fini -- free the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; free(st); } /* * prestate_fini -- delete the peer object and the connection configuration object if it's not NULL */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); if (pr->cfg != NULL && (ret = rpma_conn_cfg_delete(&pr->cfg))) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); } /* * prestate_init_with_conn_cfg -- obtain an ibv_context for a remote IP address, * create a new peer object and a new connection configuration object */ static void prestate_init_with_conn_cfg(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; prestate_init(prestate, tr); if (tr->ret) return; int ret = rpma_conn_cfg_new(&pr->cfg); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); prestate_fini(prestate, tr); } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini }; struct mtt_test test_with_conn_cfg = { &prestate, prestate_init_with_conn_cfg, seq_init, NULL, thread, NULL, seq_fini, prestate_fini }; int ret = mtt_run(&test, args.threads_num); if (ret) return ret; return mtt_run(&test_with_conn_cfg, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn/rpma_conn_req_new.c000066400000000000000000000066011443364775400232700ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_req_new.c -- rpma_conn_req_new multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_conn_cfg *cfg; }; struct state { struct rpma_conn_req *req; }; /* * prestate_init -- obtain an ibv_context for a remote IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * init -- allocate state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create a connection request based on shared peer object */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_conn_req_new(pr->peer, pr->addr, MTT_PORT_STR, pr->cfg, &st->req); if (ret) MTT_RPMA_ERR(result, "rpma_conn_req_new", ret); } /* * fini -- delete 
rpma_conn_req and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; int ret; if (st->req && (ret = rpma_conn_req_delete(&st->req))) MTT_RPMA_ERR(tr, "rpma_conn_req_delete", ret); free(st); } /* * prestate_fini -- delete the peer object and the connection configuration object if it's not NULL */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); if (pr->cfg != NULL && (ret = rpma_conn_cfg_delete(&pr->cfg))) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); } /* * prestate_init_with_conn_cfg -- obtain an ibv_context for a remote IP address, * create a new peer object and a new connection configuration object */ static void prestate_init_with_conn_cfg(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; prestate_init(prestate, tr); if (tr->ret) return; int ret = rpma_conn_cfg_new(&pr->cfg); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); (void) rpma_peer_delete(&pr->peer); } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, init, thread, fini, NULL, prestate_fini }; struct mtt_test test_with_conn_cfg = { &prestate, prestate_init_with_conn_cfg, NULL, init, thread, fini, NULL, prestate_fini }; int ret = mtt_run(&test, args.threads_num); if (ret) return ret; return mtt_run(&test_with_conn_cfg, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn/rpma_read.c000066400000000000000000000017661443364775400215350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_read.c -- rpma_read multithreaded test */ #include #include "mtt.h" #include "rpma_conn_common.h" /* * thread -- main function of rpma_read multithreaded test (read and verify the data) */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct thread_state *ts = (struct thread_state *)state; /* zero the destination memory */ memset(ts->local_ptr, 0, ts->mr_local_size); /* post an RDMA read operation */ int ret = rpma_read(ts->conn, ts->mr_local_ptr, 0, ts->mr_remote_ptr, 0, ts->mr_remote_size, RPMA_F_COMPLETION_ALWAYS, (void *)WR_ID_READ); if (ret) { MTT_ERR_MSG(result, "rpma_read() failed", ret); return; } ret = wait_and_validate_completion(ts->cq, IBV_WC_RDMA_READ, WR_ID_READ, result); if (ret) return; if (strncmp(ts->local_ptr, STRING_TO_READ_RECV, LEN_STRING_TO_READ_RECV) != 0) MTT_ERR_MSG(result, "read string mismatch", -1); } rpma-1.3.0/tests/multithreaded/conn/rpma_send.c000066400000000000000000000013301443364775400215360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_send.c -- rpma_send multithreaded test */ #include #include "mtt.h" #include "rpma_conn_common.h" /* * thread -- main function of rpma_send multithreaded test */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct thread_state *ts = (struct thread_state *)state; /* post an RDMA send operation */ int ret = rpma_send(ts->conn, ts->mr_local_ptr, 0, ts->mr_remote_size, RPMA_F_COMPLETION_ALWAYS, (void *)WR_ID_SEND); if (ret) { MTT_ERR_MSG(result, "rpma_send() failed", ret); return; } 
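	/*
	 * Wait for the send completion and validate it. Any error is recorded
	 * in 'result'; there is nothing more to do here, so the return value
	 * can be ignored.
	 */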
(void) wait_and_validate_completion(ts->cq, IBV_WC_SEND, WR_ID_SEND, result); } rpma-1.3.0/tests/multithreaded/conn/rpma_write.c000066400000000000000000000013731443364775400217460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_write.c -- rpma_write multithreaded test */ #include #include "mtt.h" #include "rpma_conn_common.h" /* * thread -- main function of rpma_write multithreaded test */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct thread_state *ts = (struct thread_state *)state; /* post an RDMA write operation */ int ret = rpma_write(ts->conn, ts->mr_remote_ptr, 0, ts->mr_local_ptr, 0, ts->mr_remote_size, RPMA_F_COMPLETION_ALWAYS, (void *)WR_ID_WRITE); if (ret) { MTT_ERR_MSG(result, "rpma_write() failed", ret); return; } (void) wait_and_validate_completion(ts->cq, IBV_WC_RDMA_WRITE, WR_ID_WRITE, result); } rpma-1.3.0/tests/multithreaded/conn/rpma_write_read.c000066400000000000000000000027201443364775400227360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_write_read.c -- rpma_write with rpma_read verification multithreaded test */ #include #include "mtt.h" #include "rpma_conn_common.h" /* * thread -- main function of rpma_write with rpma_read verification MT test * (write and verify the data using rpma_read) */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct thread_state *ts = (struct thread_state *)state; /* post an RDMA write operation */ int ret = rpma_write(ts->conn, ts->mr_remote_ptr, 0, ts->mr_local_ptr, 0, ts->mr_remote_size, RPMA_F_COMPLETION_ALWAYS, (void *)WR_ID_WRITE); if (ret) { MTT_ERR_MSG(result, "rpma_write() failed", ret); return; } ret = wait_and_validate_completion(ts->cq, IBV_WC_RDMA_WRITE, WR_ID_WRITE, result); if (ret) return; /* zero the local memory as the destination for reading */ memset(ts->local_ptr, 0, ts->mr_local_size); /* post an RDMA read operation */ ret = rpma_read(ts->conn, ts->mr_local_ptr, 0, ts->mr_remote_ptr, 0, ts->mr_remote_size, RPMA_F_COMPLETION_ALWAYS, (void *)WR_ID_READ); if (ret) { MTT_ERR_MSG(result, "rpma_read() failed", ret); return; } ret = wait_and_validate_completion(ts->cq, IBV_WC_RDMA_READ, WR_ID_READ, result); if (ret) return; if (strncmp(ts->local_ptr, STRING_TO_WRITE_SEND, LEN_STRING_TO_WRITE_SEND) != 0) MTT_ERR_MSG(result, "write string mismatch", -1); } rpma-1.3.0/tests/multithreaded/conn/server_rpma_conn_common.c000066400000000000000000000163011443364775400245040ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * server_rpma_conn_common.c -- implementation of common server of multi-connection MT tests */ #include #include #include #include #include #include #include "mtt.h" #include "mtt_connect.h" #include "mtt_common-epoll.h" #include "rpma_conn_common.h" #include "server_rpma_conn_common.h" /* * server_init -- initialize server's resources */ int server_init(struct server_res *svr, struct rpma_peer *peer) { int ret = 0; svr->epoll = epoll_create1(EPOLL_CLOEXEC); if (svr->epoll == -1) return errno; /* allocate a memory */ svr->mr_local_size = MAX_STR_LEN; svr->local_ptr = mtt_malloc_aligned(svr->mr_local_size, NULL); if (svr->local_ptr == NULL) { ret = -1; goto err_close; } /* fill the source memory with an initial content for read tests */ memset(svr->local_ptr, 0, svr->mr_local_size); 
memcpy(svr->local_ptr, STRING_TO_READ_RECV, LEN_STRING_TO_READ_RECV); /* register the memory */ ret = rpma_mr_reg(peer, svr->local_ptr, svr->mr_local_size, RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_WRITE_DST, &svr->mr_local_ptr); if (ret) goto err_free; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(svr->mr_local_ptr, &mr_desc_size); if (ret) goto err_mr_dereg; svr->data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(svr->mr_local_ptr, &svr->data.descriptors[0]); if (ret) goto err_mr_dereg; svr->pdata.ptr = &svr->data; svr->pdata.len = sizeof(struct common_data); return 0; err_mr_dereg: (void) rpma_mr_dereg(&svr->mr_local_ptr); err_free: free(svr->local_ptr); err_close: close(svr->epoll); return ret; } /* * server_fini -- release server's resources */ void server_fini(struct server_res *svr) { /* deregister the memory region */ (void) rpma_mr_dereg(&svr->mr_local_ptr); /* free the memory */ free(svr->local_ptr); /* close the epoll */ (void) close(svr->epoll); } /* * server_find_first_free_client_slot -- find a slot for the incoming client */ struct client_res * server_find_first_free_client_slot(struct server_res *svr) { /* find the first free slot */ struct client_res *clnt = NULL; for (int i = 0; i < CLIENT_MAX; ++i) { clnt = &svr->clients[i]; if (clnt->conn != NULL) continue; clnt->client_id = i; clnt->svr = svr; clnt->ev_conn_cmpl.fd = -1; clnt->ev_conn_event.fd = -1; break; } return clnt; } /* * server_add_client_fd_to_epoll -- add all client's file descriptors to epoll */ int server_add_client_fd_to_epoll(struct client_res *clnt, int epoll) { /* get the connection's event fd and add it to epoll */ int fd; int ret = rpma_conn_get_event_fd(clnt->conn, &fd); if (ret) return ret; ret = epoll_add(epoll, fd, clnt, client_connection_event_handle, &clnt->ev_conn_event); if (ret) return ret; /* get the connection's completion fd and add it to epoll */ ret = rpma_cq_get_fd(clnt->cq, &fd); if (ret) { epoll_delete(epoll, &clnt->ev_conn_event); return ret; } ret = epoll_add(epoll, fd, clnt, client_completion_event_handle, &clnt->ev_conn_cmpl); if (ret) epoll_delete(epoll, &clnt->ev_conn_event); return ret; } /* * server_delete_client -- release client's resources */ void server_delete_client(struct client_res *clnt) { struct server_res *svr = clnt->svr; if (clnt->ev_conn_cmpl.fd != -1) epoll_delete(svr->epoll, &clnt->ev_conn_cmpl); if (clnt->ev_conn_event.fd != -1) epoll_delete(svr->epoll, &clnt->ev_conn_event); /* delete the connection and set conn to NULL */ (void) rpma_conn_delete(&clnt->conn); } /* * client_connection_event_handle -- callback on connection's next event */ void client_connection_event_handle(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; /* get next connection's event */ enum rpma_conn_event event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(clnt->conn, &event); if (ret) { if (ret == RPMA_E_NO_EVENT) return; (void) rpma_conn_disconnect(clnt->conn); return; } /* proceed to the callback specific to the received event */ switch (event) { case RPMA_CONN_ESTABLISHED: client_is_ready_handle(clnt); break; case RPMA_CONN_CLOSED: default: server_delete_client(clnt); break; } } /* * server_incoming_connection_handle -- callback on endpoint's next incoming * connection * * Get the connection request. If there is not free slots reject it. 
Otherwise, * accept the incoming connection, get the event and completion file * descriptors, set O_NONBLOCK flag for both of them and add events to * the epoll. * If error will occur at any of the required steps the client is disconnected. */ void server_incoming_connection_handle(struct custom_event *ce) { struct server_res *svr = (struct server_res *)ce->arg; /* receive an incoming connection request */ struct rpma_conn_req *req = NULL; if (rpma_ep_next_conn_req(svr->ep, NULL, &req)) return; /* if no free slot is available */ struct client_res *clnt = NULL; if ((clnt = server_find_first_free_client_slot(svr)) == NULL) { rpma_conn_req_delete(&req); return; } /* accept the connection request and obtain the connection object */ if (rpma_conn_req_connect(&req, &svr->pdata, &clnt->conn)) { (void) rpma_conn_req_delete(&req); /* * When rpma_conn_req_connect() fails the connection pointer * remains unchanged (in this case it is NULL) so the server * would choose the same client slot if another client will * come. No additional cleanup needed. */ return; } /* get the connection's main CQ */ if (rpma_conn_get_cq(clnt->conn, &clnt->cq)) { /* an error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); return; } if (server_add_client_fd_to_epoll(clnt, svr->epoll)) (void) rpma_conn_disconnect(clnt->conn); } int server_main(char *addr, unsigned port) { int ret; /* RPMA resources - general */ struct rpma_peer *peer = NULL; /* server resource */ struct server_res svr = {0}; ret = mtt_server_listen(addr, port, &peer, &svr.ep); if (ret) return ret; /* initialize the server's structure */ ret = server_init(&svr, peer); if (ret) goto err_server_shutdown; /* get the endpoint's event file descriptor and add it to epoll */ int ep_fd; ret = rpma_ep_get_fd(svr.ep, &ep_fd); if (ret) goto err_server_fini; ret = epoll_add(svr.epoll, ep_fd, &svr, server_incoming_connection_handle, &svr.ev_incoming); if (ret) goto err_server_fini; /* process epoll's events */ struct epoll_event event = {0}; struct custom_event *ce; while ((ret = epoll_wait(svr.epoll, &event, 1 /* # of events */, TIMEOUT_15S)) == 1) { ce = (struct custom_event *)event.data.ptr; ce->func(ce); } /* disconnect all remaining client's */ for (int i = 0; i < CLIENT_MAX; ++i) { if (svr.clients[i].conn == NULL) continue; (void) rpma_conn_disconnect(svr.clients[i].conn); (void) rpma_conn_delete(&svr.clients[i].conn); } if (ret == 0) SERVER_ERR_MSG("Server timed out"); err_server_fini: /* release the server's resources */ server_fini(&svr); err_server_shutdown: mtt_server_shutdown(&peer, &svr.ep); return ret; } rpma-1.3.0/tests/multithreaded/conn/server_rpma_conn_common.h000066400000000000000000000027501443364775400245140ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * server_rpma_conn_common.h -- definitions of common server of multi-connection MT tests */ #include #include #include #include #include #include #include "mtt.h" #include "mtt_connect.h" #include "mtt_common-epoll.h" #include "rpma_conn_common.h" #define CLIENT_MAX 32 struct client_res { /* RPMA resources */ struct rpma_conn *conn; struct rpma_cq *cq; /* events */ struct custom_event ev_conn_event; struct custom_event ev_conn_cmpl; /* parent and identifier */ struct server_res *svr; int client_id; }; struct server_res { /* RPMA resources */ struct rpma_ep *ep; /* resources - memory region */ void *local_ptr; struct rpma_mr_local *mr_local_ptr; size_t mr_local_size; struct common_data data; struct 
rpma_conn_private_data pdata; /* epoll and event */ int epoll; struct custom_event ev_incoming; /* client's resources */ struct client_res clients[CLIENT_MAX]; }; /* * client_completion_event_handle -- callback on completion is ready (test-specific) */ void client_completion_event_handle(struct custom_event *ce); /* * client_connection_event_handle -- callback on connection's next event */ void client_connection_event_handle(struct custom_event *ce); /* * client_is_ready_handle -- callback on connection is established (test-specific) */ void client_is_ready_handle(struct client_res *clnt); rpma-1.3.0/tests/multithreaded/conn/server_rpma_conn_get_private_data.c000066400000000000000000000020141443364775400265120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * server.c -- a server of the rpma_conn_get_private_data MT test */ #include #include "mtt.h" #include "mtt_connect.h" const char data[] = "Hello client!"; int server_main(char *addr, unsigned port) { struct rpma_peer *peer = NULL; struct rpma_ep *ep = NULL; struct rpma_conn *conn = NULL; int ret; ret = mtt_server_listen(addr, port, &peer, &ep); if (ret) return ret; struct rpma_conn_private_data pdata; pdata.ptr = (void *)data; pdata.len = sizeof(data); /* * Wait for an incoming connection request, accept it and wait for its * establishment. */ ret = mtt_server_accept_connection(ep, &pdata, &conn); if (ret) goto err_shutdown; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection * structure. */ mtt_server_wait_for_conn_close_and_disconnect(&conn); err_shutdown: /* shutdown the endpoint and delete the peer object */ mtt_server_shutdown(&peer, &ep); return ret; } rpma-1.3.0/tests/multithreaded/conn/server_rpma_empty.c000066400000000000000000000011311443364775400233300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server_rpma_empty.c -- empty implementation of test-specific functions for common server * of multi-connection MT tests */ #include "server_rpma_conn_common.h" /* * client_completion_event_handle -- empty implementation of callback on completion is ready */ void client_completion_event_handle(struct custom_event *ce) { } /* * client_is_ready_handle -- empty implementation of callback on connection is established */ void client_is_ready_handle(struct client_res *clnt) { } rpma-1.3.0/tests/multithreaded/conn/server_rpma_send.c000066400000000000000000000040751443364775400231350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * server_rpma_send.c -- implementation of rpma_send-specific functions for common server * of multi-connection MT tests */ #include "server_rpma_conn_common.h" /* * client_completion_event_handle -- callback on completion is ready */ void client_completion_event_handle(struct custom_event *ce) { struct client_res *clnt = (struct client_res *)ce->arg; const struct server_res *svr = clnt->svr; /* wait for the completion to be ready */ int ret = rpma_cq_wait(clnt->cq); if (ret) goto err_cq_wait_and_get_wc; /* get next completion */ struct ibv_wc wc; ret = rpma_cq_get_wc(clnt->cq, 1, &wc, NULL); if (ret) goto err_cq_wait_and_get_wc; /* validate received completion */ if (wc.status != IBV_WC_SUCCESS) { SERVER_ERR_MSG("completion status is different from IBV_WC_SUCCESS"); SERVER_ERR_MSG(ibv_wc_status_str(wc.status)); goto err_disconnect; } if (wc.opcode != IBV_WC_RECV) { SERVER_ERR_MSG("received 
unexpected wc.opcode value"); goto err_disconnect; } if (wc.wr_id != WR_ID_RECV) { SERVER_ERR_MSG("wrong work request ID"); goto err_disconnect; } if (memcmp(svr->local_ptr, STRING_TO_WRITE_SEND, LEN_STRING_TO_WRITE_SEND) != 0) SERVER_ERR_MSG("sent data mismatch"); /* post next RDMA recv operation */ ret = rpma_recv(clnt->conn, svr->mr_local_ptr, 0, svr->mr_local_size, (void *)WR_ID_RECV); if (ret) SERVER_RPMA_ERR("rpma_recv() failed", ret); return; err_cq_wait_and_get_wc: /* no completion is ready - continue */ if (ret == RPMA_E_NO_COMPLETION) return; err_disconnect: /* another error occurred - disconnect */ (void) rpma_conn_disconnect(clnt->conn); } /* * client_is_ready_handle -- callback on connection is established */ void client_is_ready_handle(struct client_res *clnt) { const struct server_res *svr = clnt->svr; /* post an RDMA recv operation */ int ret = rpma_recv(clnt->conn, svr->mr_local_ptr, 0, svr->mr_local_size, (void *)WR_ID_RECV); if (ret) SERVER_RPMA_ERR("rpma_recv() failed", ret); } rpma-1.3.0/tests/multithreaded/conn_cfg/000077500000000000000000000000001443364775400202445ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/conn_cfg/CMakeLists.txt000066400000000000000000000037651443364775400230170ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME conn_cfg BIN get_cq_size SRCS rpma_conn_cfg_get_cq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_compl_channel SRCS rpma_conn_cfg_get_compl_channel.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_rq_size SRCS rpma_conn_cfg_get_rq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_sq_size SRCS rpma_conn_cfg_get_sq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_srq SRCS rpma_conn_cfg_get_srq.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_rcq_size SRCS rpma_conn_cfg_get_rcq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN get_timeout SRCS rpma_conn_cfg_get_timeout.c rpma_conn_cfg_common.c rpma_conn_cfg_common_get.c) add_multithreaded(NAME conn_cfg BIN new SRCS rpma_conn_cfg_new.c) add_multithreaded(NAME conn_cfg BIN set_compl_channel SRCS rpma_conn_cfg_set_compl_channel.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_cq_size SRCS rpma_conn_cfg_set_cq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_rq_size SRCS rpma_conn_cfg_set_rq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_sq_size SRCS rpma_conn_cfg_set_sq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_srq SRCS rpma_conn_cfg_set_srq.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_rcq_size SRCS rpma_conn_cfg_set_rcq_size.c rpma_conn_cfg_common.c rpma_conn_cfg_common_set.c) add_multithreaded(NAME conn_cfg BIN set_timeout SRCS rpma_conn_cfg_set_timeout.c rpma_conn_cfg_common.c) rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_common.c000066400000000000000000000042271443364775400245600ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_common.c -- common 
part for rpma_conn_cfg_* multithreaded tests */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * rpma_conn_cfg_common_prestate_init -- create a new connection * configuration object, set all queue sizes and timeout value */ void rpma_conn_cfg_common_prestate_init(void *prestate, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; ret = rpma_conn_cfg_new(&pr->cfg_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); return; } ret = rpma_conn_cfg_set_compl_channel(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_IS_SHARED); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_compl_channel", ret); return; } ret = rpma_conn_cfg_set_cq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_cq_size", ret); return; } ret = rpma_conn_cfg_set_sq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_sq_size", ret); return; } ret = rpma_conn_cfg_set_rcq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_rcq_size", ret); return; } ret = rpma_conn_cfg_set_rq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_rq_size", ret); return; } ret = rpma_conn_cfg_set_timeout(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_TIMEOUT_MS_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_timeout", ret); return; } ret = rpma_conn_cfg_set_srq(pr->cfg_ptr, RPMA_SRQ_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_srq", ret); return; } } /* * rpma_conn_cfg_common_prestate_fini -- free the connection configuration * object */ void rpma_conn_cfg_common_prestate_fini(void *prestate, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; ret = rpma_conn_cfg_delete(&pr->cfg_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_common.h000066400000000000000000000016271443364775400245660ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_common.h -- common definition for rpma_conn_cfg_* * multithreaded tests */ #ifndef MTT_RPMA_CONN_CFG_COMMON #define MTT_RPMA_CONN_CFG_COMMON /* the expected queue size */ #define RPMA_CONN_CFG_COMMON_Q_SIZE_EXP 20 /* the expected timeout */ #define RPMA_CONN_CFG_COMMON_TIMEOUT_MS_EXP 2000 /* the expected completion channel state */ #define RPMA_CONN_CFG_COMMON_IS_SHARED true /* the expected SRQ object */ #define RPMA_SRQ_EXP (struct rpma_srq *)0xCD12 struct rpma_conn_cfg_common_prestate { struct rpma_conn_cfg *cfg_ptr; }; void rpma_conn_cfg_common_prestate_init(void *prestate, struct mtt_result *tr); void rpma_conn_cfg_common_prestate_fini(void *prestate, struct mtt_result *tr); struct rpma_conn_cfg_common_state { struct rpma_conn_cfg *cfg_ptr; }; #endif /* MTT_RPMA_CONN_CFG_COMMON */ rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_common_get.c000066400000000000000000000012671443364775400254200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_common_get.c -- common main function for all conn_cfg mt get tests */ #include "mtt.h" #include "rpma_conn_cfg_common.h" void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr); int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, 
&args)) return -1; struct rpma_conn_cfg_common_prestate prestate = {NULL}; struct mtt_test test = { &prestate, rpma_conn_cfg_common_prestate_init, NULL, NULL, thread, NULL, NULL, rpma_conn_cfg_common_prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_common_set.c000066400000000000000000000012671443364775400254340ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_common_set.c -- common main function for all conn_cfg set mt tests */ #include "mtt.h" #include "rpma_conn_cfg_common.h" void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr); int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct rpma_conn_cfg_common_prestate prestate = {NULL}; struct mtt_test test = { &prestate, rpma_conn_cfg_common_prestate_init, NULL, NULL, thread, NULL, NULL, rpma_conn_cfg_common_prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_compl_channel.c000066400000000000000000000015771443364775400267360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_get_compl_channel.c -- rpma_conn_cfg_get_compl_channel multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured completion channel and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; bool shared; ret = rpma_conn_cfg_get_compl_channel(pr->cfg_ptr, &shared); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_compl_channel", ret); return; } if (shared != RPMA_CONN_CFG_COMMON_IS_SHARED) MTT_ERR_MSG(tr, "Invalid completion channel's value: %d instead of %d", -1, shared, RPMA_CONN_CFG_COMMON_IS_SHARED); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_cq_size.c000066400000000000000000000014701443364775400255610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_get_cq_size.c -- rpma_conn_cfg_get_cq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured cq size and check if its value is * as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t cq_size; int ret; ret = rpma_conn_cfg_get_cq_size(pr->cfg_ptr, &cq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_cq_size", ret); return; } if (cq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "cq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_rcq_size.c000066400000000000000000000014701443364775400257430ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_get_rcq_size.c -- rpma_conn_cfg_get_rcq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured rcq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct 
rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t rcq_size; int ret; ret = rpma_conn_cfg_get_rcq_size(pr->cfg_ptr, &rcq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rcq_size", ret); return; } if (rcq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "rcq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_rq_size.c000066400000000000000000000014641443364775400256030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_get_rq_size.c -- rpma_conn_cfg_get_rq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured rq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t rq_size; int ret; ret = rpma_conn_cfg_get_rq_size(pr->cfg_ptr, &rq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rq_size", ret); return; } if (rq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "rq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_sq_size.c000066400000000000000000000014661443364775400256060ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_get_sq_size.c -- rpma_conn_cfg_get_sq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured sq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t sq_size; int ret; ret = rpma_conn_cfg_get_sq_size(pr->cfg_ptr, &sq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_sq_size", ret); return; } if (sq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "sq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_srq.c000066400000000000000000000013541443364775400247320ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_get_srq.c -- rpma_conn_cfg_get_srq multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get the shared RQ object and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; struct rpma_srq *srq; ret = rpma_conn_cfg_get_srq(pr->cfg_ptr, &srq); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_srq", ret); return; } if (srq != RPMA_SRQ_EXP) MTT_ERR_MSG(tr, "Invalid shared RQ value", -1); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_get_timeout.c000066400000000000000000000014761443364775400256200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_get_timeout.c -- rpma_conn_cfg_get_timeout multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- get connection configured timeout and check if its value is as expected */ void thread(unsigned id, 
void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret, timeout_ms; ret = rpma_conn_cfg_get_timeout(pr->cfg_ptr, &timeout_ms); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_timeout", ret); return; } if (timeout_ms != RPMA_CONN_CFG_COMMON_TIMEOUT_MS_EXP) MTT_ERR(tr, "timeout_ms != RPMA_CONN_CFG_COMMON_TIMEOUT_MS_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_new.c000066400000000000000000000065011443364775400240560ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_new.c -- rpma_conn_cfg_new multithreaded test */ #include #include #include "mtt.h" struct prestate { struct rpma_conn_cfg *cfg_exp; }; struct state { struct rpma_conn_cfg *cfg; }; struct conn_cfg_values { int timeout_ms; uint32_t cq_size; uint32_t sq_size; uint32_t rq_size; }; static int conn_cfg_get_all(struct rpma_conn_cfg *cfg, struct conn_cfg_values *vals, struct mtt_result *tr) { int ret; ret = rpma_conn_cfg_get_timeout(cfg, &vals->timeout_ms); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_timeout", ret); return -1; } ret = rpma_conn_cfg_get_cq_size(cfg, &vals->cq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_cq_size", ret); return -1; } ret = rpma_conn_cfg_get_sq_size(cfg, &vals->sq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_sq_size", ret); return -1; } ret = rpma_conn_cfg_get_rq_size(cfg, &vals->rq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rq_size", ret); return -1; } return 0; } /* * prestate_init -- create a new connection configuration object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_conn_cfg_new(&pr->cfg_exp); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); } /* * init -- allocate state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create a new connection configuration object and check its default values */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; struct conn_cfg_values cfg_vals; struct conn_cfg_values cfg_exp_vals; int ret; ret = rpma_conn_cfg_new(&st->cfg); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); return; } if (conn_cfg_get_all(st->cfg, &cfg_vals, tr)) return; if (conn_cfg_get_all(pr->cfg_exp, &cfg_exp_vals, tr)) return; ret = memcmp(&cfg_vals, &cfg_exp_vals, sizeof(struct conn_cfg_values)); if (ret) MTT_ERR(tr, "cfg_vals != cfg_exp_vals", EINVAL); } /* * fini -- free the connection configuration object and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; int ret; ret = rpma_conn_cfg_delete(&st->cfg); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); free(st); *state_ptr = NULL; } /* * prestate_fini -- free the connection configuration object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_conn_cfg_delete(&pr->cfg_exp); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if 
(mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, init, thread, fini, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_compl_channel.c000066400000000000000000000020341443364775400267370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_set_compl_channel.c -- rpma_conn_cfg_set_compl_channel multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- set connection establishment completion channel and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; bool shared = true; ret = rpma_conn_cfg_set_compl_channel(pr->cfg_ptr, shared); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_compl_channel", ret); return; } ret = rpma_conn_cfg_get_compl_channel(pr->cfg_ptr, &shared); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_compl_channel", ret); return; } if (shared != RPMA_CONN_CFG_COMMON_IS_SHARED) MTT_ERR_MSG(tr, "Invalid completion channel's value: %d instead of %d", -1, shared, RPMA_CONN_CFG_COMMON_IS_SHARED); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_cq_size.c000066400000000000000000000020011443364775400255640ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_set_cq_size.c -- rpma_conn_cfg_set_cq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- set connection establishment cq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t cq_size = 0; int ret; ret = rpma_conn_cfg_set_cq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_cq_size", ret); return; } ret = rpma_conn_cfg_get_cq_size(pr->cfg_ptr, &cq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_cq_size", ret); return; } if (cq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid cq_size: %d instead of %d", -1, cq_size, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_rcq_size.c000066400000000000000000000016371443364775400257640ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_set_rcq_size.c -- rpma_conn_cfg_set_rcq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" struct rpma_conn_cfg_common_prestate prestate = {NULL}; /* * thread -- set connection establishment rcq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t rcq_size = 0; int ret; ret = rpma_conn_cfg_get_rcq_size(pr->cfg_ptr, &rcq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rcq_size", ret); return; } if (rcq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid rcq_size: %d instead of %d", -1, rcq_size, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); } 
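/*
 * Illustrative sketch (not part of the original test suite): the thread above
 * only reads back the receive CQ size configured by the common prestate_init.
 * The sibling set_* tests pair the setter with the getter; a minimal example
 * of that pattern for the receive CQ size could look as follows. The helper
 * name below is hypothetical.
 */
static void __attribute__((unused))
set_then_get_rcq_size_sketch(struct rpma_conn_cfg *cfg, struct mtt_result *tr)
{
	uint32_t rcq_size = 0;
	int ret;

	/* set the expected receive CQ size */
	ret = rpma_conn_cfg_set_rcq_size(cfg, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP);
	if (ret) {
		MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_rcq_size", ret);
		return;
	}

	/* read the value back and verify it */
	ret = rpma_conn_cfg_get_rcq_size(cfg, &rcq_size);
	if (ret) {
		MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rcq_size", ret);
		return;
	}

	if (rcq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP)
		MTT_ERR_MSG(tr, "Invalid rcq_size: %d instead of %d", -1,
				rcq_size, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP);
}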
rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_rq_size.c000066400000000000000000000017771443364775400256260ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_set_rq_size.c -- rpma_conn_cfg_set_rq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- set connection establishment rq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t rq_size = 0; int ret; ret = rpma_conn_cfg_set_rq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_rq_size", ret); return; } ret = rpma_conn_cfg_get_rq_size(pr->cfg_ptr, &rq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_rq_size", ret); return; } if (rq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid rq_size: %d instead of %d", -1, rq_size, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_sq_size.c000066400000000000000000000017771443364775400256270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_set_sq_size.c -- rpma_conn_cfg_set_sq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" /* * thread -- set connection establishment sq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; uint32_t sq_size = 0; int ret; ret = rpma_conn_cfg_set_sq_size(pr->cfg_ptr, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_sq_size", ret); return; } ret = rpma_conn_cfg_get_sq_size(pr->cfg_ptr, &sq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_sq_size", ret); return; } if (sq_size != RPMA_CONN_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid sq_size: %d instead of %d", -1, sq_size, RPMA_CONN_CFG_COMMON_Q_SIZE_EXP); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_srq.c000066400000000000000000000016651443364775400247530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_conn_cfg_set_srq.c -- rpma_conn_cfg_set_srq multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" #define SET_RPMA_SRQ (struct rpma_srq *)0xCD15 /* * thread -- set the shared RQ object and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret; struct rpma_srq *srq; ret = rpma_conn_cfg_set_srq(pr->cfg_ptr, SET_RPMA_SRQ); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_compl_channel", ret); return; } ret = rpma_conn_cfg_get_srq(pr->cfg_ptr, &srq); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_get_compl_channel", ret); return; } if (srq != SET_RPMA_SRQ) MTT_ERR_MSG(tr, "Invalid shared RQ value", -1); } rpma-1.3.0/tests/multithreaded/conn_cfg/rpma_conn_cfg_set_timeout.c000066400000000000000000000042411443364775400256250ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_conn_cfg_set_timeout.c -- 
rpma_conn_cfg_set_timeout multithreaded test */ #include #include #include "mtt.h" #include "rpma_conn_cfg_common.h" struct rpma_conn_cfg_common_prestate prestate = {NULL}; #define TIMEOUT_MS_EXP 2000 struct state { struct rpma_conn_cfg *cfg_ptr; }; /* * init -- allocate state and create a new connection configuration object */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); int ret; if (!st) { MTT_ERR(tr, "calloc", errno); return; } ret = rpma_conn_cfg_new(&st->cfg_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_new", ret); return; } *state_ptr = st; } /* * thread -- set connection establishment timeout and check if its value is as expected */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_conn_cfg_common_prestate *pr = (struct rpma_conn_cfg_common_prestate *)prestate; int ret, timeout_ms; ret = rpma_conn_cfg_set_timeout(pr->cfg_ptr, TIMEOUT_MS_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_timeout", ret); return; } ret = rpma_conn_cfg_get_timeout(pr->cfg_ptr, &timeout_ms); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_cfg_set_timeout", ret); return; } if (timeout_ms != TIMEOUT_MS_EXP) MTT_ERR(tr, "timeout_ms != TIMEOUT_MS_EXP", EINVAL); } /* * fini -- free the connection configuration object and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; int ret; ret = rpma_conn_cfg_delete(&st->cfg_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_cfg_delete", ret); free(st); *state_ptr = NULL; } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct mtt_test test = { &prestate, rpma_conn_cfg_common_prestate_init, NULL, init, thread, fini, NULL, rpma_conn_cfg_common_prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/ep/000077500000000000000000000000001443364775400170745ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/ep/CMakeLists.txt000066400000000000000000000007401443364775400216350ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME ep BIN rpma_ep_get_fd SRCS rpma_ep_get_fd.c) add_multithreaded(NAME ep BIN rpma_ep_listen SRCS rpma_ep_listen.c) add_multithreaded(NAME ep BIN rpma_ep_next_conn_req SRCS rpma_ep_next_conn_req.c ../common/mtt_client_process.c ../common/mtt_connect.c) add_multithreaded(NAME ep BIN rpma_ep_shutdown SRCS rpma_ep_shutdown.c) rpma-1.3.0/tests/multithreaded/ep/rpma_ep_get_fd.c000066400000000000000000000047341443364775400222030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_ep_get_fd.c -- rpma_ep_get_fd multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct ibv_context *ibv_ctx; struct rpma_peer *peer; struct rpma_ep *ep; int ep_fd_exp; }; /* * prestate_init -- obtain an ibv_context for a local IP address, create a new peer object, * start a listening endpoint and get the endpoint's event file descriptor */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, 
"rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(pr->ibv_ctx, &pr->peer); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return; } MTT_PORT_INIT; MTT_PORT_SET(pr->port, 0); ret = rpma_ep_listen(pr->peer, pr->addr, MTT_PORT_STR, &pr->ep); if (ret) { MTT_RPMA_ERR(tr, "rpma_ep_listen", ret); (void) rpma_peer_delete(&pr->peer); return; } ret = rpma_ep_get_fd(pr->ep, &pr->ep_fd_exp); if (ret) { MTT_RPMA_ERR(tr, "rpma_ep_get_fd", ret); (void) rpma_ep_shutdown(&pr->ep); (void) rpma_peer_delete(&pr->peer); } } /* * thread -- get the endpoint's event file descriptor */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; int ep_fd; int ret = rpma_ep_get_fd(pr->ep, &ep_fd); if (ret) { MTT_RPMA_ERR(result, "rpma_ep_get_fd", ret); return; } if (ep_fd != pr->ep_fd_exp) MTT_ERR(result, "rpma_ep_get_fd returned an unexpected value", EINVAL); } /* * prestate_fini -- shutdown the endpoint and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_ep_shutdown(&pr->ep); if (ret) MTT_RPMA_ERR(tr, "rpma_ep_shutdown", ret); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/ep/rpma_ep_listen.c000066400000000000000000000047171443364775400222520ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_ep_listen.c -- rpma_ep_listen multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct ibv_context *ibv_ctx; struct rpma_peer *peer; }; struct state { struct rpma_ep *ep; }; /* * prestate_init -- obtain an ibv_context for a local IP address * and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(pr->ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * init -- allocate state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- start a listening endpoint */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_ep_listen(pr->peer, pr->addr, MTT_PORT_STR, &st->ep); if (ret) MTT_RPMA_ERR(result, "rpma_ep_listen", ret); } /* * fini -- shutdown the endpoint and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; /* shutdown the endpoint */ int ret = rpma_ep_shutdown(&st->ep); if (ret) MTT_RPMA_ERR(tr, "rpma_ep_shutdown", ret); free(st); 
*state_ptr = NULL; } /* * prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, init, thread, fini, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/ep/rpma_ep_next_conn_req.c000066400000000000000000000056431443364775400236150ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_ep_next_conn_req.c -- rpma_ep_next_conn_req multithreaded test */ #include #include #include "mtt.h" #include "mtt_connect.h" /* the server's part */ struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_ep *ep; }; struct state { struct rpma_conn_req *req; }; /* * prestate_init -- listen to the clients */ static void prestate_init(void *prestate, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; int ret = mtt_server_listen(pr->addr, pr->port, &pr->peer, &pr->ep); if (ret) MTT_RPMA_ERR(result, "mtt_server_listen", ret); } /* * seq_init -- allocate a state for the thread */ static void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(result, "calloc", errno); return; } *state_ptr = st; } /* * thread -- run rpma_ep_next_conn_req() */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; /* receive an incoming connection request */ int ret = rpma_ep_next_conn_req(pr->ep, NULL, &st->req); if (ret) MTT_RPMA_ERR(result, "rpma_ep_next_conn_req", ret); } /* * seq_fini -- delete the connection request and free the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct state *st = (struct state *)*state_ptr; int ret = rpma_conn_req_delete(&st->req); if (ret) MTT_RPMA_ERR(result, "rpma_conn_req_delete", ret); free(st); *state_ptr = NULL; } /* * prestate_fini -- delete the endpoint and the peer object */ static void prestate_fini(void *prestate, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; mtt_server_shutdown(&pr->peer, &pr->ep); } /* the client's part */ struct client_prestate { char *addr; unsigned port; unsigned threads_num; }; /* * client_main -- the main function of the client */ int client_main(char *addr, unsigned port, unsigned threads_num); /* * client_func -- the client function of this test */ int client_func(void *prestate) { struct client_prestate *pst = prestate; return client_main(pst->addr, pst->port, pst->threads_num); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate server_prestate = {args.addr, args.port}; struct client_prestate client_prestate = {args.addr, args.port, args.threads_num}; struct mtt_test test = { &server_prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini, client_func, &client_prestate, MTT_START_CHILD_BEFORE_JOINING_THREADS }; return mtt_run(&test, 
args.threads_num); } rpma-1.3.0/tests/multithreaded/ep/rpma_ep_shutdown.c000066400000000000000000000047721443364775400226300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_ep_shutdown.c -- rpma_ep_shutdown multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct ibv_context *ibv_ctx; struct rpma_peer *peer; }; struct state { struct rpma_ep *ep; }; /* * prestate_init -- obtain an ibv_context for a local IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(pr->ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * seq_init -- allocate state and start listening endpoints */ void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(result, "calloc", errno); return; } *state_ptr = st; MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_ep_listen(pr->peer, pr->addr, MTT_PORT_STR, &st->ep); if (ret) MTT_RPMA_ERR(result, "rpma_ep_listen", ret); } /* * thread -- shutdown a listening endpoint */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct state *st = (struct state *)state; /* shutdown and delete the endpoint */ int ret = rpma_ep_shutdown(&st->ep); if (ret) MTT_RPMA_ERR(tr, "rpma_ep_shutdown", ret); } /* * seq_fini -- free the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; free(st); *state_ptr = NULL; } /* * prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/example/000077500000000000000000000000001443364775400201235ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/example/CMakeLists.txt000066400000000000000000000002661443364775400226670ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME example BIN example SRCS example.c) rpma-1.3.0/tests/multithreaded/example/example.c000066400000000000000000000107171443364775400217300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * example.c -- an example of a multithreaded test * * An example showing how to use the multithreaded test API (mtt_*). * The test itself does a very simple thing of rewriting input to the output * using an intermediate temp variable (of course it is nonsensical). 
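 *
 * A minimal sketch of how such a test is wired up and run (it mirrors the
 * main() at the bottom of this file; mtt_run() drives all of the steps
 * listed below):
 *
 *	struct prestate prestate = {0};
 *	struct mtt_test test = {&prestate, prestate_init, seq_init, init,
 *			thread, fini, seq_fini, prestate_fini};
 *	return mtt_run(&test, args.threads_num);
 *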
* The test runs as follow: * - prestate_init - picks an arbitrary seed * - seq_init - sequentially allocates and initializes states for all of * the threads * - init - in parallel all of the threads allocate a temp variable for * themselves * - thread - rewrite input to the temp variable * - fini - in parallel rewrites temp to the output and frees the temp * variable * - seq_fini - sequentially prints the output of each of the threads and * frees their states * - prestate_fini - zero out the seed */ #include #include #include #include #include #include #include "mtt.h" /* * prestate is an object which exists in a single copy and is shared among all * threads. It can be safely modified during the sequential steps * (thread_seq_init_func, thread_seq_fini_func). Accessing it during parallel * steps (thread_init_func, thread_func, thread_fini_func) has to be executed * in a thread-safe manner. */ struct prestate { uint64_t seed; }; /* * state is an object which is allocated for each of the threads. It can be * allocated during sequential or parallel initialization step * (thread_seq_init_func, thread_init_func) and freed during parallel or * sequential cleanup step (thread_fini_func, thread_seq_fini_func). * Since it is dedicated to a single thread it can be accessed freely during * all test steps. */ struct state { uint64_t input; uint64_t *temp; uint64_t output; }; /* * prestate_init -- prestate initialization called once for all threads */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; pr->seed = 5; /* an arbitrary seed */ } /* * seq_init -- a sequential step of initialization * * Everything that has been initialized before the test but is not thread-safe * should go here. */ static void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } st->input = pr->seed + id; *state_ptr = st; } /* * init -- a parallel step of initialization * * Everything that can be initialized before the test in a thread-safe manner * should go here. */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; st->temp = calloc(1, sizeof(uint64_t)); if (!st->temp) { MTT_ERR(tr, "calloc", errno); return; } } /* * thread -- a test itself (parallel) */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct state *st = (struct state *)state; *st->temp = st->input; } /* * fini -- a parallel step of cleanup * * Everything that can be cleaned up after the test in a thread-safe manner * should go here. */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; st->output = *st->temp; free(st->temp); st->temp = NULL; } /* * seq_fini -- a sequential step of cleanup * * Everything that has to be cleaned up after the test but is not thread-safe * should go here. 
*/ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; printf("[%u] = %" PRIu64 "\n", id, st->output); free(st); *state_ptr = NULL; } /* * prestate_fini -- prestate cleanup called once for all threads */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; pr->seed = 0; /* zero out the seed */ } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {0}; struct mtt_test test = { &prestate, prestate_init, seq_init, init, thread, fini, seq_fini, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/log/000077500000000000000000000000001443364775400172515ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/log/CMakeLists.txt000066400000000000000000000006031443364775400220100ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME log BIN rpma_log_set_function SRCS rpma_log_set_function.c) add_multithreaded(NAME log BIN rpma_log_set_threshold SRCS rpma_log_set_threshold.c) add_multithreaded(NAME log BIN rpma_log_set_get_threshold SRCS rpma_log_set_get_threshold.c) rpma-1.3.0/tests/multithreaded/log/rpma_log_set_function.c000066400000000000000000000013271443364775400240000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_log_set_function.c -- rpma_log_set_function multithreaded test */ #include #include "mtt.h" /* * thread -- set log function */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { int ret = rpma_log_set_function(RPMA_LOG_USE_DEFAULT_FUNCTION); if (ret) { MTT_RPMA_ERR(tr, "rpma_log_set_function", ret); return; } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct mtt_test test = { NULL, NULL, NULL, NULL, thread, NULL, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/log/rpma_log_set_get_threshold.c000066400000000000000000000021021443364775400247760ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_log_set_get_threshold.c -- multithreaded test of rpma_log_set_threshold * + rpma_log_get_threshold */ #include #include "mtt.h" /* * thread -- set threshold function and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { enum rpma_log_level level; int ret; ret = rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); if (ret) { MTT_RPMA_ERR(tr, "rpma_log_set_threshold", ret); return; } ret = rpma_log_get_threshold(RPMA_LOG_THRESHOLD, &level); if (ret) { MTT_RPMA_ERR(tr, "rpma_log_get_threshold", ret); return; } if (level != RPMA_LOG_LEVEL_INFO) { MTT_ERR(tr, "level != RPMA_LOG_LEVEL_INFO", EINVAL); } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct mtt_test test = { NULL, NULL, NULL, NULL, thread, NULL, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/log/rpma_log_set_threshold.c000066400000000000000000000013601443364775400241440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * 
rpma_log_set_threshold.c -- rpma_log_set_threshold multithreaded test */ #include #include "mtt.h" /* * thread -- set the log level threshold */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { int ret = rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); if (ret) { MTT_RPMA_ERR(tr, "rpma_log_set_threshold", ret); return; } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct mtt_test test = { NULL, NULL, NULL, NULL, thread, NULL, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/000077500000000000000000000000001443364775400171065ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/mr/CMakeLists.txt000066400000000000000000000012161443364775400216460ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME mr BIN rpma_mr_dereg SRCS rpma_mr_dereg.c) add_multithreaded(NAME mr BIN rpma_mr_get_descriptor SRCS rpma_mr_get_descriptor.c) add_multithreaded(NAME mr BIN rpma_mr_get_ptr SRCS rpma_mr_get_ptr.c) add_multithreaded(NAME mr BIN rpma_mr_get_size SRCS rpma_mr_get_size.c) add_multithreaded(NAME mr BIN rpma_mr_reg SRCS rpma_mr_reg.c) add_multithreaded(NAME mr BIN rpma_mr_remote_from_descriptor SRCS rpma_mr_remote_from_descriptor.c server_rpma_mr_remote_from_descriptor.c ../common/mtt_connect.c) rpma-1.3.0/tests/multithreaded/mr/rpma_mr_dereg.c000066400000000000000000000051351443364775400220610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_mr_dereg.c -- rpma_mr_dereg multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct ibv_context *ibv_ctx; struct rpma_peer *peer; }; struct state { void *mr_ptr; struct rpma_mr_local *mr; }; /* * prestate_init -- obtain an ibv_context for a local IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(pr->ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * seq_init -- allocate state, memory region and register the memory */ void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(result, "calloc", errno); return; } st->mr_ptr = mtt_malloc_aligned(KILOBYTE, result); if (st->mr_ptr == NULL) { free(st); return; } *state_ptr = st; int ret = rpma_mr_reg(pr->peer, st->mr_ptr, KILOBYTE, RPMA_MR_USAGE_READ_SRC, &st->mr); if (ret) MTT_RPMA_ERR(result, "rpma_mr_reg", ret); } /* * thread -- deregister the memory region */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct state *st = (struct state *)state; if (st->mr) { int ret = rpma_mr_dereg(&st->mr); if (ret) MTT_RPMA_ERR(result, "rpma_mr_dereg", ret); } } /* * seq_fini -- free the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; free(st->mr_ptr); free(st); *state_ptr = NULL; } /* * 
prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/rpma_mr_get_descriptor.c000066400000000000000000000065441443364775400240150ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_mr_get_descriptor.c -- rpma_mr_get_descriptor multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; struct rpma_peer *peer; void *mr_ptr; struct rpma_mr_local *mr; size_t mr_desc_size_exp; char descriptor_exp[KILOBYTE]; }; /* * prestate_init -- obtain an ibv_context for a local IP address, * create a new peer object, allocate and register memory region, * and prepare an expected descriptor value for the memory region. */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return; } pr->mr_ptr = mtt_malloc_aligned(KILOBYTE, tr); if (pr->mr_ptr == NULL) goto err_peer_delete; ret = rpma_mr_reg(pr->peer, pr->mr_ptr, KILOBYTE, RPMA_MR_USAGE_READ_SRC, &pr->mr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_reg", ret); goto err_free; } ret = rpma_mr_get_descriptor_size(pr->mr, &pr->mr_desc_size_exp); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_get_descriptor_size", ret); goto err_mr_dereg; } if (pr->mr_desc_size_exp > KILOBYTE) { MTT_ERR_MSG(tr, "mr_desc_size_exp > KILOBYTE", -1); goto err_mr_dereg; } ret = rpma_mr_get_descriptor(pr->mr, &pr->descriptor_exp[0]); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_get_descriptor", ret); goto err_mr_dereg; } return; err_mr_dereg: (void) rpma_mr_dereg(&pr->mr); err_free: free(pr->mr_ptr); err_peer_delete: (void) rpma_peer_delete(&pr->peer); } /* * thread -- get the memory region's descriptor and validate it */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; size_t mr_desc_size; char descriptor[KILOBYTE]; int ret; ret = rpma_mr_get_descriptor_size(pr->mr, &mr_desc_size); if (ret) { MTT_RPMA_ERR(result, "rpma_mr_get_descriptor_size", ret); return; } if (mr_desc_size != pr->mr_desc_size_exp) { MTT_ERR_MSG(result, "mr_desc_size != pr->mr_desc_size_exp", -1); return; } ret = rpma_mr_get_descriptor(pr->mr, &descriptor[0]); if (ret) { MTT_RPMA_ERR(result, "rpma_mr_get_descriptor", ret); return; } ret = memcmp(descriptor, pr->descriptor_exp, mr_desc_size); if (ret) MTT_ERR_MSG(result, "descriptor != pr->descriptor_exp", -1); } /* * prestate_fini -- deregister and free the memory region, and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_mr_dereg(&pr->mr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_dereg", ret); 
free(pr->mr_ptr); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/rpma_mr_get_ptr.c000066400000000000000000000052751443364775400224440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_mr_get_ptr.c -- rpma_mr_get_ptr multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; struct rpma_peer *peer; void *mr_ptr; struct rpma_mr_local *mr; }; /* * prestate_init -- obtain an ibv_context for a local IP address, create a new peer object, * allocate memory region, register the memory and get the memory region pointer */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; void *mr_ptr_exp; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return; } pr->mr_ptr = mtt_malloc_aligned(KILOBYTE, tr); if (pr->mr_ptr == NULL) goto err_peer_delete; ret = rpma_mr_reg(pr->peer, pr->mr_ptr, KILOBYTE, RPMA_MR_USAGE_READ_SRC, &pr->mr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_reg", ret); goto err_free; } ret = rpma_mr_get_ptr(pr->mr, &mr_ptr_exp); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_get_ptr", ret); goto err_mr_dereg; } if (mr_ptr_exp != pr->mr_ptr) { MTT_ERR_MSG(tr, "mr_ptr_exp != pr->mr_ptr", -1); goto err_mr_dereg; } return; err_mr_dereg: (void) rpma_mr_dereg(&pr->mr); err_free: free(pr->mr_ptr); err_peer_delete: (void) rpma_peer_delete(&pr->peer); } /* * thread -- get the memory region pointer */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; void *mr_ptr; int ret = rpma_mr_get_ptr(pr->mr, &mr_ptr); if (ret) { MTT_RPMA_ERR(result, "rpma_mr_get_ptr", ret); return; } if (mr_ptr != pr->mr_ptr) MTT_ERR(result, "mr_ptr != pr->mr_ptr", EINVAL); } /* * prestate_fini -- deregister the memory region, free it and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_mr_dereg(&pr->mr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_dereg", ret); free(pr->mr_ptr); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/rpma_mr_get_size.c000066400000000000000000000053311443364775400226020ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_mr_get_size.c -- rpma_mr_get_size multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; struct rpma_peer *peer; void *mr_ptr; struct rpma_mr_local *mr; size_t mr_size_exp; 
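	/*
	 * expected size of the registered region, captured once in prestate_init() and
	 * compared against by every thread
	 */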
}; /* * prestate_init -- obtain an ibv_context for a local IP address, create a new peer object, * allocate memory region, register the memory and get the memory region size */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return; } pr->mr_ptr = mtt_malloc_aligned(KILOBYTE, tr); if (pr->mr_ptr == NULL) goto err_peer_delete; ret = rpma_mr_reg(pr->peer, pr->mr_ptr, KILOBYTE, RPMA_MR_USAGE_READ_SRC, &pr->mr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_reg", ret); goto err_free; } ret = rpma_mr_get_size(pr->mr, &pr->mr_size_exp); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_get_size", ret); goto err_mr_dereg; } if (pr->mr_size_exp != KILOBYTE) { MTT_ERR_MSG(tr, "pr->mr_size_exp != KILOBYTE", -1); goto err_mr_dereg; } return; err_mr_dereg: (void) rpma_mr_dereg(&pr->mr); err_free: free(pr->mr_ptr); err_peer_delete: (void) rpma_peer_delete(&pr->peer); } /* * thread -- get the memory region size */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; size_t mr_size; int ret = rpma_mr_get_size(pr->mr, &mr_size); if (ret) { MTT_RPMA_ERR(result, "rpma_mr_get_size", ret); return; } if (mr_size != pr->mr_size_exp) MTT_ERR(result, "mr_size != pr->mr_size_exp", EINVAL); } /* * prestate_fini -- deregister the memory region, free it and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_mr_dereg(&pr->mr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_dereg", ret); free(pr->mr_ptr); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/rpma_mr_reg.c000066400000000000000000000050711443364775400215470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_mr_reg.c -- rpma_mr_reg multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct ibv_context *ibv_ctx; struct rpma_peer *peer; }; struct state { void *mr_ptr; struct rpma_mr_local *mr; }; /* * prestate_init -- obtain an ibv_context for a local IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(pr->ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * init -- allocate state and memory region */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } st->mr_ptr = mtt_malloc_aligned(KILOBYTE, 
tr); if (st->mr_ptr == NULL) { free(st); return; } *state_ptr = st; } /* * thread -- register the memory */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; int ret = rpma_mr_reg(pr->peer, st->mr_ptr, KILOBYTE, RPMA_MR_USAGE_READ_SRC, &st->mr); if (ret) MTT_RPMA_ERR(result, "rpma_mr_reg", ret); } /* * fini -- deregister the memory region and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; if (st->mr) { int ret = rpma_mr_dereg(&st->mr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_dereg", ret); } free(st->mr_ptr); free(st); *state_ptr = NULL; } /* * prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, init, thread, fini, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/rpma_mr_remote_from_descriptor.c000066400000000000000000000103001443364775400255350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_mr_remote_from_descriptor.c -- rpma_mr_remote_from_descriptor multithreaded test */ #include #include #include "mtt.h" #include "mtt_connect.h" /* the client's part */ struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_conn *conn; struct rpma_mr_remote *mr_ptr; size_t mr_size; /* the expected value of the private data */ struct rpma_conn_private_data pdata; }; static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; if (mtt_client_peer_new(tr, pr->addr, &pr->peer)) return; if (mtt_client_connect(tr, pr->addr, pr->port, pr->peer, &pr->conn, &pr->pdata)) goto err_peer_delete; /* * Create a remote memory registration structure from the received * descriptor. 
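 * The descriptor was produced on the server side with rpma_mr_get_descriptor() and
 * delivered to this client in the connection's private data.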
*/ struct common_data *dst_data = pr->pdata.ptr; ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &pr->mr_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_remote_from_descriptor", ret); goto err_conn_disconnect; }; ret = rpma_mr_remote_get_size(pr->mr_ptr, &pr->mr_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_mr_remote_get_size", ret); goto err_mr_remote_delete; }; return; err_mr_remote_delete: /* delete the remote memory region's structure */ (void) rpma_mr_remote_delete(&pr->mr_ptr); err_conn_disconnect: mtt_client_err_disconnect(&pr->conn); err_peer_delete: mtt_client_peer_delete(tr, &pr->peer); } /* * thread -- get and verify the private data */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct rpma_mr_remote *mr_ptr; struct common_data *dst_data = pr->pdata.ptr; int ret = rpma_mr_remote_from_descriptor(&dst_data->descriptors[0], dst_data->mr_desc_size, &mr_ptr); if (ret) { MTT_RPMA_ERR(result, "rpma_mr_remote_from_descriptor", ret); return; } else if (mr_ptr == NULL) { MTT_ERR_MSG(result, "Getting mr_remote from descriptor failed", -1); return; } if (memcmp(mr_ptr, pr->mr_ptr, pr->mr_size) != 0) MTT_ERR_MSG(result, "Wrong content of the mr_remote", -1); ret = rpma_mr_remote_delete(&mr_ptr); if (ret) MTT_RPMA_ERR(result, "rpma_mr_remote_delete", ret); } /* * prestate_fini -- deregister and free the memory region, disconnect and delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; enum rpma_conn_event conn_event = RPMA_CONN_UNDEFINED; int ret; ret = rpma_conn_disconnect(pr->conn); if (ret) { MTT_RPMA_ERR(tr, "rpma_conn_disconnect", ret); } else { /* wait for the connection to be closed */ ret = rpma_conn_next_event(pr->conn, &conn_event); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_next_event", ret); else if (conn_event != RPMA_CONN_CLOSED) MTT_ERR_MSG(tr, "rpma_conn_next_event returned an unexpected event", -1); } ret = rpma_mr_remote_delete(&pr->mr_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_mr_remote_delete", ret); ret = rpma_conn_delete(&pr->conn); if (ret) MTT_RPMA_ERR(tr, "rpma_conn_delete", ret); ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } /* the server's part */ struct server_prestate { char *addr; unsigned port; /* the expected value of the private data */ struct rpma_conn_private_data pdata; }; /* * server_main -- the main function of the server */ int server_main(char *addr, unsigned port); /* * server_func -- the server function of this test */ int server_func(void *prestate) { struct server_prestate *pst = (struct server_prestate *)prestate; return server_main(pst->addr, pst->port); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate client_prestate = {args.addr, args.port}; struct server_prestate server_prestate = {args.addr, args.port}; struct mtt_test test = { &client_prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini, server_func, &server_prestate }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/mr/server_rpma_mr_remote_from_descriptor.c000066400000000000000000000040271443364775400271340ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * server_rpma_mr_remote_from_descriptor.c -- a server of the rpma_mr_remote_from_descriptor MT test */ 
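/*
 * The server serializes its memory region into a descriptor and hands it to the client in
 * the connection's private data, packed into struct common_data (defined in the shared test
 * headers, not in this file). The sketch below only illustrates the assumed shape, limited
 * to the fields this file actually touches; the field widths, the capacity constant and the
 * struct name are hypothetical, so it is deliberately named differently to avoid clashing
 * with the real definition.
 */
#include <stdint.h>

#define COMMON_DATA_SKETCH_DESC_MAX 512	/* hypothetical capacity of the descriptor buffer */

struct common_data_sketch {
	uint8_t mr_desc_size;	/* size of the serialized memory region descriptor */
	char descriptors[COMMON_DATA_SKETCH_DESC_MAX];	/* the serialized descriptor itself */
};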
#include #include #include #include #include #include "mtt.h" #include "mtt_connect.h" #define HELLO_STR "Hello client!" int server_main(char *addr, unsigned port) { struct rpma_peer *peer; struct rpma_ep *ep; struct rpma_conn *conn = NULL; int ret; /* resources - memory region */ void *mr_ptr = NULL; size_t mr_size = 0; struct rpma_mr_local *mr = NULL; ret = mtt_server_listen(addr, port, &peer, &ep); if (ret) return -1; /* allocate a memory */ mr_size = strlen(HELLO_STR) + 1; mr_ptr = mtt_malloc_aligned(mr_size, NULL); if (mr_ptr == NULL) { ret = -1; goto err_server_shutdown; } /* fill the memory with a content */ memcpy(mr_ptr, HELLO_STR, mr_size); /* register the memory */ ret = rpma_mr_reg(peer, mr_ptr, mr_size, RPMA_MR_USAGE_READ_SRC, &mr); if (ret) goto err_mr_free; /* get size of the memory region's descriptor */ size_t mr_desc_size; ret = rpma_mr_get_descriptor_size(mr, &mr_desc_size); if (ret) goto err_mr_dereg; struct common_data data = {0}; data.mr_desc_size = mr_desc_size; /* get the memory region's descriptor */ ret = rpma_mr_get_descriptor(mr, &data.descriptors[0]); if (ret) goto err_mr_dereg; struct rpma_conn_private_data pdata; pdata.ptr = &data; pdata.len = sizeof(struct common_data); /* * Wait for an incoming connection request, accept it and wait for its * establishment. */ ret = mtt_server_accept_connection(ep, &pdata, &conn); if (ret) goto err_server_shutdown; /* * Wait for RPMA_CONN_CLOSED, disconnect and delete the connection * structure. */ mtt_server_wait_for_conn_close_and_disconnect(&conn); err_mr_dereg: /* deregister the memory region */ (void) rpma_mr_dereg(&mr); err_mr_free: /* free the memory */ free(mr_ptr); err_server_shutdown: mtt_server_shutdown(&peer, &ep); return ret; } rpma-1.3.0/tests/multithreaded/peer/000077500000000000000000000000001443364775400174235ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/peer/CMakeLists.txt000066400000000000000000000007561443364775400221730ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME peer BIN rpma_peer_cfg_new SRCS rpma_peer_cfg_new.c) add_multithreaded(NAME peer BIN rpma_peer_cfg_from_descriptor SRCS rpma_peer_cfg_from_descriptor.c) add_multithreaded(NAME peer BIN rpma_peer_cfg_set_direct_write_to_pmem SRCS rpma_peer_cfg_set_direct_write_to_pmem.c) add_multithreaded(NAME peer BIN rpma_peer_new SRCS rpma_peer_new.c) rpma-1.3.0/tests/multithreaded/peer/rpma_peer_cfg_from_descriptor.c000066400000000000000000000106671443364775400256530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_peer_cfg_from_descriptor.c -- multithreaded test */ #include #include #include #include #include #include #define DIRECT_WRITE_TO_PMEM_SUPPORTED true #define DESCRIPTORS_MAX_SIZE 512 struct thread_args { int thread_num; const void *desc; size_t desc_size; }; static const char *api_name = "rpma_peer_cfg_from_descriptor"; static void * thread_main(void *arg) { int ret; bool direct_write_to_pmem = false; /* RPMA resources */ struct rpma_peer_cfg *pcfg = NULL; /* parameters */ struct thread_args *p_thread_args = (struct thread_args *)arg; /* Create a remote peer configuration structure from input descriptor */ ret = rpma_peer_cfg_from_descriptor(p_thread_args->desc, p_thread_args->desc_size, &pcfg); if (ret) { fprintf(stderr, "[thread #%d] %s failed: %s\n", p_thread_args->thread_num, api_name, rpma_err_2str(ret)); exit(-1); } /* * 
check if the direct write to PMEM is as expected (supported or not). */ ret = rpma_peer_cfg_get_direct_write_to_pmem(pcfg, &direct_write_to_pmem); if (ret) { fprintf(stderr, "[thread #%d] rpma_peer_cfg_get_direct_write_to_pmem failed: %s\n", p_thread_args->thread_num, rpma_err_2str(ret)); exit(-1); } if (direct_write_to_pmem != DIRECT_WRITE_TO_PMEM_SUPPORTED) { fprintf(stderr, "[thread #%d] rpma_peer_cfg_get_direct_write_to_pmem: unexpected direct_write_to_pmem = %d\n", p_thread_args->thread_num, direct_write_to_pmem); exit(-1); } ret = rpma_peer_cfg_delete(&pcfg); if (ret) { fprintf(stderr, "[thread #%d] rpma_peer_cfg_delete failed: %s\n", p_thread_args->thread_num, rpma_err_2str(ret)); exit(-1); } return NULL; } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "usage: %s \n", argv[0]); return -1; } /* configure logging thresholds to see more details */ rpma_log_set_threshold(RPMA_LOG_THRESHOLD, RPMA_LOG_LEVEL_INFO); rpma_log_set_threshold(RPMA_LOG_THRESHOLD_AUX, RPMA_LOG_LEVEL_INFO); /* parameters */ int ret = 0; int i; int thread_num = (int)strtoul(argv[1], NULL, 10); pthread_t *p_threads; p_threads = calloc((size_t)thread_num, sizeof(pthread_t)); if (p_threads == NULL) { fprintf(stderr, "calloc() failed\n"); return -1; } struct thread_args *threads_args = calloc((size_t)thread_num, sizeof(struct thread_args)); if (threads_args == NULL) { fprintf(stderr, "calloc() failed\n"); ret = -1; goto err_free_p_threads; } /* create a peer configuration structure */ struct rpma_peer_cfg *pcfg = NULL; ret = rpma_peer_cfg_new(&pcfg); if (ret) { fprintf(stderr, "rpma_peer_cfg_new() failed\n"); goto err_free_threads_args; } /* set direct write to PMEM supported */ ret = rpma_peer_cfg_set_direct_write_to_pmem(pcfg, DIRECT_WRITE_TO_PMEM_SUPPORTED); if (ret) { fprintf(stderr, "rpma_peer_cfg_set_direct_write_to_pmem() failed\n"); goto err_peer_cfg_delete; } /* get size of the peer config descriptor */ size_t desc_size; ret = rpma_peer_cfg_get_descriptor_size(pcfg, &desc_size); if (ret) { fprintf(stderr, "rpma_peer_cfg_get_descriptor_size() failed\n"); goto err_peer_cfg_delete; } if (desc_size > DESCRIPTORS_MAX_SIZE) { fprintf(stderr, "rpma_peer_cfg_get_descriptor_size() return desc_size > DESCRIPTORS_MAX_SIZE\n"); goto err_peer_cfg_delete; } char descriptors[DESCRIPTORS_MAX_SIZE]; ret = rpma_peer_cfg_get_descriptor(pcfg, &descriptors[0]); if (ret) { fprintf(stderr, "rpma_peer_cfg_get_descriptor() failed\n"); goto err_peer_cfg_delete; } for (i = 0; i < thread_num; i++) { threads_args[i].thread_num = i; threads_args[i].desc = (void *)(&descriptors[0]); threads_args[i].desc_size = desc_size; } for (i = 0; i < thread_num; i++) { ret = pthread_create(&p_threads[i], NULL, thread_main, &threads_args[i]); if (ret != 0) { fprintf(stderr, "Cannot start the thread #%d: %s\n", i, strerror(ret)); /* * Set thread_num to the number of already created * threads to join them below. 
*/ thread_num = i; /* return -1 on error */ ret = -1; break; } } for (i = thread_num - 1; i >= 0; i--) pthread_join(p_threads[i], NULL); err_peer_cfg_delete: ret = rpma_peer_cfg_delete(&pcfg); if (ret) { fprintf(stderr, "rpma_peer_cfg_delete failed: %s\n", rpma_err_2str(ret)); } err_free_threads_args: free(threads_args); err_free_p_threads: free(p_threads); return ret; } rpma-1.3.0/tests/multithreaded/peer/rpma_peer_cfg_new.c000066400000000000000000000030671443364775400232370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_peer_cfg_new.c -- rpma_peer_cfg_new multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; }; struct state { struct rpma_peer_cfg *pcfg; }; /* * init -- allocate state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create rpma_peer_cfg */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct state *st = (struct state *)state; /* create a new peer cfg object */ int ret = rpma_peer_cfg_new(&st->pcfg); if (ret) { MTT_RPMA_ERR(result, "rpma_peer_cfg_new", ret); return; } } /* * fini -- delete rpma_peer_cfg and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; /* delete the peer_cfg object */ if (st->pcfg != NULL) { int ret = rpma_peer_cfg_delete(&st->pcfg); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_cfg_delete", ret); } free(st); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr}; struct mtt_test test = { &prestate, NULL, NULL, init, thread, fini, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/peer/rpma_peer_cfg_set_direct_write_to_pmem.c000066400000000000000000000041151443364775400275200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_peer_cfg_set_direct_write_to_pmem.c -- rpma_peer_cfg_set_direct_write_to_pmem MT test */ #include #include #include "mtt.h" #define DIRECT_WRITE_TO_PMEM true struct prestate { struct rpma_peer_cfg *pcfg; }; struct prestate prestate = {NULL}; /* * prestate_init -- create rpma_peer_cfg */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_cfg_new(&pr->pcfg); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_cfg_new", ret); return; } } /* * thread -- set peer configuration direct_write_to_pmem value and check if its is as expected */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { int ret; struct prestate *pr = (struct prestate *)prestate; /* create a new peer cfg object */ ret = rpma_peer_cfg_set_direct_write_to_pmem(pr->pcfg, DIRECT_WRITE_TO_PMEM); if (ret) { MTT_RPMA_ERR(result, "rpma_peer_cfg_set_direct_write_to_pmem", ret); return; } bool direct_write_to_pmem = false; ret = rpma_peer_cfg_get_direct_write_to_pmem(pr->pcfg, &direct_write_to_pmem); if (ret) { MTT_RPMA_ERR(result, "rpma_peer_cfg_get_direct_write_to_pmem", ret); return; } if (direct_write_to_pmem != DIRECT_WRITE_TO_PMEM) MTT_ERR_MSG(result, "Invalid cq_size: %d instead of %d", -1, direct_write_to_pmem, 
DIRECT_WRITE_TO_PMEM); } /* * prestate_fini -- delete rpma_peer_cfg */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; /* delete the peer_cfg object */ if (pr->pcfg != NULL) { int ret = rpma_peer_cfg_delete(&pr->pcfg); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_cfg_delete", ret); } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/peer/rpma_peer_new.c000066400000000000000000000037231443364775400224170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* * rpma_peer_new.c -- rpma_peer_new multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; struct ibv_context *ibv_ctx; }; struct state { struct rpma_peer *peer; }; /* * prestate_init -- obtain an ibv_context for a remote IP address */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &pr->ibv_ctx); if (ret) MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); } /* * init -- allocate state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create rpma_peer based on shared ibv_context */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; /* create a new peer object */ int ret = rpma_peer_new(pr->ibv_ctx, &st->peer); if (ret) { MTT_RPMA_ERR(result, "rpma_peer_new", ret); return; } } /* * fini -- delete rpma_peer and free the state */ static void fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; /* delete the peer object */ int ret = rpma_peer_delete(&st->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); free(st); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, NULL}; struct mtt_test test = { &prestate, prestate_init, init, NULL, thread, fini, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/run_group.cmake000066400000000000000000000013201443364775400215060ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2021-2022, Intel Corporation # include(${SRC_DIR}/../../cmake/helpers.cmake) setup() if(NOT TRACER STREQUAL "none") message(NOTICE "Valgrind tool: ${TRACER}") endif() if(TRACER STREQUAL "memcheck") set(THREADS 2) else() set(THREADS ${MAX_THREADS}) endif() message(NOTICE "Number of threads: ${THREADS}") if("$ENV{RPMA_TESTING_IP}" STREQUAL "") message(FATAL_ERROR "RPMA_TESTING_IP is not set!") else() set(TESTING_IP $ENV{RPMA_TESTING_IP}) endif() if("$ENV{RPMA_TESTING_PORT}" STREQUAL "") set(TESTING_PORT "7204") else() set(TESTING_PORT $ENV{RPMA_TESTING_PORT}) endif() execute(${TEST_EXECUTABLE} ${THREADS} ${TESTING_IP} ${TESTING_PORT}) finish() 
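/*
 * Every binary that run_group.cmake executes (with the thread count, testing IP and port as
 * arguments) follows the same pattern: parse the arguments with mtt_parse_args(), fill a
 * struct mtt_test with optional callbacks and hand it to mtt_run(). The minimal sketch below
 * is only an illustration of that pattern; the slot roles (prestate, prestate_init, seq_init,
 * init, thread, fini, seq_fini, prestate_fini) are inferred from how the tests above
 * initialize the structure positionally, not from mtt.h itself.
 */
#include "mtt.h"

/*
 * thread -- the only callback used in this sketch; a real test would exercise a librpma API
 * here and report failures via MTT_ERR()/MTT_RPMA_ERR()
 */
static void
thread(unsigned id, void *prestate, void *state, struct mtt_result *result)
{
	(void) id;
	(void) prestate;
	(void) state;
	(void) result;
}

int
main(int argc, char *argv[])
{
	struct mtt_args args = {0};

	if (mtt_parse_args(argc, argv, &args))
		return -1;

	/*
	 * Positional slots, in the order used by all tests in this directory:
	 * prestate, prestate_init, seq_init, init, thread, fini, seq_fini, prestate_fini.
	 * Unused slots are left NULL, exactly as several of the tests above do.
	 */
	struct mtt_test test = {
			NULL, NULL, NULL, NULL, thread, NULL, NULL, NULL
	};

	return mtt_run(&test, args.threads_num);
}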
rpma-1.3.0/tests/multithreaded/srq/000077500000000000000000000000001443364775400172755ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/srq/CMakeLists.txt000066400000000000000000000005211443364775400220330ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME srq BIN rpma_srq_delete SRCS rpma_srq_delete.c) add_multithreaded(NAME srq BIN rpma_srq_get_rcq SRCS rpma_srq_get_rcq.c) add_multithreaded(NAME srq BIN rpma_srq_new SRCS rpma_srq_new.c) rpma-1.3.0/tests/multithreaded/srq/rpma_srq_delete.c000066400000000000000000000045741443364775400226210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_delete.c -- rpma_srq_delete multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct rpma_peer *peer; }; struct state { struct rpma_srq *srq; }; /* * prestate_init -- obtain an ibv_context for a remote IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * seq_init -- allocate a state and create a srq */ void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_srq_new(pr->peer, NULL, &st->srq); if (ret) MTT_RPMA_ERR(tr, "rpma_srq_new", ret); *state_ptr = st; } /* * thread -- delete a srq */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct state *st = (struct state *)state; int ret = rpma_srq_delete(&st->srq); if (ret) MTT_RPMA_ERR(result, "rpma_srq_delete", ret); } /* * seq_fini -- free the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; free(st); } /* * prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL}; struct mtt_test test = { &prestate, prestate_init, seq_init, NULL, thread, NULL, seq_fini, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/srq/rpma_srq_get_rcq.c000066400000000000000000000040651443364775400227760ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_get_rcq.c -- rpma_srq_get_rcq multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct rpma_peer *peer; struct rpma_srq *srq; }; /* * prestate_init -- obtain an ibv_context for a remote IP address, create a new peer object * and create a srq */ static void prestate_init(void 
*prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) { MTT_RPMA_ERR(tr, "rpma_peer_new", ret); return; } ret = rpma_srq_new(pr->peer, NULL, &pr->srq); if (ret) MTT_RPMA_ERR(tr, "rpma_srq_new", ret); } /* * thread -- get the receive CQ from the shared RQ object */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct rpma_cq *rcq; MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); int ret = rpma_srq_get_rcq(pr->srq, &rcq); if (ret) MTT_RPMA_ERR(result, "rpma_srq_get_rcq", ret); } /* * prestate_fini -- delete the srq object and the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; ret = rpma_srq_delete(&pr->srq); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_delete", ret); return; } ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/srq/rpma_srq_new.c000066400000000000000000000046031443364775400221410ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_new.c -- rpma_srq_new multithreaded test */ #include #include #include "mtt.h" struct prestate { char *addr; unsigned port; struct rpma_peer *peer; }; struct state { struct rpma_srq *srq; }; /* * prestate_init -- obtain an ibv_context for a remote IP address and create a new peer object */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; struct ibv_context *ibv_ctx; int ret; ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } ret = rpma_peer_new(ibv_ctx, &pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_new", ret); } /* * init -- allocate a state */ void init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(tr, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create a new srq */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *pr = (struct prestate *)prestate; struct state *st = (struct state *)state; int ret; MTT_PORT_INIT; MTT_PORT_SET(pr->port, id); ret = rpma_srq_new(pr->peer, NULL, &st->srq); if (ret) MTT_RPMA_ERR(result, "rpma_srq_new", ret); } /* * seq_fini -- free the state and delete srq */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *tr) { struct state *st = (struct state *)*state_ptr; int ret; if (st->srq && (ret = rpma_srq_delete(&st->srq))) MTT_RPMA_ERR(tr, "rpma_srq_delete", ret); free(st); } /* * prestate_fini -- delete the peer object */ static void prestate_fini(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate 
*)prestate; int ret; ret = rpma_peer_delete(&pr->peer); if (ret) MTT_RPMA_ERR(tr, "rpma_peer_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, args.port, NULL}; struct mtt_test test = { &prestate, prestate_init, NULL, init, thread, NULL, seq_fini, prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/srq_cfg/000077500000000000000000000000001443364775400201145ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/srq_cfg/CMakeLists.txt000066400000000000000000000011341443364775400226530ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME srq_cfg BIN get_rcq_size SRCS rpma_srq_cfg_get_rcq_size.c rpma_srq_cfg_common.c) add_multithreaded(NAME srq_cfg BIN get_rq_size SRCS rpma_srq_cfg_get_rq_size.c rpma_srq_cfg_common.c) add_multithreaded(NAME srq_cfg BIN new SRCS rpma_srq_cfg_new.c) add_multithreaded(NAME srq_cfg BIN set_rcq_size SRCS rpma_srq_cfg_set_rcq_size.c rpma_srq_cfg_common.c) add_multithreaded(NAME srq_cfg BIN set_rq_size SRCS rpma_srq_cfg_set_rq_size.c rpma_srq_cfg_common.c) rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_common.c000066400000000000000000000033051443364775400242740ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_common.c -- common part for rpma_srq_cfg_* multithreaded tests */ #include #include #include "mtt.h" #include "rpma_srq_cfg_common.h" /* * rpma_srq_cfg_common_prestate_init -- create a new srq configuration object, set all queue sizes */ void rpma_srq_cfg_common_prestate_init(void *prestate, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; int ret; ret = rpma_srq_cfg_new(&pr->cfg_ptr); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_new", ret); return; } ret = rpma_srq_cfg_set_rq_size(pr->cfg_ptr, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_set_rq_size", ret); return; } ret = rpma_srq_cfg_set_rcq_size(pr->cfg_ptr, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_set_rcq_size", ret); return; } } /* * rpma_srq_cfg_common_prestate_fini -- free the srq configuration object */ void rpma_srq_cfg_common_prestate_fini(void *prestate, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; int ret; ret = rpma_srq_cfg_delete(&pr->cfg_ptr); if (ret) MTT_RPMA_ERR(tr, "rpma_srq_cfg_delete", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct rpma_srq_cfg_common_prestate prestate = {NULL}; struct mtt_test test = { &prestate, rpma_srq_cfg_common_prestate_init, NULL, NULL, thread, NULL, NULL, rpma_srq_cfg_common_prestate_fini }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_common.h000066400000000000000000000013361443364775400243030ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_common.h -- common definition for rpma_srq_cfg_* multithreaded tests */ #ifndef MTT_RPMA_SRQ_CFG_COMMON #define MTT_RPMA_SRQ_CFG_COMMON /* the expected queue size */ #define RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP 20 void thread(unsigned id, void *prestate, void *state, 
struct mtt_result *tr); struct rpma_srq_cfg_common_prestate { struct rpma_srq_cfg *cfg_ptr; }; void rpma_srq_cfg_common_prestate_init(void *prestate, struct mtt_result *tr); void rpma_srq_cfg_common_prestate_fini(void *prestate, struct mtt_result *tr); struct rpma_srq_cfg_common_state { struct rpma_srq_cfg *cfg_ptr; }; #endif /* MTT_RPMA_SRQ_CFG_COMMON */ rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_get_rcq_size.c000066400000000000000000000014541443364775400254650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_get_rcq_size.c -- rpma_srq_cfg_get_rcq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_srq_cfg_common.h" /* * thread -- get the srq configured rcq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; uint32_t rcq_size; int ret; ret = rpma_srq_cfg_get_rcq_size(pr->cfg_ptr, &rcq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_get_rcq_size", ret); return; } if (rcq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "rcq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_get_rq_size.c000066400000000000000000000014431443364775400253200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_get_rq_size.c -- rpma_srq_cfg_get_rq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_srq_cfg_common.h" /* * thread -- get the srq configured rq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; uint32_t rq_size; int ret; ret = rpma_srq_cfg_get_rq_size(pr->cfg_ptr, &rq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_get_rq_size", ret); return; } if (rq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP) MTT_ERR(tr, "rq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP", EINVAL); } rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_new.c000066400000000000000000000027171443364775400236030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_new.c -- rpma_srq_cfg_new multithreaded test */ #include #include #include "mtt.h" struct state { struct rpma_srq_cfg *cfg; }; /* * seq_init -- allocate a state */ static void seq_init(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct state *st = (struct state *)calloc(1, sizeof(struct state)); if (!st) { MTT_ERR(result, "calloc", errno); return; } *state_ptr = st; } /* * thread -- create a new srq configuration object */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct state *st = (struct state *)state; int ret; ret = rpma_srq_cfg_new(&st->cfg); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_new", ret); return; } } /* * seq_fini -- free the srq configuration object and the state */ static void seq_fini(unsigned id, void *prestate, void **state_ptr, struct mtt_result *result) { struct state *st = (struct state *)*state_ptr; int ret; ret = rpma_srq_cfg_delete(&st->cfg); if (ret) MTT_RPMA_ERR(result, "rpma_srq_cfg_delete", ret); free(st); *state_ptr = NULL; } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) 
return -1; struct mtt_test test = { NULL, NULL, seq_init, NULL, thread, NULL, seq_fini, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_set_rcq_size.c000066400000000000000000000020571443364775400255010ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_set_rcq_size.c -- rpma_srq_cfg_set_rcq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_srq_cfg_common.h" struct rpma_srq_cfg_common_prestate prestate = {NULL}; /* * thread -- set the srq establishment rcq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; uint32_t rcq_size = 0; int ret; ret = rpma_srq_cfg_set_rcq_size(pr->cfg_ptr, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_set_rcq_size", ret); return; } ret = rpma_srq_cfg_get_rcq_size(pr->cfg_ptr, &rcq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_get_rcq_size", ret); return; } if (rcq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid rcq_size: %d instead of %d", -1, rcq_size, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); } rpma-1.3.0/tests/multithreaded/srq_cfg/rpma_srq_cfg_set_rq_size.c000066400000000000000000000017531443364775400253400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * rpma_srq_cfg_set_rq_size.c -- rpma_srq_cfg_set_rq_size multithreaded test */ #include #include #include "mtt.h" #include "rpma_srq_cfg_common.h" /* * thread -- set the srq establishment rq size and check if its value is as expected */ void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct rpma_srq_cfg_common_prestate *pr = (struct rpma_srq_cfg_common_prestate *)prestate; uint32_t rq_size = 0; int ret; ret = rpma_srq_cfg_set_rq_size(pr->cfg_ptr, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_set_rq_size", ret); return; } ret = rpma_srq_cfg_get_rq_size(pr->cfg_ptr, &rq_size); if (ret) { MTT_RPMA_ERR(tr, "rpma_srq_cfg_get_rq_size", ret); return; } if (rq_size != RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP) MTT_ERR_MSG(tr, "Invalid rq_size: %d instead of %d", -1, rq_size, RPMA_SRQ_CFG_COMMON_Q_SIZE_EXP); } rpma-1.3.0/tests/multithreaded/utils/000077500000000000000000000000001443364775400176305ustar00rootroot00000000000000rpma-1.3.0/tests/multithreaded/utils/CMakeLists.txt000066400000000000000000000005261443364775400223730ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # include(../../cmake/ctest_helpers.cmake) add_multithreaded(NAME utils BIN rpma_utils_get_ibv_context SRCS rpma_utils_get_ibv_context.c) add_multithreaded(NAME utils BIN rpma_utils_ibv_context_is_odp_capable SRCS rpma_utils_ibv_context_is_odp_capable.c) rpma-1.3.0/tests/multithreaded/utils/rpma_utils_get_ibv_context.c000066400000000000000000000020431443364775400254150ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * rpma_utils-get_ibv_context.c -- 'get ibv context' multithreaded test */ #include #include "mtt.h" struct prestate { char *addr; }; /* * thread -- try to get an ibv_context based on a shared network interface * address string */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *result) { struct prestate *ps = 
(struct prestate *)prestate; struct ibv_context *ibv_ctx = NULL; int ret; /* obtain an IBV context for a local IP address */ ret = rpma_utils_get_ibv_context(ps->addr, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); if (ret) MTT_RPMA_ERR(result, "rpma_utils_get_ibv_context", ret); } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr}; struct mtt_test test = { &prestate, NULL, NULL, NULL, thread, NULL, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/multithreaded/utils/rpma_utils_ibv_context_is_odp_capable.c000066400000000000000000000040221443364775400275610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Intel Corporation */ /* * rpma_utils_ibv_context_is_odp_capable.c -- 'check odp' multithreaded test */ #include #include #include #include #include #include #include "mtt.h" struct prestate { char *addr; struct ibv_context *ibv_ctx; int is_odp_supported_exp; }; /* * prestate_init -- obtain an ibv_context for a local IP address * and check if the device supports On-Demand Paging */ static void prestate_init(void *prestate, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int ret; /* obtain an IBV context for a remote IP address */ ret = rpma_utils_get_ibv_context(pr->addr, RPMA_UTIL_IBV_CONTEXT_REMOTE, &pr->ibv_ctx); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_get_ibv_context", ret); return; } /* check if the device supports On-Demand Paging */ ret = rpma_utils_ibv_context_is_odp_capable(pr->ibv_ctx, &pr->is_odp_supported_exp); if (ret) MTT_RPMA_ERR(tr, "rpma_utils_ibv_context_is_odp_capable", ret); } /* * thread -- obtain an ibv_context for a remote IP address * and check if the device supports On-Demand Paging */ static void thread(unsigned id, void *prestate, void *state, struct mtt_result *tr) { struct prestate *pr = (struct prestate *)prestate; int is_odp_supported; /* check if the device supports On-Demand Paging */ int ret = rpma_utils_ibv_context_is_odp_capable(pr->ibv_ctx, &is_odp_supported); if (ret) { MTT_RPMA_ERR(tr, "rpma_utils_ibv_context_is_odp_capable", ret); } else if (is_odp_supported != pr->is_odp_supported_exp) { MTT_ERR(tr, "is_odp_supported != is_odp_supported_exp", EINVAL); } } int main(int argc, char *argv[]) { struct mtt_args args = {0}; if (mtt_parse_args(argc, argv, &args)) return -1; struct prestate prestate = {args.addr, NULL, 0}; struct mtt_test test = { &prestate, prestate_init, NULL, NULL, thread, NULL, NULL, NULL }; return mtt_run(&test, args.threads_num); } rpma-1.3.0/tests/true.cmake000066400000000000000000000002151443364775400156140ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # # true.cmake - cmake script which always succeeds return() rpma-1.3.0/tests/unit/000077500000000000000000000000001443364775400146145ustar00rootroot00000000000000rpma-1.3.0/tests/unit/CMakeLists.txt000066400000000000000000000011341443364775400173530ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # Copyright 2021-2022, Fujitsu # add_subdirectory(conn) add_subdirectory(conn_cfg) add_subdirectory(conn_req) add_subdirectory(cq) add_subdirectory(ep) add_subdirectory(error) add_subdirectory(flush) add_subdirectory(info) add_subdirectory(librpma_constructor) add_subdirectory(log) add_subdirectory(mr) add_subdirectory(peer) add_subdirectory(peer_cfg) add_subdirectory(private_data) 
add_subdirectory(srq) add_subdirectory(srq_cfg) add_subdirectory(utils) if(TESTS_NO_FORTIFY_SOURCE) add_subdirectory(log_default) endif() rpma-1.3.0/tests/unit/common/000077500000000000000000000000001443364775400161045ustar00rootroot00000000000000rpma-1.3.0/tests/unit/common/mocks-glibc.c000066400000000000000000000011441443364775400204420ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mocks-glibc.c -- glibc mocks */ #include #include #include "cmocka_headers.h" #include "mocks-glibc.h" /* * syscall() shall be mocked only during tests otherwise ctest with TESTS_COVERAGE=1 * does not work properly */ bool enabled__wrap_syscall = false; long __real_syscall(long number); /* * __wrap_syscall -- mock of syscall() */ long __wrap_syscall(long number) { if (!enabled__wrap_syscall || number != SYS_gettid) return __real_syscall(number); return mock_type(long); } rpma-1.3.0/tests/unit/common/mocks-glibc.h000066400000000000000000000004131443364775400204450ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * mocks-glibc.h -- the glibc mock's header */ #ifndef MOCKS_GLIBC_H #define MOCKS_GLIBC_H #include extern bool enabled__wrap_syscall; #endif /* MOCKS_GLIBC_H */ rpma-1.3.0/tests/unit/common/mocks-ibverbs.c000066400000000000000000000262051443364775400210230ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * mock-ibverbs.c -- libibverbs mocks */ #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h" #include "test-common.h" /* mocked IBV entities */ struct verbs_context Verbs_context; struct ibv_comp_channel Ibv_comp_channel; struct ibv_device Ibv_device; struct ibv_context Ibv_context = {&Ibv_device}; struct ibv_pd Ibv_pd = {&Ibv_context, 0}; struct ibv_cq Ibv_cq; struct ibv_cq Ibv_rcq; struct ibv_cq Ibv_srq_rcq; struct ibv_cq Ibv_cq_unknown; struct ibv_qp Ibv_qp; #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) struct ibv_qp_ex Ibv_qp_ex; #endif struct ibv_mr Ibv_mr; struct ibv_srq Ibv_srq; /* * ibv_query_device -- ibv_query_device() mock */ int ibv_query_device(struct ibv_context *ibv_ctx, struct ibv_device_attr *device_attr) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); assert_non_null(device_attr); int ret = mock_type(int); if (ret) return ret; memset(device_attr, 0, sizeof(struct ibv_device_attr)); return 0; } #if defined(ON_DEMAND_PAGING_SUPPORTED) || defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || \ defined(NATIVE_FLUSH_SUPPORTED) /* * ibv_query_device_ex_mock -- ibv_query_device_ex() mock */ int ibv_query_device_ex_mock(struct ibv_context *ibv_ctx, const struct ibv_query_device_ex_input *input, struct ibv_device_attr_ex *attr, size_t attr_size) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); assert_null(input); assert_non_null(attr); /* attr_size is provided by ibverbs - no validation needed */ struct ibv_device_attr_ex *device_attr = mock_type(struct ibv_device_attr_ex *); if (device_attr == NULL) return mock_type(int); memcpy(attr, device_attr, sizeof(struct ibv_device_attr_ex)); return 0; } #endif /* * ibv_create_cq -- ibv_create_cq() mock */ struct ibv_cq * ibv_create_cq(struct ibv_context *ibv_ctx, int cqe, void *cq_context, struct ibv_comp_channel *channel, int comp_vector) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); check_expected(cqe); assert_ptr_equal(channel, MOCK_COMP_CHANNEL); 
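	/*
	 * the mock requires the CQ to be created on the mocked completion channel and,
	 * as checked next, with comp_vector 0
	 */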
assert_int_equal(comp_vector, 0); struct ibv_cq *cq = mock_type(struct ibv_cq *); if (!cq) { errno = mock_type(int); return NULL; } cq->channel = channel; return cq; } /* * ibv_destroy_cq -- ibv_destroy_cq() mock */ int ibv_destroy_cq(struct ibv_cq *cq) { assert_int_equal(cq, MOCK_IBV_CQ); return mock_type(int); } /* * ibv_create_comp_channel -- ibv_create_comp_channel() mock */ struct ibv_comp_channel * ibv_create_comp_channel(struct ibv_context *ibv_ctx) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); struct ibv_comp_channel *channel = mock_type(struct ibv_comp_channel *); if (!channel) { errno = mock_type(int); return NULL; } return channel; } /* * ibv_destroy_comp_channel -- ibv_destroy_comp_channel() mock */ int ibv_destroy_comp_channel(struct ibv_comp_channel *channel) { assert_ptr_equal(channel, MOCK_COMP_CHANNEL); return mock_type(int); } #if defined(ibv_reg_mr) /* * Since rdma-core v27.0-105-g5a750676 * ibv_reg_mr() has been defined as a macro * in : * * https://github.com/linux-rdma/rdma-core/commit/5a750676e8312715100900c6336bbc98577e082b * * In order to mock the ibv_reg_mr() function * the `ibv_reg_mr` symbol has to be undefined first * and the additional ibv_reg_mr_iova2() function * has to be mocked, because it is called * by the 'ibv_reg_mr' macro. */ #undef ibv_reg_mr /* * ibv_reg_mr_iova2 -- ibv_reg_mr_iova2() mock */ struct ibv_mr * ibv_reg_mr_iova2(struct ibv_pd *pd, void *addr, size_t length, uint64_t iova, unsigned access) { return ibv_reg_mr(pd, addr, length, (int)access); } #endif /* * ibv_reg_mr -- ibv_reg_mr() mock */ struct ibv_mr * ibv_reg_mr(struct ibv_pd *pd, void *addr, size_t length, int access) { check_expected_ptr(pd); check_expected_ptr(addr); check_expected(length); check_expected(access); struct ibv_mr *mr = mock_type(struct ibv_mr *); if (mr == NULL) { errno = mock_type(int); return NULL; } return mr; } /* * ibv_req_notify_cq_mock -- ibv_req_notify_cq() mock */ int ibv_req_notify_cq_mock(struct ibv_cq *cq, int solicited_only) { check_expected_ptr(cq); assert_int_equal(solicited_only, 0); return mock_type(int); } /* * ibv_get_cq_event -- ibv_get_cq_event() mock */ int ibv_get_cq_event(struct ibv_comp_channel *channel, struct ibv_cq **cq, void **cq_context) { check_expected_ptr(channel); assert_non_null(cq); assert_non_null(cq_context); errno = mock_type(int); if (!errno) { *cq = mock_type(struct ibv_cq *); *cq_context = NULL; return 0; } return -1; } /* * ibv_ack_cq_events -- ibv_ack_cq_events() mock */ void ibv_ack_cq_events(struct ibv_cq *cq, unsigned nevents) { check_expected_ptr(cq); assert_int_equal(nevents, 1); } /* * ibv_dereg_mr -- a mock of ibv_dereg_mr() */ int ibv_dereg_mr(struct ibv_mr *mr) { /* * rpma_peer_setup_mr_reg() and malloc() may be called in any order. * If the first one fails, then the second one won't be called. * ibv_dereg_mr() will be called in rpma_mr_reg() only if: * 1) rpma_peer_setup_mr_reg() succeeded and * 2) malloc() failed. * In the opposite case, when: * 1) malloc() succeeded and * 2) rpma_peer_setup_mr_reg() failed, * ibv_dereg_mr() will not be called, * so we cannot add cmocka's expects here. * Otherwise, unconsumed expects would cause a test failure. 
*/ assert_int_equal(mr, MOCK_MR); return mock_type(int); /* errno */ } /* * ibv_post_send_mock -- mock of ibv_post_send() */ int ibv_post_send_mock(struct ibv_qp *qp, struct ibv_send_wr *wr, struct ibv_send_wr **bad_wr) { struct ibv_post_send_mock_args *args = mock_type(struct ibv_post_send_mock_args *); assert_non_null(qp); assert_non_null(wr); assert_non_null(bad_wr); assert_int_equal(qp, args->qp); /* * XXX all wr fields should be validated to avoid * posting uninitialized values */ assert_int_equal(wr->opcode, args->opcode); assert_int_equal(wr->send_flags, args->send_flags); assert_int_equal(wr->wr_id, args->wr_id); if (args->opcode != IBV_WR_SEND && args->opcode != IBV_WR_SEND_WITH_IMM) { assert_int_equal(wr->wr.rdma.remote_addr, args->remote_addr); assert_int_equal(wr->wr.rdma.rkey, args->rkey); } if (args->opcode == IBV_WR_SEND_WITH_IMM || args->opcode == IBV_WR_RDMA_WRITE_WITH_IMM) assert_int_equal(wr->imm_data, args->imm_data); assert_null(wr->next); return args->ret; } /* * ibv_post_recv_mock -- mock of ibv_post_recv() */ int ibv_post_recv_mock(struct ibv_qp *qp, struct ibv_recv_wr *wr, struct ibv_recv_wr **bad_wr) { struct ibv_post_recv_mock_args *args = mock_type(struct ibv_post_recv_mock_args *); assert_non_null(qp); assert_non_null(wr); assert_non_null(bad_wr); assert_int_equal(qp, args->qp); assert_int_equal(wr->wr_id, args->wr_id); assert_null(wr->next); return args->ret; } /* * ibv_post_srq_recv_mock -- mock of ibv_post_srq_recv() */ int ibv_post_srq_recv_mock(struct ibv_srq *srq, struct ibv_recv_wr *wr, struct ibv_recv_wr **bad_wr) { struct ibv_post_srq_recv_mock_args *args = mock_type(struct ibv_post_srq_recv_mock_args *); assert_non_null(srq); assert_non_null(wr); assert_non_null(bad_wr); assert_int_equal(srq, args->srq); assert_int_equal(wr->wr_id, args->wr_id); assert_null(wr->next); return args->ret; } /* * ibv_alloc_pd -- ibv_alloc_pd() mock */ struct ibv_pd * ibv_alloc_pd(struct ibv_context *ibv_ctx) { struct ibv_alloc_pd_mock_args *args = mock_type(struct ibv_alloc_pd_mock_args *); if (args->validate_params == MOCK_VALIDATE) check_expected_ptr(ibv_ctx); if (args->pd != NULL) return args->pd; /* * The ibv_alloc_pd(3) manual page does not document that this function * returns any error via errno but seemingly it is. For the usability * sake, in librpma we try to deduce what really happened using * the errno value. 
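 * The mock mirrors that behaviour: when the test does not supply a pd, it takes the errno
 * value from the mock queue and returns NULL, as the code below shows.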
*/ errno = mock_type(int); return NULL; } /* * ibv_dealloc_pd -- ibv_dealloc_pd() mock */ int ibv_dealloc_pd(struct ibv_pd *pd) { struct ibv_dealloc_pd_mock_args *args = mock_type(struct ibv_dealloc_pd_mock_args *); if (args->validate_params == MOCK_VALIDATE) check_expected_ptr(pd); return args->ret; } /* * ibv_wc_status_str -- ibv_wc_status_str() mock */ const char * ibv_wc_status_str(enum ibv_wc_status status) { return ""; } #ifdef IBV_ADVISE_MR_SUPPORTED /* * ibv_advise_mr_mock -- mock of ibv_advise_mr() */ int ibv_advise_mr_mock(struct ibv_pd *pd, enum ibv_advise_mr_advice advice, uint32_t flags, struct ibv_sge *sg_list, uint32_t num_sge) { check_expected_ptr(pd); check_expected(advice); check_expected(flags); assert_non_null(sg_list); check_expected(sg_list->lkey); check_expected_ptr(sg_list->addr); check_expected(sg_list->length); check_expected(num_sge); return mock_type(int); } #endif /* * ibv_create_srq -- ibv_create_srq() mock */ struct ibv_srq * ibv_create_srq(struct ibv_pd *pd, struct ibv_srq_init_attr *srq_init_attr) { assert_ptr_equal(pd, MOCK_IBV_PD); assert_non_null(srq_init_attr); assert_null(srq_init_attr->srq_context); check_expected(srq_init_attr->attr.max_wr); assert_int_equal(srq_init_attr->attr.max_sge, 1); assert_int_equal(srq_init_attr->attr.srq_limit, 0); struct ibv_srq *srq = mock_type(struct ibv_srq *); if (!srq) { errno = mock_type(int); return NULL; } return srq; } /* * ibv_destroy_srq -- ibv_destroy_srq() mock */ int ibv_destroy_srq(struct ibv_srq *srq) { check_expected_ptr(srq); return mock_type(int); } #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) /* * ibv_qp_to_qp_ex -- ibv_qp_to_qp_ex() mock */ struct ibv_qp_ex * ibv_qp_to_qp_ex(struct ibv_qp *qp) { check_expected_ptr(qp); return mock_type(struct ibv_qp_ex *); } /* * ibv_wr_start -- ibv_wr_start() mock */ void ibv_wr_start_mock(struct ibv_qp_ex *qp) { check_expected_ptr(qp); } /* * ibv_wr_complete_mock -- ibv_wr_complete() mock */ int ibv_wr_complete_mock(struct ibv_qp_ex *qp) { check_expected(qp); return mock_type(int); } #endif #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED /* * ibv_wr_atomic_write_mock -- ibv_wr_atomic_write() mock */ void ibv_wr_atomic_write_mock(struct ibv_qp_ex *qp, uint32_t rkey, uint64_t remote_addr, const void *atomic_wr) { struct ibv_wr_atomic_write_mock_args *args = mock_type(struct ibv_wr_atomic_write_mock_args *); assert_int_equal(qp, args->qp); assert_int_equal(qp->wr_id, args->wr_id); assert_int_equal(qp->wr_flags, args->wr_flags); assert_int_equal(rkey, args->rkey); assert_int_equal(remote_addr, args->remote_addr); assert_memory_equal(atomic_wr, args->atomic_wr, 8); } #endif #ifdef NATIVE_FLUSH_SUPPORTED /* * ibv_wr_flush_mock -- ibv_wr_flush() mock */ void ibv_wr_flush_mock(struct ibv_qp_ex *qp, uint32_t rkey, uint64_t remote_addr, size_t len, uint8_t type, uint8_t level) { struct ibv_wr_flush_mock_args *args = mock_type(struct ibv_wr_flush_mock_args *); assert_int_equal(qp, args->qp); assert_int_equal(qp->wr_id, args->wr_id); assert_int_equal(qp->wr_flags, args->wr_flags); assert_int_equal(rkey, args->rkey); assert_int_equal(remote_addr, args->remote_addr); assert_int_equal(len, args->len); assert_int_equal(type, args->type); assert_int_equal(level, args->level); } #endif rpma-1.3.0/tests/unit/common/mocks-ibverbs.h000066400000000000000000000077071443364775400210360ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * mocks-ibverbs.h 
-- the ibverbs mocks' header */ #ifndef MOCKS_IBVERBS_H #define MOCKS_IBVERBS_H #include /* mocked IBV entities */ extern struct verbs_context Verbs_context; extern struct ibv_comp_channel Ibv_comp_channel; extern struct ibv_context Ibv_context; extern struct ibv_device Ibv_device; extern struct ibv_pd Ibv_pd; extern struct ibv_cq Ibv_cq; extern struct ibv_cq Ibv_rcq; extern struct ibv_cq Ibv_srq_rcq; extern struct ibv_cq Ibv_cq_unknown; extern struct ibv_qp Ibv_qp; #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) extern struct ibv_qp_ex Ibv_qp_ex; #endif extern struct ibv_mr Ibv_mr; extern struct ibv_srq Ibv_srq; /* random values or pointers to mocked IBV entities */ #define MOCK_VERBS (&Verbs_context.context) #define MOCK_COMP_CHANNEL (struct ibv_comp_channel *)&Ibv_comp_channel #define MOCK_IBV_CQ (struct ibv_cq *)&Ibv_cq #define MOCK_IBV_RCQ (struct ibv_cq *)&Ibv_rcq #define MOCK_IBV_SRQ_RCQ (struct ibv_cq *)&Ibv_srq_rcq #define MOCK_IBV_CQ_UNKNOWN (struct ibv_cq *)&Ibv_cq_unknown #define MOCK_IBV_PD (struct ibv_pd *)&Ibv_pd #define MOCK_QP (struct ibv_qp *)&Ibv_qp #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) #define MOCK_QPX (struct ibv_qp_ex *)&Ibv_qp_ex #endif #define MOCK_MR (struct ibv_mr *)&Ibv_mr #define MOCK_IBV_SRQ (struct ibv_srq *)&Ibv_srq struct ibv_alloc_pd_mock_args { int validate_params; struct ibv_pd *pd; }; struct ibv_dealloc_pd_mock_args { int validate_params; int ret; }; struct ibv_post_send_mock_args { struct ibv_qp *qp; enum ibv_wr_opcode opcode; unsigned send_flags; uint64_t wr_id; uint64_t remote_addr; uint32_t rkey; uint32_t imm_data; int ret; }; struct ibv_post_recv_mock_args { struct ibv_qp *qp; uint64_t wr_id; int ret; }; struct ibv_post_srq_recv_mock_args { struct ibv_srq *srq; uint64_t wr_id; int ret; }; #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED struct ibv_wr_atomic_write_mock_args { struct ibv_qp_ex *qp; uint64_t wr_id; uint32_t wr_flags; uint32_t rkey; uint64_t remote_addr; const void *atomic_wr; }; #endif #ifdef NATIVE_FLUSH_SUPPORTED struct ibv_wr_flush_mock_args { struct ibv_qp_ex *qp; uint64_t wr_id; uint32_t wr_flags; uint32_t rkey; uint64_t remote_addr; size_t len; uint8_t type; uint8_t level; }; #endif #if defined(ON_DEMAND_PAGING_SUPPORTED) || defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || \ defined(NATIVE_FLUSH_SUPPORTED) int ibv_query_device_ex_mock(struct ibv_context *ibv_ctx, const struct ibv_query_device_ex_input *input, struct ibv_device_attr_ex *attr, size_t attr_size); #endif int ibv_post_send_mock(struct ibv_qp *qp, struct ibv_send_wr *wr, struct ibv_send_wr **bad_wr); int ibv_post_recv_mock(struct ibv_qp *qp, struct ibv_recv_wr *wr, struct ibv_recv_wr **bad_wr); int ibv_post_srq_recv_mock(struct ibv_srq *srq, struct ibv_recv_wr *wr, struct ibv_recv_wr **bad_wr); int ibv_req_notify_cq_mock(struct ibv_cq *cq, int solicited_only); #ifdef IBV_ADVISE_MR_SUPPORTED int ibv_advise_mr_mock(struct ibv_pd *pd, enum ibv_advise_mr_advice advice, uint32_t flags, struct ibv_sge *sg_list, uint32_t num_sge); #endif struct ibv_srq *ibv_create_srq(struct ibv_pd *pd, struct ibv_srq_init_attr *srq_init_attr); int ibv_destroy_srq(struct ibv_srq *srq); #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) struct ibv_qp_ex *ibv_qp_to_qp_ex(struct ibv_qp *qp); void ibv_wr_start_mock(struct ibv_qp_ex *qp); int ibv_wr_complete_mock(struct ibv_qp_ex *qp); #endif #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED void ibv_wr_atomic_write_mock(struct ibv_qp_ex *qp, uint32_t rkey, uint64_t remote_addr, 
const void *atomic_wr); #endif #ifdef NATIVE_FLUSH_SUPPORTED void ibv_wr_flush_mock(struct ibv_qp_ex *qp, uint32_t rkey, uint64_t remote_addr, size_t len, uint8_t type, uint8_t level); #endif #endif /* MOCKS_IBVERBS_H */ rpma-1.3.0/tests/unit/common/mocks-netdb.c000066400000000000000000000005111443364775400204530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Fujitsu */ /* * mocks-netdb.c -- netdb mocks */ #include "cmocka_headers.h" #include "mocks-netdb.h" /* * __wrap_gai_strerror -- gai_strerror() mock */ const char * __wrap_gai_strerror(int errcode) { check_expected(errcode); return mock_type(const char *); } rpma-1.3.0/tests/unit/common/mocks-netdb.h000066400000000000000000000004161443364775400204640ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2021, Fujitsu */ /* * mocks-netdb.h -- the netdb mocks' header */ #ifndef MOCKS_NETDB_H #define MOCKS_NETDB_H #define MOCK_EAI_ERRNO 345678 #define MOCK_EAI_ERROR "mock eai error" #endif /* MOCKS_NETDB_H */ rpma-1.3.0/tests/unit/common/mocks-rdma_cm.c000066400000000000000000000177401443364775400207750ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * mocks-rdma_cm.c -- librdmacm mocks */ #include #include #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "test-common.h" struct rdma_event_channel Evch; /* mock event channel */ struct rdma_cm_id Cm_id; /* mock CM ID */ struct ibv_sa_path_rec Path_rec; /* mock ibv_sa_path_rec */ /* * Rdma_migrate_id_counter -- counter of calls to rdma_migrate_id() which allows * controlling its mock behaviour from call-to-call. 
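 *
 * Tests typically reset it first, e.g.:
 *   Rdma_migrate_id_counter = RDMA_MIGRATE_COUNTER_INIT;
 * so that the first mocked call migrates the CM ID to the event channel
 * and the second one migrates it back (see rdma_migrate_id() below).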
*/ int Rdma_migrate_id_counter = 0; /* mock control entity */ int Mock_ctrl_defer_destruction = MOCK_CTRL_NO_DEFER; const struct rdma_cm_id Cmid_zero = {0}; /* * rdma_create_qp_ex -- rdma_create_qp_ex() mock */ int rdma_create_qp_ex(struct rdma_cm_id *id, struct ibv_qp_init_attr_ex *qp_init_attr) { check_expected_ptr(id); assert_non_null(qp_init_attr); check_expected(qp_init_attr->qp_context); check_expected(qp_init_attr->send_cq); check_expected(qp_init_attr->recv_cq); check_expected(qp_init_attr->srq); check_expected(qp_init_attr->cap.max_send_wr); check_expected(qp_init_attr->cap.max_recv_wr); check_expected(qp_init_attr->cap.max_send_sge); check_expected(qp_init_attr->cap.max_recv_sge); check_expected(qp_init_attr->cap.max_inline_data); assert_int_equal(qp_init_attr->qp_type, IBV_QPT_RC); assert_int_equal(qp_init_attr->sq_sig_all, 0); check_expected(qp_init_attr->comp_mask); #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) if (qp_init_attr->comp_mask & IBV_QP_INIT_ATTR_SEND_OPS_FLAGS) check_expected(qp_init_attr->send_ops_flags); #endif check_expected(qp_init_attr->pd); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_destroy_qp -- rdma_destroy_qp() mock */ void rdma_destroy_qp(struct rdma_cm_id *id) { check_expected_ptr(id); } /* * rdma_accept -- rdma_accept() mock */ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { check_expected(id); assert_non_null(conn_param); assert_null(conn_param->private_data); assert_int_equal(conn_param->private_data_len, 0); assert_int_equal(conn_param->responder_resources, RDMA_MAX_RESP_RES); assert_int_equal(conn_param->initiator_depth, RDMA_MAX_INIT_DEPTH); assert_int_equal(conn_param->flow_control, 1); assert_int_equal(conn_param->retry_count, 7); /* max 3-bit value */ assert_int_equal(conn_param->rnr_retry_count, 7); /* max 3-bit value */ errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_reject -- rdma_reject() mock */ int rdma_reject(struct rdma_cm_id *id, const void *private_data, uint8_t private_data_len) { check_expected_ptr(id); assert_null(private_data); assert_int_equal(private_data_len, 0); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_ack_cm_event -- rdma_ack_cm_event() mock */ int rdma_ack_cm_event(struct rdma_cm_event *event) { check_expected_ptr(event); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_create_id -- mock of rdma_create_id */ int rdma_create_id(struct rdma_event_channel *channel, struct rdma_cm_id **id, void *context, enum rdma_port_space ps) { assert_non_null(id); assert_null(context); assert_int_equal(ps, RDMA_PS_TCP); /* allocate (struct rdma_cm_id *) */ *id = mock_type(struct rdma_cm_id *); if (*id == NULL) { errno = mock_type(int); return -1; } if (!Mock_ctrl_defer_destruction) expect_value(rdma_destroy_id, id, *id); return 0; } /* * rdma_destroy_id -- mock of rdma_destroy_id */ int rdma_destroy_id(struct rdma_cm_id *id) { check_expected(id); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_resolve_route -- mock of rdma_resolve_route */ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) { check_expected(id); check_expected(timeout_ms); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_connect -- rdma_connect() mock */ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) { check_expected(id); assert_non_null(conn_param); assert_int_equal(conn_param->responder_resources, RDMA_MAX_RESP_RES); 
assert_int_equal(conn_param->initiator_depth, RDMA_MAX_INIT_DEPTH); assert_int_equal(conn_param->flow_control, 1); assert_int_equal(conn_param->retry_count, 7); /* max 3-bit value */ assert_int_equal(conn_param->rnr_retry_count, 7); /* max 3-bit value */ errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_disconnect -- rdma_disconnect() mock */ int rdma_disconnect(struct rdma_cm_id *id) { check_expected_ptr(id); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_create_event_channel -- rdma_create_event_channel() mock */ struct rdma_event_channel * rdma_create_event_channel(void) { struct rdma_event_channel *evch = mock_type(struct rdma_event_channel *); if (!evch) { errno = mock_type(int); return NULL; } return evch; } /* * rdma_destroy_event_channel -- rdma_destroy_event_channel() mock */ void rdma_destroy_event_channel(struct rdma_event_channel *channel) { assert_ptr_equal(channel, MOCK_EVCH); } /* * rdma_migrate_id -- rdma_migrate_id() mock */ int rdma_migrate_id(struct rdma_cm_id *id, struct rdma_event_channel *channel) { assert_ptr_equal(id, MOCK_CM_ID); /* * This mock assumes the first call to rdma_migrate_id() always migrate * a CM ID to an event channel. Whereas the second call migrate * the CM ID from the event channel (channel == NULL). */ if (Rdma_migrate_id_counter == RDMA_MIGRATE_TO_EVCH) assert_ptr_equal(channel, MOCK_EVCH); else if (Rdma_migrate_id_counter == RDMA_MIGRATE_FROM_EVCH) assert_ptr_equal(channel, NULL); else assert_true(0); ++Rdma_migrate_id_counter; id->qp = MOCK_QP; errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_get_cm_event -- rdma_get_cm_event() mock */ int rdma_get_cm_event(struct rdma_event_channel *channel, struct rdma_cm_event **event_ptr) { check_expected_ptr(channel); assert_non_null(event_ptr); struct rdma_cm_event *event = mock_type(struct rdma_cm_event *); if (!event) { errno = mock_type(int); return -1; } *event_ptr = event; return 0; } /* * rdma_getaddrinfo -- rdma_getaddrinfo() mock */ #ifdef RDMA_GETADDRINFO_OLD_SIGNATURE int rdma_getaddrinfo(char *node, char *port, struct rdma_addrinfo *hints, struct rdma_addrinfo **res) #else int rdma_getaddrinfo(const char *node, const char *port, const struct rdma_addrinfo *hints, struct rdma_addrinfo **res) #endif { struct rdma_addrinfo_args *args = mock_type(struct rdma_addrinfo_args *); if (args->validate_params == MOCK_VALIDATE) { assert_string_equal(node, MOCK_IP_ADDRESS); assert_string_equal(port, MOCK_PORT); check_expected(hints->ai_flags); } *res = args->res; if (*res != NULL) return 0; int ret = mock_type(int); assert_int_not_equal(ret, 0); errno = mock_type(int); return ret; } /* * rdma_freeaddrinfo -- rdma_freeaddrinfo() mock */ void rdma_freeaddrinfo(struct rdma_addrinfo *res) { struct rdma_addrinfo_args *args = mock_type(struct rdma_addrinfo_args *); if (args->validate_params == MOCK_VALIDATE) assert_ptr_equal(res, args->res); } /* * rdma_resolve_addr -- rdma_resolve_addr() mock * Note: CM ID is not modified. */ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, struct sockaddr *dst_addr, int timeout_ms) { check_expected_ptr(id); check_expected_ptr(src_addr); check_expected_ptr(dst_addr); check_expected(timeout_ms); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_bind_addr -- rdma_bind_addr() mock * Note: CM ID is not modified. 
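 *
 * Its result is driven by the queued errno value (0 means success), armed
 * together with the matching expect_value() calls for id and addr, e.g.
 * will_return(rdma_bind_addr, MOCK_OK); (illustrative only).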
*/ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { check_expected_ptr(id); check_expected_ptr(addr); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_event_str -- rdma_event_str() mock */ const char * rdma_event_str(enum rdma_cm_event_type event) { return ""; } rpma-1.3.0/tests/unit/common/mocks-rdma_cm.h000066400000000000000000000026511443364775400207750ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * mocks-rdma_cm.h -- a librdmacm mocks header */ #ifndef MOCKS_RDMA_CM_H #define MOCKS_RDMA_CM_H #define CM_EVENT_CONNECTION_REQUEST_INIT \ {NULL, NULL, RDMA_CM_EVENT_CONNECT_REQUEST, 0, {{0}}} #define CM_EVENT_CONNECT_ERROR_INIT \ {NULL, NULL, RDMA_CM_EVENT_CONNECT_ERROR, 0, {{0}}} #define MOCK_EVCH (struct rdma_event_channel *)&Evch #define MOCK_CM_ID (struct rdma_cm_id *)&Cm_id #define MOCK_PATH_REC (struct ibv_sa_path_rec *)&Path_rec #define RDMA_MIGRATE_TO_EVCH 0 #define RDMA_MIGRATE_FROM_EVCH 1 #define RDMA_MIGRATE_COUNTER_INIT (RDMA_MIGRATE_TO_EVCH) extern struct rdma_event_channel Evch; /* mock event channel */ extern struct rdma_cm_id Cm_id; /* mock CM ID */ extern struct ibv_sa_path_rec Path_rec; /* mock ibv_sa_path_rec */ extern int Rdma_migrate_id_counter; /* mock control entities */ #define MOCK_CTRL_DEFER 1 #define MOCK_CTRL_NO_DEFER 0 /* * Cmocka does not allow call expect_* from setup whereas check_* will be called * on teardown. So, function creating an object which is called during setup * cannot queue any expect_* regarding the function destroying the object * which will be called in the teardown. */ extern int Mock_ctrl_defer_destruction; struct rdma_addrinfo_args { int validate_params; struct rdma_addrinfo *res; }; extern const struct rdma_cm_id Cmid_zero; #endif /* MOCKS_RDMA_CM_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-conn.c000066400000000000000000000032361443364775400212600ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * mocks-rpma-conn.c -- librpma conn.c module mocks */ #include #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-cq.h" /* * rpma_conn_new -- rpma_conn_new() mock */ int rpma_conn_new(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, struct ibv_comp_channel *channel, struct rpma_conn **conn_ptr) { assert_ptr_equal(peer, MOCK_PEER); check_expected_ptr(id); assert_ptr_equal(cq, MOCK_RPMA_CQ); check_expected_ptr(rcq); check_expected_ptr(channel); assert_non_null(conn_ptr); struct rpma_conn *conn = mock_type(struct rpma_conn *); if (!conn) { int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } *conn_ptr = conn; return 0; } /* * rpma_conn_delete -- rpma_conn_delete() mock */ int rpma_conn_delete(struct rpma_conn **conn_ptr) { assert_non_null(conn_ptr); struct rpma_conn *conn = *conn_ptr; check_expected_ptr(conn); int result = mock_type(int); if (result) { /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } *conn_ptr = NULL; return 0; } /* * rpma_conn_transfer_private_data -- rpma_conn_transfer_private_data() mock */ void rpma_conn_transfer_private_data(struct rpma_conn *conn, struct rpma_conn_private_data *pdata) { assert_non_null(conn); assert_non_null(pdata); check_expected(conn); check_expected(pdata->ptr); 
check_expected(pdata->len); } rpma-1.3.0/tests/unit/common/mocks-rpma-conn_cfg.c000066400000000000000000000054331443364775400221000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * mocks-rpma-conn_cfg.c -- librpma conn_cfg.c module mocks */ #include #include "cmocka_headers.h" #include "conn_cfg.h" #include "mocks-rpma-conn_cfg.h" #include "test-common.h" /* * rpma_conn_cfg_default -- rpma_conn_cfg_default() mock */ struct rpma_conn_cfg * rpma_conn_cfg_default() { return MOCK_CONN_CFG_DEFAULT; } /* * rpma_conn_cfg_get_timeout -- rpma_conn_cfg_get_timeout() mock */ int rpma_conn_cfg_get_timeout(const struct rpma_conn_cfg *cfg, int *timeout_ms) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(timeout_ms); *timeout_ms = args->timeout_ms; return 0; } /* * rpma_conn_cfg_get_cqe -- rpma_conn_cfg_get_cqe() mock */ void rpma_conn_cfg_get_cqe(const struct rpma_conn_cfg *cfg, int *cqe) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(cqe); *cqe = (int)args->cq_size; } /* * rpma_conn_cfg_get_rcqe -- rpma_conn_cfg_get_rcqe() mock */ void rpma_conn_cfg_get_rcqe(const struct rpma_conn_cfg *cfg, int *rcqe) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(rcqe); *rcqe = (int)args->rcq_size; } /* * rpma_conn_cfg_get_sq_size -- rpma_conn_cfg_get_sq_size() mock */ int rpma_conn_cfg_get_sq_size(const struct rpma_conn_cfg *cfg, uint32_t *sq_size) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(sq_size); *sq_size = args->sq_size; return 0; } /* * rpma_conn_cfg_get_rq_size -- rpma_conn_cfg_get_rq_size() mock */ int rpma_conn_cfg_get_rq_size(const struct rpma_conn_cfg *cfg, uint32_t *rq_size) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(rq_size); *rq_size = args->rq_size; return 0; } /* * rpma_conn_cfg_get_compl_channel -- rpma_conn_cfg_get_compl_channel() mock */ int rpma_conn_cfg_get_compl_channel(const struct rpma_conn_cfg *cfg, bool *shared) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(shared); *shared = args->shared; return 0; } /* * rpma_conn_cfg_get_srq -- rpma_conn_cfg_get_srq() mock */ int rpma_conn_cfg_get_srq(const struct rpma_conn_cfg *cfg, struct rpma_srq **srq_ptr) { struct conn_cfg_get_mock_args *args = mock_type(struct conn_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(srq_ptr); *srq_ptr = args->srq; return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-conn_cfg.h000066400000000000000000000021331443364775400220770ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * mocks-rpma-conn_cfg.h -- a rpma-conn_cfg mocks header */ #include #ifndef MOCKS_RPMA_CONN_CFG_H #define MOCKS_RPMA_CONN_CFG_H /* random values */ #define MOCK_CONN_CFG_DEFAULT (struct rpma_conn_cfg *)0xCF6D #define MOCK_CONN_CFG_CUSTOM (struct rpma_conn_cfg *)0xCF6C #define MOCK_CQ_SIZE_DEFAULT 10 #define MOCK_RCQ_SIZE_DEFAULT 0 #define MOCK_SQ_SIZE_DEFAULT 11 #define MOCK_RQ_SIZE_DEFAULT 12 
#define MOCK_SHARED_DEFAULT false #define MOCK_TIMEOUT_MS_CUSTOM 4034 #define MOCK_CQ_SIZE_CUSTOM 13 #define MOCK_RCQ_SIZE_CUSTOM 16 #define MOCK_SQ_SIZE_CUSTOM 14 #define MOCK_RQ_SIZE_CUSTOM 15 #define MOCK_SHARED_CUSTOM true struct conn_cfg_get_mock_args { struct rpma_conn_cfg *cfg; int timeout_ms; uint32_t sq_size; uint32_t rq_size; uint32_t cq_size; uint32_t rcq_size; bool shared; struct rpma_srq *srq; struct rpma_cq *srq_rcq; }; /* current hardcoded values */ #define RPMA_MAX_SGE 1 #define RPMA_MAX_INLINE_DATA 8 #endif /* MOCKS_RPMA_CONN_CFG_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-cq.c000066400000000000000000000033021443364775400207200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * mocks-rpma-cq.c -- librpma cq.c module mocks */ #include "librpma.h" #include "cmocka_headers.h" #include "mocks-rpma-cq.h" /* * rpma_cq_get_fd -- rpma_cq_get_fd() mock */ int rpma_cq_get_fd(const struct rpma_cq *cq, int *fd) { assert_ptr_equal(cq, MOCK_RPMA_CQ); assert_non_null(fd); *fd = mock_type(int); /* * XXX so far this function cannot fail. * It will be able to fail when it becomes public. */ return 0; } /* * rpma_cq_wait -- rpma_cq_wait() mock */ int rpma_cq_wait(struct rpma_cq *cq) { assert_ptr_equal(cq, MOCK_RPMA_CQ); return mock_type(int); } /* * rpma_cq_new -- rpma_cq_new() mock */ int rpma_cq_new(struct ibv_context *ibv_ctx, int cqe, struct ibv_comp_channel *shared_channel, struct rpma_cq **cq_ptr) { assert_non_null(ibv_ctx); check_expected(cqe); check_expected(shared_channel); assert_non_null(cq_ptr); struct rpma_cq *cq = mock_type(struct rpma_cq *); if (!cq) { int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } *cq_ptr = cq; return 0; } /* * rpma_cq_delete -- rpma_cq_delete() mock */ int rpma_cq_delete(struct rpma_cq **cq_ptr) { assert_non_null(cq_ptr); check_expected_ptr(*cq_ptr); int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); *cq_ptr = NULL; return result; } /* * rpma_cq_get_ibv_cq -- rpma_cq_get_ibv_cq() mock */ struct ibv_cq * rpma_cq_get_ibv_cq(const struct rpma_cq *cq) { check_expected_ptr(cq); return mock_type(struct ibv_cq *); } rpma-1.3.0/tests/unit/common/mocks-rpma-cq.h000066400000000000000000000006031443364775400207260ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * mocks-rpma-cq.h -- librpma cq.c module mocks */ #ifndef MOCKS_RPMA_CQ_H #define MOCKS_RPMA_CQ_H #include "test-common.h" #include "cq.h" #define MOCK_RPMA_CQ (struct rpma_cq *)0xD418 #define MOCK_RPMA_RCQ (struct rpma_cq *)0xD419 #endif /* MOCKS_RPMA_CQ_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-flush.c000066400000000000000000000032321443364775400214400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * mocks-rpma-flush.c -- librpma flush.c module mocks */ #include #include #include "cmocka_headers.h" #include "flush.h" #include "mocks-ibverbs.h" #include "mocks-rpma-flush.h" #include "test-common.h" struct rpma_flush Rpma_flush; /* * rpma_flush_mock_execute -- rpma_flush_apm_execute()/rpma_native_flush_execute() mock */ int rpma_flush_mock_execute(struct ibv_qp *qp, struct rpma_flush *flush, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, 
enum rpma_flush_type type, int flags, const void *op_context) { assert_non_null(qp); assert_non_null(flush); assert_non_null(dst); assert_int_not_equal(flags, 0); check_expected_ptr(qp); check_expected_ptr(flush); check_expected_ptr(dst); check_expected(dst_offset); check_expected(len); check_expected(flags); check_expected_ptr(op_context); return 0; } /* * rpma_flush_new -- rpma_flush_new() mock */ int rpma_flush_new(struct rpma_peer *peer, struct ibv_qp *qp, struct rpma_flush **flush_ptr) { assert_int_equal(peer, MOCK_PEER); assert_ptr_equal(qp, MOCK_QP); assert_non_null(flush_ptr); Rpma_flush.func = rpma_flush_mock_execute; int ret = mock_type(int); if (ret == MOCK_OK) *flush_ptr = MOCK_FLUSH; return ret; } /* * rpma_flush_delete -- rpma_flush_delete() mock */ int rpma_flush_delete(struct rpma_flush **flush_ptr) { assert_ptr_equal(*flush_ptr, MOCK_FLUSH); *flush_ptr = NULL; int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); return ret; } rpma-1.3.0/tests/unit/common/mocks-rpma-flush.h000066400000000000000000000005041443364775400214440ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * mocks-rpma-flush.h -- a librpma_flush mocks header */ #ifndef MOCKS_RPMA_FLUSH_H #define MOCKS_RPMA_FLUSH_H extern struct rpma_flush Rpma_flush; #define MOCK_FLUSH (struct rpma_flush *)&Rpma_flush #endif /* MOCKS_RPMA_FLUSH_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-info.c000066400000000000000000000034371443364775400212610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mocks-rpma-info.c -- librpma info.c module mocks */ #include #include #include "cmocka_headers.h" #include "info.h" #include "test-common.h" /* * rpma_info_new -- mock of rpma_info_new */ int rpma_info_new(const char *addr, const char *port, enum rpma_info_side side, struct rpma_info **info_ptr) { assert_string_equal(addr, MOCK_IP_ADDRESS); assert_string_equal(port, MOCK_PORT); assert_int_equal(side, RPMA_INFO_ACTIVE); *info_ptr = mock_type(struct rpma_info *); if (*info_ptr == NULL) { int result = mock_type(int); assert_int_not_equal(result, 0); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } return 0; } /* * rpma_info_delete -- mock of rpma_info_delete */ int rpma_info_delete(struct rpma_info **info_ptr) { assert_non_null(info_ptr); assert_int_equal(*info_ptr, MOCK_INFO); /* if argument is correct it cannot fail */ return 0; } /* * rpma_info_resolve_addr -- mock of rpma_info_resolve_addr */ int rpma_info_resolve_addr(const struct rpma_info *info, struct rdma_cm_id *id, int timeout_ms) { assert_int_equal(info, MOCK_INFO); check_expected(id); check_expected(timeout_ms); int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); if (ret == MOCK_OK) expect_value(rdma_resolve_route, id, id); return ret; } /* * rpma_info_bind_addr -- mock of rpma_info_bind_addr */ int rpma_info_bind_addr(const struct rpma_info *info, struct rdma_cm_id *id) { check_expected(info); check_expected(id); int ret = mock_type(int); /* XXX validate the errno handling */ if (ret) errno = mock_type(int); return ret; } rpma-1.3.0/tests/unit/common/mocks-rpma-log.c000066400000000000000000000023471443364775400211060ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mocks-rpma-log.c -- librpma 
log.c module mocks */ #include "cmocka_headers.h" #include "log_internal.h" #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ enum rpma_log_level Rpma_log_threshold[] = { /* all logs have to be triggered */ RPMA_LOG_LEVEL_DEBUG, /* RPMA_LOG_THRESHOLD */ RPMA_LOG_DISABLED /* RPMA_LOG_THRESHOLD_AUX */ }; /* * mock_function -- logging function's mock */ static void mock_function(enum rpma_log_level level, const char *file_name, const int line_no, const char *function_name, const char *message_format, ...) { } #ifdef ATOMIC_OPERATIONS_SUPPORTED _Atomic #endif /* ATOMIC_OPERATIONS_SUPPORTED */ uintptr_t Rpma_log_function = (uintptr_t)mock_function; /* * rpma_log_init -- rpma_log_init() mock */ void rpma_log_init() { function_called(); } /* * rpma_log_fini -- rpma_log_fini() mock */ void rpma_log_fini() { function_called(); } /* * rpma_log_fini -- rpma_log_get_threshold() mock */ int rpma_log_get_threshold(enum rpma_log_threshold threshold, enum rpma_log_level *level) { if (level == NULL) return RPMA_E_INVAL; *level = RPMA_LOG_LEVEL_NOTICE; return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-log_default.c000066400000000000000000000023331443364775400226050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mocks-rpma-log_default.c -- librpma log_default.c module mocks */ #include "cmocka_headers.h" #include "log_default.h" /* * rpma_log_default_function -- rpma_log_default_function() mock */ void rpma_log_default_function(enum rpma_log_level level, const char *file_name, const int line_no, const char *function_name, const char *message_format, ...) { function_called(); } /* * rpma_log_default_init -- rpma_log_default_init() mock */ void rpma_log_default_init(void) { function_called(); } /* * rpma_log_default_fini -- rpma_log_default_fini() mock */ void rpma_log_default_fini(void) { function_called(); } int mock__sync_bool_compare_and_swap__function(uintptr_t *ptr, uintptr_t oldval, uintptr_t newval) { static int run_orig = 1; run_orig = run_orig ? 0 : 1; if (run_orig) return __sync_bool_compare_and_swap(ptr, oldval, newval); return 0; } int mock__sync_bool_compare_and_swap__threshold(enum rpma_log_level *ptr, enum rpma_log_level oldval, enum rpma_log_level newval) { static int run_orig = 1; run_orig = run_orig ? 
0 : 1; if (run_orig) return __sync_bool_compare_and_swap(ptr, oldval, newval); return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-mr.c000066400000000000000000000121171443364775400207370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * mocks-rpma-mr.c -- librpma mr.c module mocks */ #include #include #include "cmocka_headers.h" #include "mr.h" #include "test-common.h" /* * rpma_mr_read -- rpma_mr_read() mock */ int rpma_mr_read(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t dst_offset, const struct rpma_mr_remote *src, size_t src_offset, size_t len, int flags, const void *op_context) { assert_non_null(qp); assert_int_not_equal(flags, 0); assert_true((src != NULL && dst != NULL) || (src == NULL && dst == NULL && dst_offset == 0 && src_offset == 0 && len == 0)); check_expected_ptr(qp); check_expected_ptr(dst); check_expected(dst_offset); check_expected_ptr(src); check_expected(src_offset); check_expected(len); check_expected(flags); check_expected_ptr(op_context); return mock_type(int); } /* * rpma_mr_write -- rpma_mr_write() mock */ int rpma_mr_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const struct rpma_mr_local *src, size_t src_offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context) { assert_non_null(qp); assert_int_not_equal(flags, 0); assert_true((src != NULL && dst != NULL) || (src == NULL && dst == NULL && dst_offset == 0 && src_offset == 0 && len == 0)); check_expected_ptr(qp); check_expected_ptr(dst); check_expected(dst_offset); check_expected_ptr(src); check_expected(src_offset); check_expected(len); check_expected(flags); check_expected(operation); check_expected(imm); check_expected_ptr(op_context); return mock_type(int); } /* * rpma_mr_atomic_write -- rpma_mr_atomic_write() mock */ int rpma_mr_atomic_write(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, const char src[8], int flags, const void *op_context) { assert_non_null(qp); assert_int_not_equal(flags, 0); assert_non_null(src); assert_non_null(dst); check_expected_ptr(qp); check_expected_ptr(dst); check_expected(dst_offset); check_expected_ptr(src); check_expected(flags); check_expected_ptr(op_context); return mock_type(int); } /* * rpma_mr_reg -- a mock of rpma_mr_reg() */ int rpma_mr_reg(struct rpma_peer *peer, void *ptr, size_t size, int usage, struct rpma_mr_local **mr_ptr) { check_expected_ptr(peer); check_expected(size); check_expected(usage); assert_non_null(mr_ptr); void **paddr = mock_type(void **); assert_ptr_equal(ptr, *paddr); *mr_ptr = mock_type(struct rpma_mr_local *); if (*mr_ptr == NULL) return mock_type(int); /* errno */ return 0; } /* * rpma_mr_dereg -- a mock of rpma_mr_dereg() */ int rpma_mr_dereg(struct rpma_mr_local **mr_ptr) { assert_non_null(mr_ptr); check_expected_ptr(*mr_ptr); int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); *mr_ptr = NULL; return ret; } /* * rpma_mr_send -- mock of rpma_mr_send */ int rpma_mr_send(struct ibv_qp *qp, const struct rpma_mr_local *src, size_t offset, size_t len, int flags, enum ibv_wr_opcode operation, uint32_t imm, const void *op_context) { assert_non_null(qp); assert_int_not_equal(flags, 0); assert_true(src != NULL || (offset == 0 && len == 0)); check_expected_ptr(qp); check_expected_ptr(src); check_expected(offset); check_expected(len); check_expected(flags); check_expected(operation); 
check_expected(imm); check_expected_ptr(op_context); return mock_type(int); } /* * rpma_mr_recv -- mock of rpma_mr_recv */ int rpma_mr_recv(struct ibv_qp *qp, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { assert_non_null(qp); assert_true(dst != NULL || (offset == 0 && len == 0)); check_expected_ptr(qp); check_expected_ptr(dst); check_expected(offset); check_expected(len); check_expected_ptr(op_context); return mock_type(int); } /* * rpma_mr_srq_recv -- mock of rpma_mr_srq_recv */ int rpma_mr_srq_recv(struct ibv_srq *srq, struct rpma_mr_local *dst, size_t offset, size_t len, const void *op_context) { assert_non_null(srq); assert_true(dst != NULL || (offset == 0 && len == 0)); check_expected_ptr(srq); check_expected_ptr(dst); check_expected(offset); check_expected(len); check_expected_ptr(op_context); return mock_type(int); } #ifdef NATIVE_FLUSH_SUPPORTED /* * rpma_mr_flush -- mock of rpma_mr_flush */ int rpma_mr_flush(struct ibv_qp *qp, struct rpma_mr_remote *dst, size_t dst_offset, size_t len, enum rpma_flush_type type, int flags, const void *op_context) { assert_non_null(qp); assert_int_not_equal(flags, 0); assert_non_null(dst); check_expected_ptr(qp); check_expected_ptr(dst); check_expected(dst_offset); check_expected(len); check_expected(type); check_expected(flags); check_expected_ptr(op_context); return mock_type(int); } #endif /* * rpma_mr_remote_get_flush_type -- mock of rpma_mr_remote_get_flush_type */ int rpma_mr_remote_get_flush_type(const struct rpma_mr_remote *mr, int *flush_type) { check_expected_ptr(mr); assert_non_null(flush_type); *flush_type = mock_type(int); return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-peer.c000066400000000000000000000046211443364775400212550ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2022, Fujitsu */ /* * mocks-rpma-peer.c -- librpma peer.c module mocks */ #include #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-peer.h" #include "mocks-rpma-cq.h" #include "mocks-rpma-srq.h" #include "mocks-rpma-srq_cfg.h" /* * rpma_peer_create_srq -- rpma_peer_create_srq() mock */ int rpma_peer_create_srq(struct rpma_peer *peer, struct rpma_srq_cfg *cfg, struct ibv_srq **ibv_srq_ptr, struct rpma_cq **rcq_ptr) { struct srq_cfg_get_mock_args *args = mock_type(struct srq_cfg_get_mock_args *); assert_ptr_equal(peer, MOCK_PEER); assert_ptr_equal(cfg, args->cfg); int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) { errno = mock_type(int); } else { *ibv_srq_ptr = MOCK_IBV_SRQ; *rcq_ptr = args->rcq_size ? MOCK_RPMA_SRQ_RCQ : NULL; } return result; } /* * rpma_peer_setup_qp -- rpma_peer_setup_qp() mock */ int rpma_peer_setup_qp(struct rpma_peer *peer, struct rdma_cm_id *id, struct rpma_cq *cq, struct rpma_cq *rcq, const struct rpma_conn_cfg *cfg) { assert_ptr_equal(peer, MOCK_PEER); check_expected_ptr(id); assert_ptr_equal(cq, MOCK_RPMA_CQ); check_expected_ptr(rcq); check_expected_ptr(cfg); int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } /* * rpma_peer_setup_mr_reg -- a mock of rpma_peer_setup_mr_reg() */ int rpma_peer_setup_mr_reg(struct rpma_peer *peer, struct ibv_mr **ibv_mr_ptr, void *addr, size_t length, int usage) { /* * rpma_peer_setup_mr_reg() and malloc() may be called in any order. 
* If the first one fails, then the second one won't be called, * so we cannot add cmocka's expects here. * Otherwise, unconsumed expects would cause a test failure. */ struct rpma_peer_setup_mr_reg_args *args = mock_type(struct rpma_peer_setup_mr_reg_args *); assert_ptr_equal(peer, MOCK_PEER); assert_ptr_equal(addr, MOCK_PTR); assert_int_equal(length, MOCK_SIZE); assert_int_equal(usage, args->usage); *ibv_mr_ptr = args->mr; if (*ibv_mr_ptr == NULL) { /* XXX validate the errno handling */ errno = args->verrno; return RPMA_E_PROVIDER; } (*ibv_mr_ptr)->addr = addr; (*ibv_mr_ptr)->length = length; (*ibv_mr_ptr)->rkey = MOCK_RKEY; return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-peer.h000066400000000000000000000010111443364775400212500ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* * mocks-rpma-peer.h -- a rpma_peer mocks header */ #ifndef MOCKS_RPMA_PEER_H #define MOCKS_RPMA_PEER_H #define MOCK_PTR (void *)0x0001020304050607 #define MOCK_SIZE (size_t)0x08090a0b0c0d0e0f #define MOCK_RKEY (uint32_t)0x10111213 /* structure of arguments used in rpma_peer_setup_mr_reg() */ struct rpma_peer_setup_mr_reg_args { int usage; int access; struct ibv_mr *mr; int verrno; }; #endif /* MOCKS_RPMA_PEER_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-peer_cfg.c000066400000000000000000000010051443364775400220650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mocks-rpma-peer_cfg.c -- librpma peer_cfg.c module mocks */ #include #include "cmocka_headers.h" #include "test-common.h" /* * rpma_peer_cfg_get_direct_write_to_pmem -- mock of the original one */ int rpma_peer_cfg_get_direct_write_to_pmem(const struct rpma_peer_cfg *pcfg, bool *supported) { assert_ptr_equal(pcfg, MOCK_PEER_PCFG); assert_non_null(supported); *supported = mock_type(bool); return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-private_data.c000066400000000000000000000020621443364775400227620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mocks-rpma-private_data.c -- librpma private_data.c module mocks */ #include #include #include #include "cmocka_headers.h" /* * rpma_private_data_store -- rpma_private_data_store() mock */ int rpma_private_data_store(struct rdma_cm_event *edata, struct rpma_conn_private_data *pdata) { const LargestIntegralType allowed_events[] = { RDMA_CM_EVENT_CONNECT_REQUEST, RDMA_CM_EVENT_ESTABLISHED}; assert_non_null(edata); assert_in_set(edata->event, allowed_events, sizeof(allowed_events) / sizeof(allowed_events[0])); assert_non_null(pdata); assert_null(pdata->ptr); assert_int_equal(pdata->len, 0); pdata->ptr = mock_type(void *); if (pdata->ptr == NULL) return RPMA_E_NOMEM; pdata->len = strlen(pdata->ptr) + 1; return 0; } /* * rpma_private_data_delete -- rpma_private_data_delete() mock */ void rpma_private_data_delete(struct rpma_conn_private_data *pdata) { assert_non_null(pdata); function_called(); } rpma-1.3.0/tests/unit/common/mocks-rpma-srq.c000066400000000000000000000011171443364775400211240ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * mocks-rpma-srq.c -- librpma srq.c module mocks */ #include "librpma.h" #include "cmocka_headers.h" /* * rpma_srq_get_rcq -- rpma_srq_get_rcq() mock */ int rpma_srq_get_rcq(const struct rpma_srq *srq, struct rpma_cq **rcq_ptr) { check_expected_ptr(srq); *rcq_ptr = mock_type(struct rpma_cq *); return 0; } /* * 
rpma_srq_get_ibv_srq -- rpma_srq_get_ibv_srq() mock */ struct ibv_srq * rpma_srq_get_ibv_srq(const struct rpma_srq *srq) { check_expected_ptr(srq); return mock_type(struct ibv_srq *); } rpma-1.3.0/tests/unit/common/mocks-rpma-srq.h000066400000000000000000000004701443364775400211320ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * mocks-rpma-srq.h -- a rpma-srq mocks header */ #ifndef MOCKS_RPMA_SRQ_H #define MOCKS_RPMA_SRQ_H #define MOCK_RPMA_SRQ (struct rpma_srq *)0xCD12 #define MOCK_RPMA_SRQ_RCQ (struct rpma_cq *)0xD420 #endif /* MOCKS_RPMA_SRQ_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-srq_cfg.c000066400000000000000000000020141443364775400217400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * mocks-rpma-srq_cfg.c -- librpma srq_cfg.c module mocks */ #include #include "cmocka_headers.h" #include "mocks-rpma-srq_cfg.h" #include "test-common.h" /* * rpma_srq_cfg_default -- rpma_srq_cfg_default() mock */ struct rpma_srq_cfg * rpma_srq_cfg_default() { return MOCK_SRQ_CFG_DEFAULT; } /* * rpma_srq_cfg_get_rcqe -- rpma_srq_cfg_get_rcqe() mock */ void rpma_srq_cfg_get_rcqe(const struct rpma_srq_cfg *cfg, int *rcqe) { struct srq_cfg_get_mock_args *args = mock_type(struct srq_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(rcqe); *rcqe = (int)args->rcq_size; } /* * rpma_srq_cfg_get_rq_size -- rpma_srq_cfg_get_rq_size() mock */ int rpma_srq_cfg_get_rq_size(const struct rpma_srq_cfg *cfg, uint32_t *rq_size) { struct srq_cfg_get_mock_args *args = mock_type(struct srq_cfg_get_mock_args *); assert_ptr_equal(cfg, args->cfg); assert_non_null(rq_size); *rq_size = args->rq_size; return 0; } rpma-1.3.0/tests/unit/common/mocks-rpma-srq_cfg.h000066400000000000000000000011731443364775400217520ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * mocks-rpma-srq_cfg.h -- a rpma-srq_cfg mocks header */ #include #ifndef MOCKS_RPMA_SRQ_CFG_H #define MOCKS_RPMA_SRQ_CFG_H /* random values */ #define MOCK_SRQ_CFG_DEFAULT (struct rpma_srq_cfg *)0xCF6E #define MOCK_SRQ_CFG_CUSTOM (struct rpma_srq_cfg *)0xCF6F #define MOCK_SRQ_SIZE_DEFAULT 100 #define MOCK_SRQ_SIZE_CUSTOM 200 #define MOCK_SRQ_RCQ_SIZE_DEFAULT 100 #define MOCK_SRQ_RCQ_SIZE_CUSTOM 0 struct srq_cfg_get_mock_args { struct rpma_srq_cfg *cfg; uint32_t rq_size; uint32_t rcq_size; }; #endif /* MOCKS_RPMA_SRQ_CFG_H */ rpma-1.3.0/tests/unit/common/mocks-rpma-utils.c000066400000000000000000000040541443364775400214620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * mocks-rpma-utils.c -- librpma utils.c module mocks (rpma_utils_*) */ #include #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "test-common.h" /* * rpma_utils_ibv_context_is_atomic_write_capable -- * rpma_utils_ibv_context_is_atomic_write_capable() mock */ int rpma_utils_ibv_context_is_atomic_write_capable(struct ibv_context *ibv_ctx, int *is_atomic_write_capable) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); assert_non_null(is_atomic_write_capable); *is_atomic_write_capable = mock_type(int); if (*is_atomic_write_capable == MOCK_ERR_PENDING) { int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); return ret; } return 0; } /* * rpma_utils_ibv_context_is_flush_capable -- * rpma_utils_ibv_context_is_flush_capable() mock */ int 
rpma_utils_ibv_context_is_flush_capable(struct ibv_context *ibv_ctx, int *is_flush_capable) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); assert_non_null(is_flush_capable); *is_flush_capable = mock_type(int); if (*is_flush_capable == MOCK_ERR_PENDING) { int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); return ret; } return 0; } /* * rpma_utils_ibv_context_is_odp_capable -- * rpma_utils_ibv_context_is_odp_capable() mock */ int rpma_utils_ibv_context_is_odp_capable(struct ibv_context *ibv_ctx, int *is_odp_capable) { assert_ptr_equal(ibv_ctx, MOCK_VERBS); assert_non_null(is_odp_capable); *is_odp_capable = mock_type(int); if (*is_odp_capable == MOCK_ERR_PENDING) { int ret = mock_type(int); /* XXX validate the errno handling */ if (ret == RPMA_E_PROVIDER) errno = mock_type(int); return ret; } return 0; } /* * rpma_utils_conn_event_2str -- rpma_utils_conn_event_2str() mock */ const char * rpma_utils_conn_event_2str(enum rpma_conn_event conn_event) { return ""; } rpma-1.3.0/tests/unit/common/mocks-rpma-utils.h000066400000000000000000000007341443364775400214700ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * mocks-rpma-utils.h -- the rpma-utils mocks' header */ #ifndef MOCKS_RPMA_UTILS_H #define MOCKS_RPMA_UTILS_H #define MOCK_ODP_CAPABLE 1 #define MOCK_ODP_INCAPABLE 0 #define MOCK_ATOMIC_WRITE_CAPABLE 1 #define MOCK_ATOMIC_WRITE_INCAPABLE 0 #define MOCK_FLUSH_CAPABLE 1 #define MOCK_FLUSH_INCAPABLE 0 #endif /* MOCKS_RPMA_UTILS_H */ rpma-1.3.0/tests/unit/common/mocks-stdio.c000066400000000000000000000026031443364775400205050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mocks-stdio.c -- stdio mocks */ #include #include "cmocka_headers.h" #include "mocks-stdio.h" #include "test-common.h" /* * __wrap_vsnprintf -- vsnprintf() mock */ int __wrap_vsnprintf(char *str, size_t size, const char *format, va_list ap) { int ret = mock_type(int); if (ret < 0) return ret; ret = __real_vsnprintf(str, size, format, ap); assert_true(ret > 0); return ret; } /* * __wrap_snprintf -- snprintf() mock */ int __wrap_snprintf(char *str, size_t size, const char *format, ...) { int ret = mock_type(int); if (ret < 0) return ret; if (ret == MOCK_SNPRINTF_NO_EOL) { memset(str, 'x', size); return size; } va_list ap; va_start(ap, format); ret = __real_vsnprintf(str, size, format, ap); assert_true(ret > 0); va_end(ap); return ret; } /* * __wrap_fprintf -- fprintf() mock */ int __wrap_fprintf(FILE *stream, const char *format, ...) 
{ static char fprintf_output[MOCK_BUFF_LEN]; assert_ptr_equal(stream, stderr); va_list ap; va_start(ap, format); int ret = __real_vsnprintf(fprintf_output, MOCK_BUFF_LEN, format, ap); assert_true(ret > 0); va_end(ap); int cmd = mock_type(int); if (cmd == MOCK_STDIO_ERROR) return -1; if (cmd == MOCK_VALIDATE) check_expected_ptr(fprintf_output); else assert_int_equal(cmd, MOCK_PASSTHROUGH); return ret; } rpma-1.3.0/tests/unit/common/mocks-stdio.h000066400000000000000000000011671443364775400205160ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * mocks-stdio.h -- the stdio mocks' header */ #ifndef MOCKS_STDIO_H #define MOCKS_STDIO_H #include #include #define MOCK_BUFF_LEN 1024 #define MOCK_STDIO_ERROR (-1) #define MOCK_SNPRINTF_NO_EOL INT_MAX int __wrap_vsnprintf(char *str, size_t size, const char *format, va_list ap); int __wrap_snprintf(char *str, size_t size, const char *format, ...); int __wrap_fprintf(FILE *stream, const char *format, ...); int __real_vsnprintf(char *str, size_t size, const char *format, va_list ap); #endif /* MOCKS_STDIO_H */ rpma-1.3.0/tests/unit/common/mocks-stdlib.c000066400000000000000000000024351443364775400206470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * mocks-stdlib.c -- stdlib mocks */ #include #include #include #include #include "cmocka_headers.h" #include "mocks-stdlib.h" #include "test-common.h" void *__real__test_malloc(size_t size); /* * __wrap__test_malloc -- malloc() mock */ void * __wrap__test_malloc(size_t size) { errno = mock_type(int); if (errno) return NULL; return __real__test_malloc(size); } /* * __wrap_mmap -- mmap() mock */ void * __wrap_mmap(void *__addr, size_t __len, int __prot, int __flags, int __fd, off_t __offset) { void *ret = mock_type(void *); if (ret != (void *)MOCK_OK) return MAP_FAILED; struct mmap_args *args = mock_type(struct mmap_args *); void *memptr = __real__test_malloc(__len); /* * Save the address and length of the allocated memory * in order to verify it later. 
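 * (__wrap_munmap() below asserts it is called with exactly the same
 * address and length.)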
*/ args->addr = memptr; args->len = __len; return memptr; } /* * __wrap_munmap -- munmap() mock */ int __wrap_munmap(void *__addr, size_t __len) { struct mmap_args *args = mock_type(struct mmap_args *); assert_ptr_equal(__addr, args->addr); assert_int_equal(__len, args->len); test_free(__addr); errno = mock_type(int); if (errno) return -1; return 0; } rpma-1.3.0/tests/unit/common/mocks-stdlib.h000066400000000000000000000004141443364775400206470ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2021, Intel Corporation */ /* * mocks-stdlib.h -- the stdlib mocks' header */ #ifndef MOCKS_STDLIB_H #define MOCKS_STDLIB_H struct mmap_args { void *addr; size_t len; }; #endif /* MOCKS_STDLIB_H */ rpma-1.3.0/tests/unit/common/mocks-string.c000066400000000000000000000004621443364775400206720ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Fujitsu */ /* * mocks-string.c -- string mocks */ #include "cmocka_headers.h" #include "mocks-string.h" /* * __wrap_strerror -- strerror() mock */ char * __wrap_strerror(int errnum) { check_expected(errnum); return mock_type(char *); } rpma-1.3.0/tests/unit/common/mocks-string.h000066400000000000000000000003551443364775400207000ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2021, Fujitsu */ /* * mocks-string.h -- the string mocks' header */ #ifndef MOCKS_STRING_H #define MOCKS_STRING_H #define MOCK_ERROR "mock error" #endif /* MOCKS_STRING_H */ rpma-1.3.0/tests/unit/common/mocks-syslog.c000066400000000000000000000016371443364775400207110ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mocks-syslog.c -- syslog mocks */ #include "cmocka_headers.h" #include "mocks-stdio.h" #include "test-common.h" /* * openlog -- openlog() mock */ void openlog(const char *__ident, int __option, int __facility) { check_expected(__ident); check_expected(__option); check_expected(__facility); } /* * closelog -- closelog() mock */ void closelog(void) { function_called(); } /* * syslog -- syslog() mock */ void syslog(int priority, const char *format, ...) 
{ static char syslog_output[MOCK_BUFF_LEN]; va_list ap; va_start(ap, format); int ret = __real_vsnprintf(syslog_output, MOCK_BUFF_LEN, format, ap); assert_true(ret > 0); va_end(ap); int cmd = mock_type(int); if (cmd == MOCK_VALIDATE) { check_expected(priority); check_expected_ptr(syslog_output); } else { assert_int_equal(cmd, MOCK_PASSTHROUGH); } } rpma-1.3.0/tests/unit/common/mocks-time.c000066400000000000000000000030531443364775400203210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mocks-time.c -- time.h mocks */ #include #include #include "cmocka_headers.h" #include "mocks-time.h" /* * __wrap_clock_gettime -- clock_gettime() mock */ int __wrap_clock_gettime(clockid_t __clock_id, struct timespec *__tp) { assert_int_equal(__clock_id, CLOCK_REALTIME); assert_non_null(__tp); struct timespec *tp = mock_type(struct timespec *); if (NULL == tp) return -1; memcpy(__tp, tp, sizeof(struct timespec)); return 0; } /* * __wrap_localtime_r -- localtime_r() mock */ struct tm * __wrap_localtime_r(const time_t *restrict __timer, struct tm *restrict __result) { assert_non_null(__timer); assert_non_null(__result); time_t *timer = mock_type(time_t *); assert_memory_equal(__timer, timer, sizeof(time_t)); struct tm *__tm = mock_type(struct tm *); if (__tm) memcpy(__result, __tm, sizeof(*__result)); return __tm; } size_t __real_strftime(char *__restrict __s, size_t __maxsize, const char *__restrict __format, const struct tm *__restrict __tp); /* * __wrap_strftime -- strftime() mock */ size_t __wrap_strftime(char *__restrict __s, size_t __maxsize, const char *__restrict __format, const struct tm *__restrict __tp) { assert_non_null(__s); assert_non_null(__format); assert_non_null(__tp); size_t ret = mock_type(size_t); if (ret == MOCK_STRFTIME_ERROR) return ret; assert_int_equal(ret, MOCK_STRFTIME_SUCCESS); ret = __real_strftime(__s, __maxsize, __format, __tp); assert_true(ret > 0); return ret; } rpma-1.3.0/tests/unit/common/mocks-time.h000066400000000000000000000004461443364775400203310ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * mocks-time.h -- the time mocks' header */ #ifndef MOCKS_TIME_H #define MOCKS_TIME_H #include #define MOCK_STRFTIME_ERROR 0 #define MOCK_STRFTIME_SUCCESS INT_MAX #endif /* MOCKS_TIME_H */ rpma-1.3.0/tests/unit/common/mocks-unistd.c000066400000000000000000000014311443364775400206670ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * mocks-unistd.c -- unistd mocks */ #include #include #include "cmocka_headers.h" #include "mocks-unistd.h" static int unistd_mocks_enabled; long __real_sysconf(int name); /* * enable_unistd_mocks -- enable unistd mocks */ void enable_unistd_mocks(void) { unistd_mocks_enabled = 1; } /* * disable_unistd_mocks -- disable unistd mocks */ void disable_unistd_mocks(void) { unistd_mocks_enabled = 0; } /* * __wrap_sysconf -- sysconf() mock */ long __wrap_sysconf(int name) { if (unistd_mocks_enabled == 0) return __real_sysconf(name); assert_int_equal(name, _SC_PAGESIZE); int err = mock_type(int); if (err) { errno = err; return -1; } return PAGESIZE; } rpma-1.3.0/tests/unit/common/mocks-unistd.h000066400000000000000000000005241443364775400206760ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2021, Intel Corporation */ /* * mocks-unistd.h -- the unistd mocks' header */ #ifndef MOCKS_UNISTD_H #define MOCKS_UNISTD_H 
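/* the value __wrap_sysconf() reports for _SC_PAGESIZE when the mocks are enabled and no error is queued */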
#define PAGESIZE 4096 void enable_unistd_mocks(void); void disable_unistd_mocks(void); long __wrap_sysconf(int name); #endif /* MOCKS_UNISTD_H */ rpma-1.3.0/tests/unit/common/test-common.h000066400000000000000000000025311443364775400205230ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * test-common.h -- a test's common header */ #ifndef TEST_COMMON_H #define TEST_COMMON_H #include #define MOCK_IP_ADDRESS "127.0.0.1" #define MOCK_PORT "1234" /* a random port number */ #define MOCK_TIMEOUT_MS 5678 #define MOCK_Q_SIZE 123 #define MOCK_IMM_DATA 0x87654321 /* random values */ #define MOCK_RPMA_MR_LOCAL (struct rpma_mr_local *)0xC411 #define MOCK_PEER (struct rpma_peer *)0xFEEF #define MOCK_INFO (struct rpma_info *)0xE6B2 #define MOCK_CONN (struct rpma_conn *)0xC004 #define MOCK_PEER_PCFG (struct rpma_peer_cfg *)0xA1D2 #define MOCK_PRIVATE_DATA ((void *)Private_data) #define MOCK_PDATA_LEN (strlen(MOCK_PRIVATE_DATA) + 1) #define MOCK_PRIVATE_DATA_2 ((void *)Private_data_2) #define MOCK_PDATA_LEN_2 (strlen(MOCK_PRIVATE_DATA_2) + 1) #define MOCK_LOCAL_OFFSET (size_t)0xC413 #define MOCK_LEN (size_t)0xC415 #define MOCK_FLAGS (int)0xC416 #define MOCK_OP_CONTEXT (void *)0xC417 #define MOCK_COMPLETION_FD 0x00FE #define MOCK_QP_NUM 1289 #define MOCK_OK 0 #define MOCK_ERRNO 123456 #define MOCK_ERRNO2 234567 #define MOCK_PASSTHROUGH 0 #define MOCK_VALIDATE 1 #define MOCK_ERR_PENDING (-1) extern const char Private_data[]; extern const char Private_data_2[]; #endif /* TEST_COMMON_H */ rpma-1.3.0/tests/unit/conn/000077500000000000000000000000001443364775400155515ustar00rootroot00000000000000rpma-1.3.0/tests/unit/conn/CMakeLists.txt000066400000000000000000000026221443364775400203130ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_conn name) set(src_name conn-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c conn-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rdma_cm.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-cq.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-peer_cfg.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-flush.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-mr.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-utils.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/conn.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_conn(apply_remote_peer_cfg) add_test_conn(atomic_write) add_test_conn(disconnect) add_test_conn(flush) add_test_conn(get_compl_fd) add_test_conn(get_cq_rcq) add_test_conn(get_event_fd) add_test_conn(get_qp_num) add_test_conn(new) add_test_conn(next_event) add_test_conn(private_data) add_test_conn(read) add_test_conn(recv) add_test_conn(send) add_test_conn(send_with_imm) add_test_conn(wait) add_test_conn(write) add_test_conn(write_with_imm) rpma-1.3.0/tests/unit/conn/conn-apply_remote_peer_cfg.c000066400000000000000000000037711443364775400232120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn-apply_remote_peer_cfg.c -- the connection apply_remote_peer_cfg * unit tests * * API covered: * - 
rpma_conn_apply_remote_peer_cfg() */ #include "conn-common.h" #include "mocks-ibverbs.h" /* * apply_remote_peer_cfg__conn_NULL -- conn NULL is invalid */ static void apply_remote_peer_cfg__conn_NULL(void **unused) { /* run test */ int ret = rpma_conn_apply_remote_peer_cfg(NULL, MOCK_PEER_PCFG); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * apply_remote_peer_cfg__pcfg_NULL -- pcfg NULL is invalid */ static void apply_remote_peer_cfg__pcfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_apply_remote_peer_cfg(MOCK_CONN, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * apply_remote_peer_cfg__conn_pcfg_NULL -- conn and pcfg NULL are invalid */ static void apply_remote_peer_cfg__conn_pcfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_apply_remote_peer_cfg(NULL, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * apply_remote_peer_cfg__success -- happy day scenario */ static void apply_remote_peer_cfg__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); /* run test */ int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); /* verify the results */ assert_ptr_equal(ret, 0); } static const struct CMUnitTest tests_apply_remote_peer_cfg[] = { /* rpma_conn_apply_remote_peer_cfg() unit tests */ cmocka_unit_test(apply_remote_peer_cfg__conn_NULL), cmocka_unit_test(apply_remote_peer_cfg__pcfg_NULL), cmocka_unit_test(apply_remote_peer_cfg__conn_pcfg_NULL), cmocka_unit_test_setup_teardown( apply_remote_peer_cfg__success, setup__conn_new, teardown__conn_delete), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_apply_remote_peer_cfg, NULL, NULL); } rpma-1.3.0/tests/unit/conn/conn-atomic_write.c000066400000000000000000000076241443364775400213470ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * conn-atomic_write.c -- the rpma_atomic_write() unit tests * * APIs covered: * - rpma_atomic_write() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" static const char Mock_src[8]; /* * atomic_write__conn_NULL -- NULL conn is invalid */ static void atomic_write__conn_NULL(void **unused) { /* run test */ int ret = rpma_atomic_write(NULL, MOCK_RPMA_MR_REMOTE, MOCK_OFFSET_ALIGNED, Mock_src, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__dst_NULL -- NULL dst is invalid */ static void atomic_write__dst_NULL(void **unused) { /* run test */ int ret = rpma_atomic_write(MOCK_CONN, NULL, MOCK_OFFSET_ALIGNED, Mock_src, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__src_NULL -- NULL src is invalid */ static void atomic_write__src_NULL(void **unused) { /* run test */ int ret = rpma_atomic_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_OFFSET_ALIGNED, NULL, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__flags_0 -- flags == 0 is invalid */ static void atomic_write__flags_0(void **unused) { /* run test */ int ret = rpma_atomic_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_OFFSET_ALIGNED, Mock_src, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__dst_offset_unaligned -- the unaligned dst_offset is invalid */ static void atomic_write__dst_offset_unaligned(void 
**unused) { /* run test */ int ret = rpma_atomic_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, Mock_src, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__conn_dst_src_NULL_flags_0_dst_offset_unaligned -- NULL conn, * dst, src, flags == 0 and an unaligned dst_offset are invalid */ static void atomic_write__conn_dst_src_NULL_flags_0_dst_offset_unaligned(void **unused) { /* run test */ int ret = rpma_atomic_write(NULL, NULL, MOCK_REMOTE_OFFSET, NULL, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * atomic_write__success -- happy day scenario */ static void atomic_write__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_atomic_write, qp, MOCK_QP); expect_value(rpma_mr_atomic_write, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_atomic_write, dst_offset, MOCK_OFFSET_ALIGNED); expect_value(rpma_mr_atomic_write, src, Mock_src); expect_value(rpma_mr_atomic_write, flags, MOCK_FLAGS); expect_value(rpma_mr_atomic_write, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_atomic_write, MOCK_OK); /* run test */ int ret = rpma_atomic_write(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_OFFSET_ALIGNED, Mock_src, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_atomic_write -- prepare resources for all tests in the group */ int group_setup_atomic_write(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_atomic_write[] = { /* rpma_atomic_write() unit tests */ cmocka_unit_test(atomic_write__conn_NULL), cmocka_unit_test(atomic_write__dst_NULL), cmocka_unit_test(atomic_write__src_NULL), cmocka_unit_test(atomic_write__flags_0), cmocka_unit_test(atomic_write__dst_offset_unaligned), cmocka_unit_test( atomic_write__conn_dst_src_NULL_flags_0_dst_offset_unaligned), cmocka_unit_test_setup_teardown(atomic_write__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_atomic_write, group_setup_atomic_write, NULL); } rpma-1.3.0/tests/unit/conn/conn-common.c000066400000000000000000000073541443364775400201510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-common.c -- the connection unit tests common functions */ #include #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" const char Private_data[] = "Random data"; const char Private_data_2[] = "Another random data"; struct conn_test_state Conn_no_rcq_no_channel = { .rcq = NULL, .channel = NULL }; struct conn_test_state Conn_no_rcq_with_channel = { .rcq = NULL, .channel = MOCK_COMP_CHANNEL }; struct conn_test_state Conn_with_rcq_no_channel = { .rcq = MOCK_RPMA_RCQ, .channel = NULL }; struct conn_test_state Conn_with_rcq_with_channel = { .rcq = MOCK_RPMA_RCQ, .channel = MOCK_COMP_CHANNEL }; /* * rpma_private_data_store -- rpma_private_data_store() mock */ int rpma_private_data_store(struct rdma_cm_event *edata, struct rpma_conn_private_data *pdata) { const LargestIntegralType allowed_events[] = { RDMA_CM_EVENT_CONNECT_REQUEST, RDMA_CM_EVENT_ESTABLISHED}; assert_non_null(edata); assert_in_set(edata->event, allowed_events, sizeof(allowed_events) / sizeof(allowed_events[0])); assert_non_null(pdata); assert_null(pdata->ptr); assert_int_equal(pdata->len, 
0); int ret = mock_type(int); if (ret) return ret; pdata->ptr = (void *)edata->param.conn.private_data; pdata->len = edata->param.conn.private_data_len; return 0; } /* * rpma_private_data_delete -- rpma_private_data_delete() mock */ void rpma_private_data_delete(struct rpma_conn_private_data *pdata) { assert_non_null(pdata); check_expected(pdata->ptr); check_expected(pdata->len); pdata->ptr = NULL; pdata->len = 0; } /* * setup__conn_new - prepare a valid rpma_conn object */ int setup__conn_new(void **cstate_ptr) { /* the default is Conn_no_rcq_no_channel */ struct conn_test_state *cstate = *cstate_ptr ? *cstate_ptr : &Conn_no_rcq_no_channel; cstate->conn = NULL; cstate->data.ptr = NULL; cstate->data.len = 0; /* configure mock */ will_return(rdma_create_event_channel, MOCK_EVCH); Rdma_migrate_id_counter = RDMA_MIGRATE_COUNTER_INIT; will_return(rdma_migrate_id, MOCK_OK); will_return(rpma_flush_new, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); /* prepare an object */ int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, cstate->rcq, cstate->channel, &cstate->conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate->conn); *cstate_ptr = cstate; return 0; } /* * teardown__conn_delete - delete the rpma_conn object */ int teardown__conn_delete(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_OK); if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, cstate->data.ptr); expect_value(rpma_private_data_delete, pdata->len, cstate->data.len); /* delete the object */ int ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->conn); *cstate_ptr = NULL; return 0; } /* * group_setup_common_conn -- prepare common resources * for all tests in the group */ int group_setup_common_conn(void **unused) { /* set the req_notify_cq callback in mock of IBV CQ */ MOCK_VERBS->ops.req_notify_cq = ibv_req_notify_cq_mock; Ibv_cq.context = MOCK_VERBS; Ibv_rcq.context = MOCK_VERBS; return 0; } rpma-1.3.0/tests/unit/conn/conn-common.h000066400000000000000000000035361443364775400201540ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-common.c -- the connection unit tests common definitions */ #ifndef CONN_COMMON_H #define CONN_COMMON_H 1 #include "cmocka_headers.h" #include "conn.h" #include "mocks-rpma-cq.h" #define MOCK_RPMA_MR_REMOTE ((struct rpma_mr_remote *)0xC412) #define MOCK_REMOTE_OFFSET (size_t)0xC414 #define MOCK_OFFSET_ALIGNED (size_t)((MOCK_REMOTE_OFFSET / \ RPMA_ATOMIC_WRITE_ALIGNMENT) * RPMA_ATOMIC_WRITE_ALIGNMENT) #define MOCK_FD 0x00FD #define CONN_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ_CHANNEL(test_func, \ setup_func, teardown_func) \ {#test_func "__no_rcq_no_channel", (test_func), (setup_func), \ (teardown_func), &Conn_no_rcq_no_channel}, \ {#test_func "__no_rcq_with_channel", (test_func), (setup_func), \ (teardown_func), &Conn_no_rcq_with_channel}, \ {#test_func "__with_rcq_no_channel", (test_func), (setup_func), \ (teardown_func), 
&Conn_with_rcq_no_channel}, \ {#test_func "__with_rcq_with_channel", (test_func), (setup_func), \ (teardown_func), &Conn_with_rcq_with_channel} #define CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL(test_func) \ CONN_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ_CHANNEL(test_func, \ NULL, NULL) /* all the resources used between setup__conn_new and teardown__conn_delete */ struct conn_test_state { struct rpma_conn *conn; struct rpma_conn_private_data data; struct rpma_cq *rcq; struct ibv_comp_channel *channel; }; extern struct conn_test_state Conn_no_rcq_no_channel; extern struct conn_test_state Conn_no_rcq_with_channel; extern struct conn_test_state Conn_with_rcq_no_channel; extern struct conn_test_state Conn_with_rcq_with_channel; int setup__conn_new(void **cstate_ptr); int teardown__conn_delete(void **cstate_ptr); int group_setup_common_conn(void **unused); #endif /* CONN_COMMON_H */ rpma-1.3.0/tests/unit/conn/conn-disconnect.c000066400000000000000000000033711443364775400210050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-disconnect.c -- the connection disconnect unit tests * * API covered: * - rpma_conn_disconnect() */ #include "mocks-rdma_cm.h" #include "conn-common.h" /* * disconnect__conn_NULL - NULL conn is invalid */ static void disconnect__conn_NULL(void **unused) { /* run test */ int ret = rpma_conn_disconnect(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * disconnect__rdma_disconnect_ERRNO - * rdma_disconnect() fails with MOCK_ERRNO */ static void disconnect__rdma_disconnect_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_disconnect, id, MOCK_CM_ID); will_return(rdma_disconnect, MOCK_ERRNO); /* run test */ int ret = rpma_conn_disconnect(cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * disconnect__success - happy day scenario */ static void disconnect__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_disconnect, id, MOCK_CM_ID); will_return(rdma_disconnect, MOCK_OK); /* run test */ int ret = rpma_conn_disconnect(cstate->conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); } static const struct CMUnitTest tests_disconnect[] = { /* rpma_conn_disconnect() unit tests */ cmocka_unit_test(disconnect__conn_NULL), cmocka_unit_test_setup_teardown( disconnect__rdma_disconnect_ERRNO, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( disconnect__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_disconnect, NULL, NULL); } rpma-1.3.0/tests/unit/conn/conn-flush.c000066400000000000000000000220501443364775400177700ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * conn-flush.c -- the rpma_flush() unit tests * * APIs covered: * - rpma_flush() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rpma-flush.h" /* * flush__conn_NULL - NULL conn is invalid */ static void flush__conn_NULL(void **unused) { /* run test */ int ret = rpma_flush(NULL, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * flush__dst_NULL - NULL dst is invalid */ static void flush__dst_NULL(void **unused) { /* run test */ int ret = 
rpma_flush(MOCK_CONN, NULL, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * flush__flags_0 - flags == 0 is invalid */ static void flush__flags_0(void **unused) { /* run test */ int ret = rpma_flush(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * flush__conn_dst_NULL_flags_0 - NULL conn, dst * and flags == 0 are invalid */ static void flush__conn_dst_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_flush(NULL, NULL, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * flush__FLUSH_PERSISTENT_NO_DIRECT_WRITE - flush fails with RPMA_E_NOSUPP * for RPMA_FLUSH_TYPE_PERSISTENT and not supported direct_write_to_pmem */ static void flush__FLUSH_PERSISTENT_NO_DIRECT_WRITE(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to false */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, false); int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_PERSISTENT, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * flush__FLUSH_PERSISTENT_USAGE_EMPTY - flush fails with RPMA_E_NOSUPP * for RPMA_FLUSH_TYPE_PERSISTENT and no usage */ static void flush__FLUSH_PERSISTENT_USAGE_EMPTY(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to true */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* configure mocks */ expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, 0); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_PERSISTENT, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * flush__FLUSH_PERSISTENT_USAGE_VISIBILITY - flush fails with RPMA_E_NOSUPP * for RPMA_FLUSH_TYPE_PERSISTENT and RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY */ static void flush__FLUSH_PERSISTENT_USAGE_VISIBILITY(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to true */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* configure mocks */ expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_PERSISTENT, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * flush__FLUSH_VISIBILITY_USAGE_EMPTY - flush fails with RPMA_E_NOSUPP * for RPMA_FLUSH_TYPE_VISIBILITY and no usage */ static void flush__FLUSH_VISIBILITY_USAGE_EMPTY(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to true */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); int ret = 
rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* configure mocks */ expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, 0); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * flush__FLUSH_VISIBILITY_USAGE_PERSISTENT - flush fails with RPMA_E_NOSUPP * for RPMA_FLUSH_TYPE_VISIBILITY and RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT */ static void flush__FLUSH_VISIBILITY_USAGE_PERSISTENT(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to true */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* configure mocks */ expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * flush__success_FLUSH_TYPE_VISIBILITY - happy day scenario * for RPMA_FLUSH_TYPE_VISIBILITY */ static void flush__success_FLUSH_TYPE_VISIBILITY(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_flush_mock_execute, qp, MOCK_QP); expect_value(rpma_flush_mock_execute, flush, MOCK_FLUSH); expect_value(rpma_flush_mock_execute, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_flush_mock_execute, dst_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_flush_mock_execute, len, MOCK_LEN); expect_value(rpma_flush_mock_execute, flags, MOCK_FLAGS); expect_value(rpma_flush_mock_execute, op_context, MOCK_OP_CONTEXT); expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY); /* run test */ int ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * flush__success_FLUSH_TYPE_PERSISTENT - happy day scenario * for RPMA_FLUSH_TYPE_PERSISTENT */ static void flush__success_FLUSH_TYPE_PERSISTENT(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* set direct_write_to_pmem to true */ will_return(rpma_peer_cfg_get_direct_write_to_pmem, true); int ret = rpma_conn_apply_remote_peer_cfg(cstate->conn, MOCK_PEER_PCFG); assert_int_equal(ret, MOCK_OK); /* configure mocks for rpma_flush() */ expect_value(rpma_flush_mock_execute, qp, MOCK_QP); expect_value(rpma_flush_mock_execute, flush, MOCK_FLUSH); expect_value(rpma_flush_mock_execute, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_flush_mock_execute, dst_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_flush_mock_execute, len, MOCK_LEN); expect_value(rpma_flush_mock_execute, flags, MOCK_FLAGS); expect_value(rpma_flush_mock_execute, op_context, MOCK_OP_CONTEXT); expect_value(rpma_mr_remote_get_flush_type, mr, MOCK_RPMA_MR_REMOTE); will_return(rpma_mr_remote_get_flush_type, RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT); /* run test */ ret = rpma_flush(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_PERSISTENT, MOCK_FLAGS, MOCK_OP_CONTEXT); /* 
verify the results */ assert_int_equal(ret, MOCK_OK); } static const struct CMUnitTest tests_flush[] = { /* rpma_read() unit tests */ cmocka_unit_test(flush__conn_NULL), cmocka_unit_test(flush__dst_NULL), cmocka_unit_test(flush__flags_0), cmocka_unit_test(flush__conn_dst_NULL_flags_0), cmocka_unit_test_setup_teardown( flush__FLUSH_PERSISTENT_NO_DIRECT_WRITE, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( flush__FLUSH_PERSISTENT_USAGE_EMPTY, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( flush__FLUSH_PERSISTENT_USAGE_VISIBILITY, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( flush__FLUSH_VISIBILITY_USAGE_EMPTY, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( flush__FLUSH_VISIBILITY_USAGE_PERSISTENT, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown(flush__success_FLUSH_TYPE_VISIBILITY, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown(flush__success_FLUSH_TYPE_PERSISTENT, setup__conn_new, teardown__conn_delete), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_flush, NULL, NULL); } rpma-1.3.0/tests/unit/conn/conn-get_compl_fd.c000066400000000000000000000055071443364775400213010ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * conn-get_compl_fd.c -- the rpma_conn_get_compl_fd() unit tests * * APIs covered: * - rpma_conn_get_compl_fd() */ #include "conn-common.h" #include "conn_cfg.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "test-common.h" #include /* * get_compl_fd__conn_NULL -- conn NULL is invalid */ static void get_compl_fd__conn_NULL(void **unused) { /* run test */ int fd = 0; int ret = rpma_conn_get_compl_fd(NULL, &fd); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); assert_int_equal(fd, 0); } /* * get_compl_fd__fd_ptr_NULL -- fd_ptr NULL is invalid */ static void get_compl_fd__fd_ptr_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_get_compl_fd(cstate->conn, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_compl_fd__conn_fd_NULL -- conn and fd NULL are invalid */ static void get_compl_fd__conn_fd_NULL(void **unused) { /* run test */ int ret = rpma_conn_get_compl_fd(NULL, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_compl_fd__E_NOT_SHARED_CHNL -- rpma_conn_get_compl_fd() * fails with RPMA_E_NOT_SHARED_CHNL */ static void get_compl_fd__E_NOT_SHARED_CHNL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int fd = 0; int ret = rpma_conn_get_compl_fd(cstate->conn, &fd); /* verify the results */ assert_ptr_equal(ret, RPMA_E_NOT_SHARED_CHNL); assert_int_equal(fd, 0); } /* * get_compl_fd__success -- happy day scenario */ static void get_compl_fd__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int fd = 0; int ret = rpma_conn_get_compl_fd(cstate->conn, &fd); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(fd, MOCK_COMPLETION_FD); } /* * group_setup_get_compl_fd -- prepare resources for all tests in the group */ static int group_setup_get_compl_fd(void **unused) { Ibv_comp_channel.fd = MOCK_COMPLETION_FD; return 0; } static const struct CMUnitTest tests_get_compl_fd[] = { /* rpma_conn_get_compl_fd() unit tests */ cmocka_unit_test(get_compl_fd__conn_NULL), cmocka_unit_test_setup_teardown( 
get_compl_fd__fd_ptr_NULL, setup__conn_new, teardown__conn_delete), cmocka_unit_test(get_compl_fd__conn_fd_NULL), cmocka_unit_test_prestate_setup_teardown( get_compl_fd__E_NOT_SHARED_CHNL, setup__conn_new, teardown__conn_delete, &Conn_no_rcq_no_channel), cmocka_unit_test_prestate_setup_teardown( get_compl_fd__success, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_compl_fd, group_setup_get_compl_fd, NULL); } rpma-1.3.0/tests/unit/conn/conn-get_cq_rcq.c000066400000000000000000000054201443364775400207600ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-get_cq_rcq.c -- the rpma_conn_get_cq/rcq() unit tests * * APIs covered: * - rpma_conn_get_cq() * - rpma_conn_get_rcq() */ #include "conn-common.h" #include "mocks-ibverbs.h" /* * get_cq__conn_NULL -- conn NULL is invalid */ static void get_cq__conn_NULL(void **unused) { struct rpma_cq *cq = NULL; /* run test */ int ret = rpma_conn_get_cq(NULL, &cq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_cq__cq_ptr_NULL -- cq_ptr NULL is invalid */ static void get_cq__cq_ptr_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_get_cq(cstate->conn, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_cq__success -- happy day scenario */ static void get_cq__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; struct rpma_cq *cq = NULL; /* run test */ int ret = rpma_conn_get_cq(cstate->conn, &cq); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(cq, MOCK_RPMA_CQ); } /* * get_rcq__conn_NULL -- conn NULL is invalid */ static void get_rcq__conn_NULL(void **unused) { struct rpma_cq *rcq = NULL; /* run test */ int ret = rpma_conn_get_rcq(NULL, &rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_rcq__rcq_ptr_NULL -- rcq_ptr NULL is invalid */ static void get_rcq__rcq_ptr_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_get_rcq(cstate->conn, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_rcq__success -- happy day scenario */ static void get_rcq__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; struct rpma_cq *rcq = NULL; /* run test */ int ret = rpma_conn_get_rcq(cstate->conn, &rcq); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(rcq, cstate->rcq); } static const struct CMUnitTest tests_get_cq_rcq[] = { /* rpma_conn_get_cq() unit tests */ cmocka_unit_test(get_cq__conn_NULL), cmocka_unit_test_setup_teardown( get_cq__cq_ptr_NULL, setup__conn_new, teardown__conn_delete), CONN_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ_CHANNEL( get_cq__success, setup__conn_new, teardown__conn_delete), /* rpma_conn_get_rcq() unit tests */ cmocka_unit_test(get_rcq__conn_NULL), cmocka_unit_test_setup_teardown( get_rcq__rcq_ptr_NULL, setup__conn_new, teardown__conn_delete), CONN_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ_CHANNEL( get_rcq__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_cq_rcq, NULL, NULL); } rpma-1.3.0/tests/unit/conn/conn-get_event_fd.c000066400000000000000000000041261443364775400213040ustar00rootroot00000000000000// SPDX-License-Identifier: 
BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn-get_event_fd.c -- the connection get_event_fd unit tests * * API covered: * - rpma_conn_get_event_fd() */ #include "conn-common.h" #include "mocks-rdma_cm.h" /* * get_event_fd__conn_NULL -- conn NULL is invalid */ static void get_event_fd__conn_NULL(void **unused) { /* run test */ int fd = 0; int ret = rpma_conn_get_event_fd(NULL, &fd); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); assert_int_equal(fd, 0); } /* * get_event_fd__fd_NULL -- fd NULL is invalid */ static void get_event_fd__fd_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_get_event_fd(cstate->conn, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_event_fd__conn_fd_NULL -- conn and fd NULL are invalid */ static void get_event_fd__conn_fd_NULL(void **unused) { /* run test */ int ret = rpma_conn_get_event_fd(NULL, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_event_fd__success -- happy day scenario */ static void get_event_fd__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int fd = 0; int ret = rpma_conn_get_event_fd(cstate->conn, &fd); /* verify the results */ assert_ptr_equal(ret, 0); assert_int_equal(fd, MOCK_FD); } /* * group_setup_get_event_fd -- prepare resources for all tests in the group */ static int group_setup_get_event_fd(void **unused) { Evch.fd = MOCK_FD; return 0; } static const struct CMUnitTest tests_get_event_fd[] = { /* rpma_conn_get_event_fd() unit tests */ cmocka_unit_test(get_event_fd__conn_NULL), cmocka_unit_test_setup_teardown( get_event_fd__fd_NULL, setup__conn_new, teardown__conn_delete), cmocka_unit_test(get_event_fd__conn_fd_NULL), cmocka_unit_test_setup_teardown( get_event_fd__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_event_fd, group_setup_get_event_fd, NULL); } rpma-1.3.0/tests/unit/conn/conn-get_qp_num.c000066400000000000000000000055601443364775400210140ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Fujitsu */ /* * conn-get_qp_num.c -- the connection get_qp_num unit tests * * API covered: * - rpma_conn_get_qp_num() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * get_qp_num__conn_NULL -- conn NULL is invalid */ static void get_qp_num__conn_NULL(void **unused) { /* run test */ uint32_t qp_num = 0; int ret = rpma_conn_get_qp_num(NULL, &qp_num); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); assert_int_equal(qp_num, 0); } /* * get_qp_num__qp_num_NULL -- qp_num NULL is invalid */ static void get_qp_num__qp_num_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_get_qp_num(cstate->conn, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_qp_num__conn_qp_num_NULL -- conn and qp_num NULL are invalid */ static void get_qp_num__conn_qp_num_NULL(void **unused) { /* run test */ int ret = rpma_conn_get_qp_num(NULL, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_qp_num__success -- happy day scenario */ static void get_qp_num__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ uint32_t qp_num = 0; int ret = rpma_conn_get_qp_num(cstate->conn, &qp_num); /* verify the results */ assert_ptr_equal(ret, MOCK_OK); 
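/* the qp_num comes from Ibv_qp.qp_num, set in group_setup_get_qp_num() */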
assert_int_equal(qp_num, MOCK_QP_NUM); } /* * get_qp_num__success_after_disconnect - get the connection's qp_num * successfully after rpma_conn_disconnect(). */ static void get_qp_num__success_after_disconnect(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_disconnect, id, MOCK_CM_ID); will_return(rdma_disconnect, MOCK_OK); /* run test */ int ret = rpma_conn_disconnect(cstate->conn); assert_int_equal(ret, MOCK_OK); uint32_t qp_num = 0; ret = rpma_conn_get_qp_num(cstate->conn, &qp_num); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(qp_num, MOCK_QP_NUM); } /* * group_setup_get_qp_num -- prepare resources for all tests in the group */ static int group_setup_get_qp_num(void **unused) { Ibv_qp.qp_num = MOCK_QP_NUM; Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_get_qp_num[] = { /* rpma_conn_get_qp_num() unit tests */ cmocka_unit_test(get_qp_num__conn_NULL), cmocka_unit_test_setup_teardown( get_qp_num__qp_num_NULL, setup__conn_new, teardown__conn_delete), cmocka_unit_test(get_qp_num__conn_qp_num_NULL), cmocka_unit_test_setup_teardown( get_qp_num__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( get_qp_num__success_after_disconnect, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_qp_num, group_setup_get_qp_num, NULL); } rpma-1.3.0/tests/unit/conn/conn-new.c000066400000000000000000000412761443364775400174530ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-new.c -- the connection new/delete unit tests * * APIs covered: * - rpma_conn_new() * - rpma_conn_delete() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "test-common.h" /* * new__peer_NULL - NULL peer is invalid */ static void new__peer_NULL(void **unused) { /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(NULL, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(conn); } /* * new__id_NULL - NULL id is invalid */ static void new__id_NULL(void **unused) { /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, NULL, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(conn); } /* * new__cq_NULL - NULL cq is invalid */ static void new__cq_NULL(void **unused) { /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, NULL, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(conn); } /* * new__conn_ptr_NULL - NULL conn_ptr is invalid */ static void new__conn_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__peer_id_cq_conn_ptr_NULL - NULL peer, id, cq and conn_ptr are * invalid */ static void new__peer_id_cq_conn_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_new(NULL, NULL, NULL, NULL, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__create_evch_ERRNO - rdma_create_event_channel() fails with MOCK_ERRNO */ static void new__create_evch_ERRNO(void **unused) { /* configure mock */ will_return(rdma_create_event_channel, NULL); 
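/* NULL above makes the mock fail; the value queued next is the errno it reports */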
will_return(rdma_create_event_channel, MOCK_ERRNO); will_return_maybe(rpma_flush_new, MOCK_OK); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(conn); } /* * new__migrate_id_ERRNO - rdma_migrate_id() fails with MOCK_ERRNO */ static void new__migrate_id_ERRNO(void **unused) { /* configure mock */ will_return(rdma_create_event_channel, MOCK_EVCH); Rdma_migrate_id_counter = RDMA_MIGRATE_COUNTER_INIT; will_return(rdma_migrate_id, MOCK_ERRNO); will_return_maybe(rpma_flush_new, MOCK_OK); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(conn); } /* * new__flush_E_NOMEM - rpma_flush_new() fails with RPMA_E_NOMEM */ static void new__flush_E_NOMEM(void **unused) { /* configure mock */ will_return(rpma_flush_new, RPMA_E_NOMEM); will_return_maybe(rdma_create_event_channel, MOCK_EVCH); Rdma_migrate_id_counter = RDMA_MIGRATE_COUNTER_INIT; will_return_maybe(rdma_migrate_id, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(conn); } /* * new__malloc_ERRNO - malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mock */ will_return(__wrap__test_malloc, MOCK_ERRNO); will_return_maybe(rdma_create_event_channel, MOCK_EVCH); Rdma_migrate_id_counter = RDMA_MIGRATE_COUNTER_INIT; will_return_maybe(rdma_migrate_id, MOCK_OK); will_return_maybe(rpma_flush_new, MOCK_OK); will_return_maybe(rpma_flush_delete, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_new(MOCK_PEER, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(conn); } /* * conn_test_lifecycle - happy day scenario */ static void conn_test_lifecycle(void **cstate_ptr) { /* * Main things are done by setup__conn_new() * and teardown__conn_delete(). */ struct conn_test_state *cstate = *cstate_ptr; /* get private data */ struct rpma_conn_private_data data; int ret = rpma_conn_get_private_data(cstate->conn, &data); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(data.ptr, cstate->data.ptr); assert_int_equal(data.len, cstate->data.len); } /* * delete__conn_ptr_NULL - conn_ptr NULL is invalid */ static void delete__conn_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__conn_NULL - *conn_ptr NULL should cause quick exit */ static void delete__conn_NULL(void **unused) { /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_delete(&conn); /* verify the results */ assert_int_equal(ret, 0); } /* * delete__flush_delete_ERRNO - rpma_flush_delete() fails with MOCK_ERRNO */ static void delete__flush_delete_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. 
*/ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, RPMA_E_PROVIDER); will_return(rpma_flush_delete, MOCK_ERRNO); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return_maybe(rdma_destroy_id, MOCK_OK); if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__flush_delete_E_INVAL - rpma_flush_delete() * fails with RPMA_E_INVAL */ static void delete__flush_delete_E_INVAL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. */ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks */ will_return(rpma_flush_delete, RPMA_E_INVAL); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return_maybe(rdma_destroy_id, MOCK_OK); if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cstate->conn); } /* * delete__rcq_delete_ERRNO - rpma_cq_delete(&conn->rcq) fails with MOCK_ERRNO */ static void delete__rcq_delete_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. 
*/ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__rcq_delete_ERRNO_subsequent_ERRNO2 -- rpma_cq_delete(&conn->rcq) * fails with MOCK_ERRNO whereas subsequent (rpma_cq_delete(&conn->cq), * rdma_destroy_id()) fail with MOCK_ERRNO2 */ static void delete__rcq_delete_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. */ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third error */ expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__cq_delete_ERRNO - rpma_cq_delete(&conn->cq) fails with MOCK_ERRNO */ static void delete__cq_delete_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. 
*/ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_OK); if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__cq_delete_ERRNO_subsequent_ERRNO2 -- rdma_destroy_id() fails * with MOCK_ERRNO2 after rpma_cq_delete(&conn->cq) failed with MOCK_ERRNO */ static void delete__cq_delete_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. */ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__destroy_id_ERRNO -- rdma_destroy_id() fails with MOCK_ERRNO */ static void delete__destroy_id_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. 
*/ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_ERRNO); if (cstate->channel) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } /* * delete__ibv_destroy_comp_channel_E_PROVIDER -- * rpma_ibv_destroy_comp_channel() fails with RPMA_E_PROVIDER */ static void delete__ibv_destroy_comp_channel_E_PROVIDER(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* shared completion channel is required in this test */ if (!cstate->channel) return; /* * Cmocka does not allow freeing an object in a test if the object was * created in the setup step whereas even failing rpma_conn_delete() * will deallocate the rpma_conn object. */ int ret = setup__conn_new((void **)&cstate); assert_int_equal(ret, 0); assert_non_null(cstate->conn); /* configure mocks: */ will_return(rpma_flush_delete, MOCK_OK); expect_value(rdma_destroy_qp, id, MOCK_CM_ID); expect_value(rpma_cq_delete, *cq_ptr, cstate->rcq); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, MOCK_CM_ID); will_return(rdma_destroy_id, MOCK_OK); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); will_return(ibv_destroy_comp_channel, MOCK_ERRNO); /* run test */ ret = rpma_conn_delete(&cstate->conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->conn); } static const struct CMUnitTest tests_new[] = { /* rpma_conn_new() unit tests */ cmocka_unit_test(new__peer_NULL), cmocka_unit_test(new__id_NULL), cmocka_unit_test(new__cq_NULL), cmocka_unit_test(new__conn_ptr_NULL), cmocka_unit_test(new__peer_id_cq_conn_ptr_NULL), cmocka_unit_test(new__create_evch_ERRNO), cmocka_unit_test(new__migrate_id_ERRNO), cmocka_unit_test(new__flush_E_NOMEM), cmocka_unit_test(new__malloc_ERRNO), /* rpma_conn_new()/_delete() lifecycle */ CONN_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ_CHANNEL( conn_test_lifecycle, setup__conn_new, teardown__conn_delete), /* rpma_conn_delete() unit tests */ cmocka_unit_test(delete__conn_ptr_NULL), cmocka_unit_test(delete__conn_NULL), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL(delete__flush_delete_ERRNO), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL(delete__flush_delete_E_INVAL), cmocka_unit_test_prestate(delete__rcq_delete_ERRNO, &Conn_with_rcq_no_channel), cmocka_unit_test_prestate( delete__rcq_delete_ERRNO_subsequent_ERRNO2, &Conn_with_rcq_no_channel), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL(delete__cq_delete_ERRNO), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL( delete__cq_delete_ERRNO_subsequent_ERRNO2), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL(delete__destroy_id_ERRNO), CONN_TEST_WITH_AND_WITHOUT_RCQ_CHANNEL( delete__ibv_destroy_comp_channel_E_PROVIDER), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_new, NULL, NULL); } 
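/*
 * A minimal sketch of the __wrap__test_malloc() mock the tests above rely on
 * (its return codes are queued via will_return(__wrap__test_malloc, ...) and
 * the wrapping is enabled by linking with -Wl,--wrap=_test_malloc, as set in
 * the CMakeLists.txt of this directory). The real wrapper lives in
 * tests/unit/common/mocks-stdlib.c, which is not reproduced here, and may
 * differ in details; the body below only assumes the usual cmocka convention
 * of popping an errno value and falling through to the real allocator on
 * success.
 */
#include <errno.h>
#include <stddef.h>

#include "cmocka_headers.h"

void *__real__test_malloc(size_t size, const char *file, int line);

/*
 * __wrap__test_malloc -- _test_malloc() mock: return NULL and set the queued
 * errno, or pass the call through to cmocka's real allocator
 */
void *
__wrap__test_malloc(size_t size, const char *file, int line)
{
	int err = mock_type(int);
	if (err) {
		errno = err;
		return NULL;
	}

	return __real__test_malloc(size, file, line);
}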
rpma-1.3.0/tests/unit/conn/conn-next_event.c000066400000000000000000000432041443364775400210320ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn-next-event.c -- the connection next_event unit tests * * API covered: * - rpma_conn_next_event() */ #include "conn-common.h" #include "mocks-rdma_cm.h" /* * next_event__conn_NULL - NULL conn is invalid */ static void next_event__conn_NULL(void **unused) { /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(NULL, &c_event); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__event_NULL - NULL event is invalid */ static void next_event__event_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_next_event(cstate->conn, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * next_event__conn_NULL_event_NULL - NULL conn and NULL event are invalid */ static void next_event__conn_NULL_event_NULL(void **unused) { /* run test */ int ret = rpma_conn_next_event(NULL, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * next_event__get_cm_event_ERRNO - * rdma_get_cm_event() fails with MOCK_ERRNO */ static void next_event__get_cm_event_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); will_return(rdma_get_cm_event, NULL); will_return(rdma_get_cm_event, MOCK_ERRNO); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__get_cm_event_ENODATA - * rdma_get_cm_event() fails with ENODATA */ static void next_event__get_cm_event_ENODATA(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); will_return(rdma_get_cm_event, NULL); will_return(rdma_get_cm_event, ENODATA); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, RPMA_E_NO_EVENT); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__event__ADDR_ERROR - * RDMA_CM_EVENT_ADDR_ERROR is unexpected */ static void next_event__event__ADDR_ERROR(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_ADDR_ERROR; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, RPMA_E_UNKNOWN); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__event_UNREACHABLE_ack_ERRNO - * rdma_ack_cm_event() fails with MOCK_ERRNO after obtaining * an RDMA_CM_EVENT_UNREACHABLE event */ static void next_event__event_UNREACHABLE_ack_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_UNREACHABLE; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, 
event, &event); will_return(rdma_ack_cm_event, MOCK_ERRNO); expect_value(rpma_private_data_delete, pdata->ptr, NULL); expect_value(rpma_private_data_delete, pdata->len, 0); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__data_store_E_NOMEM - rpma_private_data_store() fails * with RPMA_E_NOMEM */ static void next_event__data_store_E_NOMEM(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_ESTABLISHED; will_return(rdma_get_cm_event, &event); will_return(rpma_private_data_store, RPMA_E_NOMEM); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_int_equal(c_event, RPMA_CONN_UNDEFINED); } /* * next_event__success_no_data_ESTABLISHED_no_data - happy day scenario, * no private data in the connection and no private data in the event */ static void next_event__success_no_data_ESTABLISHED_no_data(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks for rpma_conn_next_event() */ expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_ESTABLISHED; /* no private data in the event */ event.param.conn.private_data = NULL; event.param.conn.private_data_len = 0; will_return(rdma_get_cm_event, &event); will_return(rpma_private_data_store, MOCK_OK); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_ESTABLISHED); /* get private data for verification */ struct rpma_conn_private_data check_data; ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, NULL); assert_int_equal(check_data.len, 0); /* set expected private data */ cstate->data.ptr = NULL; cstate->data.len = 0; } /* * next_event__success_no_data_ESTABLISHED_with_data - happy day scenario * no private data in the connection and with private data in the event */ static void next_event__success_no_data_ESTABLISHED_with_data(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks for rpma_conn_next_event() */ expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_ESTABLISHED; /* with private data in the event */ event.param.conn.private_data = MOCK_PRIVATE_DATA; event.param.conn.private_data_len = MOCK_PDATA_LEN; will_return(rdma_get_cm_event, &event); will_return(rpma_private_data_store, MOCK_OK); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_ESTABLISHED); /* get private data for verification */ struct 
rpma_conn_private_data check_data; ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* set expected private data */ cstate->data.ptr = MOCK_PRIVATE_DATA; cstate->data.len = MOCK_PDATA_LEN; } /* * next_event__success_with_data_ESTABLISHED_no_data - happy day scenario, * with private data in the connection and with no private data in the event */ static void next_event__success_with_data_ESTABLISHED_no_data(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks for rpma_conn_transfer_private_data() */ struct rpma_conn_private_data data; data.ptr = MOCK_PRIVATE_DATA; data.len = MOCK_PDATA_LEN; /* transfer the private data to the connection (a take over) */ rpma_conn_transfer_private_data(cstate->conn, &data); /* verify the source of the private data is zeroed */ assert_ptr_equal(data.ptr, NULL); assert_int_equal(data.len, 0); /* get the private data */ struct rpma_conn_private_data check_data; int ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* configure mocks for rpma_conn_next_event() */ expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_ESTABLISHED; /* no private data in the event */ event.param.conn.private_data = NULL; event.param.conn.private_data_len = 0; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_ESTABLISHED); /* get private data for verification */ ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* set expected private data */ cstate->data.ptr = MOCK_PRIVATE_DATA; cstate->data.len = MOCK_PDATA_LEN; } /* * next_event__success_with_data_ESTABLISHED_with_data - happy day scenario, * with private data in the connection and with another private data * in the event */ static void next_event__success_with_data_ESTABLISHED_with_data(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks for rpma_conn_transfer_private_data() */ struct rpma_conn_private_data data; data.ptr = MOCK_PRIVATE_DATA; data.len = MOCK_PDATA_LEN; /* transfer the private data to the connection (a take over) */ rpma_conn_transfer_private_data(cstate->conn, &data); /* verify the source of the private data is zeroed */ assert_ptr_equal(data.ptr, NULL); assert_int_equal(data.len, 0); /* get the private data */ struct rpma_conn_private_data check_data; int ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* configure mocks for rpma_conn_next_event() */ expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct 
rdma_cm_event event; event.event = RDMA_CM_EVENT_ESTABLISHED; /* with another private data in the event */ event.param.conn.private_data = MOCK_PRIVATE_DATA_2; event.param.conn.private_data_len = MOCK_PDATA_LEN_2; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_ESTABLISHED); /* get private data for verification */ ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* set expected private data */ cstate->data.ptr = MOCK_PRIVATE_DATA; cstate->data.len = MOCK_PDATA_LEN; } /* * next_event__success_CONNECT_ERROR - happy day scenario */ static void next_event__success_CONNECT_ERROR(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_CONNECT_ERROR; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_LOST); } /* * next_event__success_DEVICE_REMOVAL - happy day scenario */ static void next_event__success_DEVICE_REMOVAL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_LOST); } /* * next_event__success_DISCONNECTED - happy day scenario */ static void next_event__success_DISCONNECTED(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_DISCONNECTED; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_CLOSED); } /* * next_event__success_TIMEWAIT_EXIT - happy day scenario */ static void next_event__success_TIMEWAIT_EXIT(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); 
assert_int_equal(c_event, RPMA_CONN_CLOSED); } /* * next_event__success_REJECTED - happy day scenario */ static void next_event__success_REJECTED(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_REJECTED; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_REJECTED); } /* * next_event__success_UNREACHABLE - happy day scenario */ static void next_event__success_UNREACHABLE(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; expect_value(rdma_get_cm_event, channel, MOCK_EVCH); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_UNREACHABLE; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ enum rpma_conn_event c_event = RPMA_CONN_UNDEFINED; int ret = rpma_conn_next_event(cstate->conn, &c_event); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(c_event, RPMA_CONN_UNREACHABLE); } static const struct CMUnitTest tests_next_event[] = { /* rpma_conn_next_event() unit tests */ cmocka_unit_test(next_event__conn_NULL), cmocka_unit_test_setup_teardown( next_event__event_NULL, setup__conn_new, teardown__conn_delete), cmocka_unit_test(next_event__conn_NULL_event_NULL), cmocka_unit_test_setup_teardown( next_event__get_cm_event_ERRNO, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__get_cm_event_ENODATA, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__event__ADDR_ERROR, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__event_UNREACHABLE_ack_ERRNO, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__data_store_E_NOMEM, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_no_data_ESTABLISHED_no_data, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_no_data_ESTABLISHED_with_data, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_with_data_ESTABLISHED_no_data, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_with_data_ESTABLISHED_with_data, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_CONNECT_ERROR, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_DEVICE_REMOVAL, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_DISCONNECTED, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_TIMEWAIT_EXIT, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_REJECTED, setup__conn_new, teardown__conn_delete), cmocka_unit_test_setup_teardown( next_event__success_UNREACHABLE, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_next_event, NULL, NULL); } 
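The group above exercises, through mocks, every RDMA CM to librpma event mapping of rpma_conn_next_event(). For orientation only, a minimal caller-side sketch is shown below; it is not part of the test sources, wait_until_established() is a hypothetical helper name, and it assumes nothing beyond the public symbols already referenced in the tests above.

#include <librpma.h>

/*
 * wait_until_established -- hypothetical helper (not part of the test sources) that
 * consumes a single connection event, as the happy-day cases above do via mocks
 */
static int
wait_until_established(struct rpma_conn *conn)
{
	enum rpma_conn_event event = RPMA_CONN_UNDEFINED;

	int ret = rpma_conn_next_event(conn, &event);
	if (ret)
		return ret; /* e.g. RPMA_E_PROVIDER, RPMA_E_NO_EVENT or RPMA_E_NOMEM */

	/* anything other than RPMA_CONN_ESTABLISHED means the connection did not come up */
	return (event == RPMA_CONN_ESTABLISHED) ? 0 : -1;
}

A real application would typically retry on RPMA_E_NO_EVENT rather than fail immediately, mirroring the ENODATA case covered above.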
rpma-1.3.0/tests/unit/conn/conn-private_data.c000066400000000000000000000056041443364775400213200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * conn-private-data.c -- the connection's private data unit tests * * APIs covered: * - rpma_conn_transfer_private_data() * - rpma_conn_get_private_data() */ #include "conn-common.h" /* * get_private_data__conn_NULL - NULL conn is invalid */ static void get_private_data__conn_NULL(void **unused) { /* get private data */ struct rpma_conn_private_data data = {0}; int ret = rpma_conn_get_private_data(NULL, &data); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_ptr_equal(data.ptr, NULL); assert_int_equal(data.len, 0); } /* * get_private_data__pdata_NULL - NULL pdata is invalid */ static void get_private_data__pdata_NULL(void **unused) { /* get private data */ struct rpma_conn *conn = MOCK_CONN; int ret = rpma_conn_get_private_data(conn, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_private_data__conn_NULL_pdata_NULL - * (conn == NULL && pdata == NULL) is invalid */ static void get_private_data__conn_NULL_pdata_NULL(void **unused) { /* get private data */ int ret = rpma_conn_get_private_data(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * transfer_private_data__success - rpma_conn_transfer_private_data() succeeds */ static void transfer_private_data__success(void **cstate_ptr) { /* * Common things are done by setup__conn_new() * and teardown__conn_delete(). */ struct conn_test_state *cstate = *cstate_ptr; /* configure mocks for rpma_conn_transfer_private_data() */ cstate->data.ptr = MOCK_PRIVATE_DATA; cstate->data.len = MOCK_PDATA_LEN; /* transfer private data */ rpma_conn_transfer_private_data(cstate->conn, &cstate->data); /* verify the source of the private data is zeroed */ assert_ptr_equal(cstate->data.ptr, NULL); assert_int_equal(cstate->data.len, 0); /* get private data */ struct rpma_conn_private_data check_data; int ret = rpma_conn_get_private_data(cstate->conn, &check_data); /* verify the results of rpma_conn_get_private_data() */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(check_data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(check_data.len, MOCK_PDATA_LEN); /* set expected private data which will be used during teardown */ cstate->data.ptr = MOCK_PRIVATE_DATA; cstate->data.len = MOCK_PDATA_LEN; } static const struct CMUnitTest tests_private_data[] = { /* rpma_conn_transfer_private_data() unit tests */ cmocka_unit_test_setup_teardown( transfer_private_data__success, setup__conn_new, teardown__conn_delete), /* rpma_conn_get_private_data() unit tests */ cmocka_unit_test(get_private_data__conn_NULL), cmocka_unit_test(get_private_data__pdata_NULL), cmocka_unit_test(get_private_data__conn_NULL_pdata_NULL), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_private_data, NULL, NULL); } rpma-1.3.0/tests/unit/conn/conn-read.c000066400000000000000000000117121443364775400175650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * conn-read.c -- the rpma_read() unit tests * * APIs covered: * - rpma_read() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * read__conn_NULL - NULL conn is invalid */ static void read__conn_NULL(void **unused) { /* run test */ int ret = rpma_read(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_RPMA_MR_REMOTE, 
MOCK_REMOTE_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__dst_NULL - NULL dst is invalid */ static void read__dst_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__src_NULL_dst_not_NULL -- NULL src and not NULL dst are invalid */ static void read__src_NULL_dst_not_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, MOCK_RPMA_MR_LOCAL, 0, NULL, 0, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__src_NULL_dst_offset_not_NULL -- NULL src and * dst_offset != 0 are invalid */ static void read__src_NULL_dst_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, NULL, 0, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__src_NULL_src_offset_not_NULL -- NULL src and * src_offset != 0 are invalid */ static void read__src_NULL_src_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, NULL, 0, NULL, MOCK_REMOTE_OFFSET, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__src_NULL_len_not_NULL -- NULL src and len != 0 are invalid */ static void read__src_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, NULL, 0, NULL, 0, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__src_NULL_dst_offsets_len_not_NULL - NULL src is invalid */ static void read__src_NULL_dst_offsets_len_not_NULL(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, NULL, MOCK_REMOTE_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__flags_0 - flags == 0 is invalid */ static void read__flags_0(void **unused) { /* run test */ int ret = rpma_read(MOCK_CONN, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__conn_dst_NULL_flags_0 - NULL conn, dst * and flags == 0 are invalid */ static void read__conn_dst_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_read(NULL, NULL, MOCK_LOCAL_OFFSET, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * read__success - happy day scenario */ static void read__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_read, qp, MOCK_QP); expect_value(rpma_mr_read, dst, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_read, dst_offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_read, src, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_read, src_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_mr_read, len, MOCK_LEN); expect_value(rpma_mr_read, flags, MOCK_FLAGS); expect_value(rpma_mr_read, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_read, MOCK_OK); /* run test */ int ret = rpma_read(cstate->conn, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * 
group_setup_read -- prepare resources for all tests in the group */ static int group_setup_read(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_read[] = { /* rpma_read() unit tests */ cmocka_unit_test(read__conn_NULL), cmocka_unit_test(read__dst_NULL), cmocka_unit_test(read__src_NULL_dst_not_NULL), cmocka_unit_test(read__src_NULL_dst_offset_not_NULL), cmocka_unit_test(read__src_NULL_src_offset_not_NULL), cmocka_unit_test(read__src_NULL_len_not_NULL), cmocka_unit_test(read__src_NULL_dst_offsets_len_not_NULL), cmocka_unit_test(read__flags_0), cmocka_unit_test(read__conn_dst_NULL_flags_0), cmocka_unit_test_setup_teardown(read__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_read, group_setup_read, NULL); } rpma-1.3.0/tests/unit/conn/conn-recv.c000066400000000000000000000053551443364775400176170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * conn-recv.c -- the rpma_recv() unit tests * * APIs covered: * - rpma_recv() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * recv__conn_NULL - NULL conn is invalid */ static void recv__conn_NULL(void **unused) { /* run test */ int ret = rpma_recv(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_offset_not_NULL - NULL dst * and not NULL offset is invalid */ static void recv__dst_NULL_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_recv(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_len_not_NULL - NULL dst * and not NULL len is invalid */ static void recv__dst_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_recv(MOCK_CONN, NULL, 0, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_offset_len_not_NULL - NULL dst * and not NULL offset or len are invalid */ static void recv__dst_NULL_offset_len_not_NULL(void **unused) { /* run test */ int ret = rpma_recv(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__success - happy day scenario */ static void recv__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_recv, qp, MOCK_QP); expect_value(rpma_mr_recv, dst, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_recv, offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_recv, len, MOCK_LEN); expect_value(rpma_mr_recv, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_recv, MOCK_OK); /* run test */ int ret = rpma_recv(cstate->conn, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_recv -- prepare resources for all tests in the group */ static int group_setup_recv(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_recv[] = { /* rpma_recv() unit tests */ cmocka_unit_test(recv__conn_NULL), cmocka_unit_test(recv__dst_NULL_offset_not_NULL), cmocka_unit_test(recv__dst_NULL_len_not_NULL), cmocka_unit_test(recv__dst_NULL_offset_len_not_NULL), cmocka_unit_test_setup_teardown(recv__success, 
setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_recv, group_setup_recv, NULL); } rpma-1.3.0/tests/unit/conn/conn-send.c000066400000000000000000000071331443364775400176050ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * conn-send.c -- the rpma_send() unit tests * * APIs covered: * - rpma_send() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * send__src_NULL_offset_not_NULL -- NULL src * and not NULL offset is invalid */ static void send__src_NULL_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_send(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__src_NULL_len_not_NULL -- NULL src * and not NULL len is invalid */ static void send__src_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_send(MOCK_CONN, NULL, 0, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__src_NULL_offset_len_not_NULL -- NULL src * and not NULL offset or len are invalid */ static void send__src_NULL_offset_len_not_NULL(void **unused) { /* run test */ int ret = rpma_send(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__conn_NULL -- NULL conn is invalid */ static void send__conn_NULL(void **unused) { /* run test */ int ret = rpma_send(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__flags_0 -- flags == 0 is invalid */ static void send__flags_0(void **unused) { /* run test */ int ret = rpma_send(MOCK_CONN, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__conn_NULL_flags_0 -- NULL conn * and flags == 0 are invalid */ static void send__conn_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_send(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send__success -- happy day scenario */ static void send__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_send, qp, MOCK_QP); expect_value(rpma_mr_send, src, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_send, offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_send, len, MOCK_LEN); expect_value(rpma_mr_send, flags, MOCK_FLAGS); expect_value(rpma_mr_send, operation, IBV_WR_SEND); expect_value(rpma_mr_send, imm, 0); expect_value(rpma_mr_send, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_send, MOCK_OK); /* run test */ int ret = rpma_send(cstate->conn, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_send -- prepare resources for all tests in the group */ static int group_setup_send(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_send[] = { /* rpma_send() unit tests */ cmocka_unit_test(send__src_NULL_offset_not_NULL), cmocka_unit_test(send__src_NULL_len_not_NULL), cmocka_unit_test(send__src_NULL_offset_len_not_NULL), cmocka_unit_test(send__conn_NULL), 
cmocka_unit_test(send__flags_0), cmocka_unit_test(send__conn_NULL_flags_0), cmocka_unit_test_setup_teardown(send__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_send, group_setup_send, NULL); } rpma-1.3.0/tests/unit/conn/conn-send_with_imm.c000066400000000000000000000100561443364775400215000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2020 Fujitsu */ /* Copyright 2021, Intel Corporation */ /* * conn-send_with_imm.c -- the rpma_send_with_imm() unit tests * * APIs covered: * - rpma_send_with_imm() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * send_with_imm__src_NULL_offset_not_NULL -- NULL src * and not NULL offset is invalid */ static void send_with_imm__src_NULL_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_send_with_imm(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__src_NULL_len_not_NULL -- NULL src * and not NULL len is invalid */ static void send_with_imm__src_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_send_with_imm(MOCK_CONN, NULL, 0, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__src_NULL_offset_len_not_NULL -- NULL src * and not NULL offset or len are invalid */ static void send_with_imm__src_NULL_offset_len_not_NULL(void **unused) { /* run test */ int ret = rpma_send_with_imm(MOCK_CONN, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__conn_NULL -- NULL conn is invalid */ static void send_with_imm__conn_NULL(void **unused) { /* run test */ int ret = rpma_send_with_imm(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__flags_0 -- flags == 0 is invalid */ static void send_with_imm__flags_0(void **unused) { /* run test */ int ret = rpma_send_with_imm(MOCK_CONN, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__conn_NULL_flags_0 -- NULL conn, src * and flags == 0 are invalid */ static void send_with_imm__conn_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_send_with_imm(NULL, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * send_with_imm__success -- happy day scenario */ static void send_with_imm__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_send, qp, MOCK_QP); expect_value(rpma_mr_send, src, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_send, offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_send, len, MOCK_LEN); expect_value(rpma_mr_send, flags, MOCK_FLAGS); expect_value(rpma_mr_send, operation, IBV_WR_SEND_WITH_IMM); expect_value(rpma_mr_send, imm, MOCK_IMM_DATA); expect_value(rpma_mr_send, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_send, MOCK_OK); /* run test */ int ret = rpma_send_with_imm(cstate->conn, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ 
assert_int_equal(ret, MOCK_OK); } /* * group_setup_send -- prepare resources for all tests in the group */ static int group_setup_send(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_send_with_imm[] = { /* rpma_send_with_imm() unit tests */ cmocka_unit_test(send_with_imm__src_NULL_offset_not_NULL), cmocka_unit_test(send_with_imm__src_NULL_len_not_NULL), cmocka_unit_test(send_with_imm__src_NULL_offset_len_not_NULL), cmocka_unit_test(send_with_imm__conn_NULL), cmocka_unit_test(send_with_imm__flags_0), cmocka_unit_test(send_with_imm__conn_NULL_flags_0), cmocka_unit_test_setup_teardown(send_with_imm__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_send_with_imm, group_setup_send, NULL); } rpma-1.3.0/tests/unit/conn/conn-wait.c000066400000000000000000000141471443364775400176230ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * conn-wait.c -- the rpma_conn_wait() unit tests * * APIs covered: * - rpma_conn_wait() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "test-common.h" /* * wait__conn_NULL - NULL conn is invalid */ static void wait__conn_NULL(void **unused) { /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(NULL, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cq); } /* * wait__cq_NULL - NULL cq is invalid */ static void wait__cq_NULL(void **unused) { /* run test */ bool is_rcq; int ret = rpma_conn_wait(MOCK_CONN, 0, NULL, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * wait__channel_not_shared - NULL conn->channel is invalid */ static void wait__channel_not_shared(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(cstate->conn, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_NOT_SHARED_CHNL); assert_null(cq); } /* * wait__get_cq_event_ERRNO - ibv_get_cq_event() fails with MOCK_ERRNO */ static void wait__get_cq_event_ERRNO(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_ERRNO); /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(cstate->conn, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_NO_COMPLETION); assert_null(cq); } /* * wait__get_cq_event_UNKNOWN - ibv_get_cq_event() returned unknown CQ */ static void wait__get_cq_event_UNKNOWN(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_CQ_UNKNOWN); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_CQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_CQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_RCQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_RCQ); /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(cstate->conn, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_UNKNOWN); assert_null(cq); } /* * wait__req_notify_cq_ERRNO - ibv_req_notify_cq() fails with MOCK_ERRNO */ static void wait__req_notify_cq_ERRNO(void **cstate_ptr) { struct 
conn_test_state *cstate = *cstate_ptr; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_CQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_CQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_CQ); expect_value(ibv_ack_cq_events, cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_ERRNO); /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(cstate->conn, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cq); assert_int_equal(is_rcq, false); } /* * wait__success_is_rcq_NULL - happy day scenario without is_rcq */ static void wait__success_is_rcq_NULL(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_RCQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_CQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_CQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_RCQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_RCQ); expect_value(ibv_ack_cq_events, cq, MOCK_IBV_RCQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_RCQ); will_return(ibv_req_notify_cq_mock, MOCK_OK); /* run test */ struct rpma_cq *cq = NULL; int ret = rpma_conn_wait(cstate->conn, 0, &cq, NULL); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(cq, MOCK_RPMA_RCQ); } /* * wait__success - happy day scenario */ static void wait__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_RCQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_CQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_CQ); expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_RCQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_RCQ); expect_value(ibv_ack_cq_events, cq, MOCK_IBV_RCQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_RCQ); will_return(ibv_req_notify_cq_mock, MOCK_OK); /* run test */ struct rpma_cq *cq = NULL; bool is_rcq; int ret = rpma_conn_wait(cstate->conn, 0, &cq, &is_rcq); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(cq, MOCK_RPMA_RCQ); assert_int_equal(is_rcq, true); } static const struct CMUnitTest tests_new[] = { /* rpma_conn_wait() unit tests */ cmocka_unit_test(wait__conn_NULL), cmocka_unit_test(wait__cq_NULL), cmocka_unit_test_setup_teardown(wait__channel_not_shared, setup__conn_new, teardown__conn_delete), cmocka_unit_test_prestate_setup_teardown(wait__get_cq_event_ERRNO, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel), cmocka_unit_test_prestate_setup_teardown(wait__get_cq_event_UNKNOWN, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel), cmocka_unit_test_prestate_setup_teardown(wait__req_notify_cq_ERRNO, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel), cmocka_unit_test_prestate_setup_teardown(wait__success_is_rcq_NULL, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel), cmocka_unit_test_prestate_setup_teardown(wait__success, setup__conn_new, teardown__conn_delete, &Conn_with_rcq_with_channel) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_new, group_setup_common_conn, NULL); } 
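The cases above assume a connection created with a shared completion channel; without one, rpma_conn_wait() returns RPMA_E_NOT_SHARED_CHNL, as the first fixture-based test shows. Below is a minimal caller-side sketch, not part of the test sources: drain_one_completion() is a hypothetical helper name and rpma_cq_get_wc() is assumed here from the public librpma CQ API.

#include <stdbool.h>
#include <infiniband/verbs.h>
#include <librpma.h>

/*
 * drain_one_completion -- hypothetical helper (not part of the test sources); it blocks on
 * the connection's shared completion channel and reads one completion from whichever CQ
 * (the main CQ or the receive CQ) was signalled
 */
static int
drain_one_completion(struct rpma_conn *conn)
{
	struct rpma_cq *cq = NULL;
	bool is_rcq = false;

	int ret = rpma_conn_wait(conn, 0, &cq, &is_rcq);
	if (ret)
		return ret; /* e.g. RPMA_E_NOT_SHARED_CHNL or RPMA_E_NO_COMPLETION */

	(void) is_rcq; /* true when the receive CQ was signalled */

	struct ibv_wc wc;
	int got = 0;
	/* rpma_cq_get_wc() is assumed here; a caller would also check wc.status */
	return rpma_cq_get_wc(cq, 1, &wc, &got);
}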
rpma-1.3.0/tests/unit/conn/conn-write.c000066400000000000000000000122411443364775400200020ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * conn-write.c -- the rpma_write() unit tests * * APIs covered: * - rpma_write() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * write__conn_NULL -- NULL conn is invalid */ static void write__conn_NULL(void **unused) { /* run test */ int ret = rpma_write(NULL, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__dst_NULL -- NULL dst is invalid */ static void write__dst_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, NULL, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__src_NULL_dst_not_NULL -- NULL src and not NULL dst are invalid */ static void write__src_NULL_dst_not_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, 0, NULL, 0, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__src_NULL_dst_offset_not_NULL -- NULL src and * dst_offset != 0 are invalid */ static void write__src_NULL_dst_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, NULL, MOCK_REMOTE_OFFSET, NULL, 0, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__src_NULL_src_offset_not_NULL -- NULL src and * src_offset != 0 are invalid */ static void write__src_NULL_src_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, NULL, 0, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__src_NULL_len_not_NULL -- NULL src and len != 0 are invalid */ static void write__src_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, NULL, 0, NULL, 0, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__src_NULL_dst_offsets_len_not_NULL -- NULL src is invalid * and not NULL dst, dst_offset, src_offset or len */ static void write__src_NULL_dst_offsets_len_not_NULL(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__flags_0 -- flags == 0 is invalid */ static void write__flags_0(void **unused) { /* run test */ int ret = rpma_write(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__conn_dst_NULL_flags_0 -- NULL conn, dst * and flags == 0 are invalid */ static void write__conn_dst_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_write(NULL, NULL, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write__success -- happy day scenario */ static void write__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_write, qp, MOCK_QP); 
expect_value(rpma_mr_write, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_write, dst_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_mr_write, src, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_write, src_offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_write, len, MOCK_LEN); expect_value(rpma_mr_write, flags, MOCK_FLAGS); expect_value(rpma_mr_write, operation, IBV_WR_RDMA_WRITE); expect_value(rpma_mr_write, imm, 0); expect_value(rpma_mr_write, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_write, MOCK_OK); /* run test */ int ret = rpma_write(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_write -- prepare resources for all tests in the group */ static int group_setup_write(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_write[] = { /* rpma_write() unit tests */ cmocka_unit_test(write__conn_NULL), cmocka_unit_test(write__dst_NULL), cmocka_unit_test(write__src_NULL_dst_not_NULL), cmocka_unit_test(write__src_NULL_dst_offset_not_NULL), cmocka_unit_test(write__src_NULL_src_offset_not_NULL), cmocka_unit_test(write__src_NULL_len_not_NULL), cmocka_unit_test(write__src_NULL_dst_offsets_len_not_NULL), cmocka_unit_test(write__flags_0), cmocka_unit_test(write__conn_dst_NULL_flags_0), cmocka_unit_test_setup_teardown(write__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_write, group_setup_write, NULL); } rpma-1.3.0/tests/unit/conn/conn-write_with_imm.c000066400000000000000000000133531443364775400217040ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2021 Fujitsu */ /* Copyright 2021-2022, Intel Corporation */ /* * conn-write_with_imm.c -- the rpma_write_with_imm() unit tests * * APIs covered: * - rpma_write_with_imm() */ #include "conn-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * write_with_imm__conn_NULL -- NULL conn is invalid */ static void write_with_imm__conn_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(NULL, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__dst_NULL -- NULL dst is invalid */ static void write_with_imm__dst_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, NULL, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__src_NULL_dst_not_NULL -- NULL src and not NULL dst is invalid */ static void write_with_imm__src_NULL_dst_not_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, MOCK_RPMA_MR_REMOTE, 0, NULL, 0, 0, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__src_NULL_dst_offset_not_NULL -- NULL src and * dst_offset != 0 are invalid */ static void write_with_imm__src_NULL_dst_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, NULL, MOCK_REMOTE_OFFSET, NULL, 0, 0, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * 
write_with_imm__src_NULL_src_offset_not_NULL -- NULL src and * src_offset != 0 are invalid */ static void write_with_imm__src_NULL_src_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, NULL, 0, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__src_NULL_len_not_NULL -- NULL src and len != 0 are invalid */ static void write_with_imm__src_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, NULL, 0, NULL, 0, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__src_NULL_dst_offsets_len_not_NULL -- NULL src is invalid * and not NULL dst, dst_offset, src_offset or len */ static void write_with_imm__src_NULL_dst_offsets_len_not_NULL(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__flags_0 -- flags == 0 is invalid */ static void write_with_imm__flags_0(void **unused) { /* run test */ int ret = rpma_write_with_imm(MOCK_CONN, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__conn_dst_NULL_flags_0 -- NULL conn, dst * and flags == 0 are invalid */ static void write_with_imm__conn_dst_NULL_flags_0(void **unused) { /* run test */ int ret = rpma_write_with_imm(NULL, NULL, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, 0, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * write_with_imm__success -- happy day scenario */ static void write_with_imm__success(void **cstate_ptr) { struct conn_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_write, qp, MOCK_QP); expect_value(rpma_mr_write, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_write, dst_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_mr_write, src, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_write, src_offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_write, len, MOCK_LEN); expect_value(rpma_mr_write, flags, MOCK_FLAGS); expect_value(rpma_mr_write, operation, IBV_WR_RDMA_WRITE_WITH_IMM); expect_value(rpma_mr_write, imm, MOCK_IMM_DATA); expect_value(rpma_mr_write, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_write, MOCK_OK); /* run test */ int ret = rpma_write_with_imm(cstate->conn, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_FLAGS, MOCK_IMM_DATA, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_write -- prepare resources for all tests in the group */ static int group_setup_write(void **unused) { /* set value of QP in mock of CM ID */ Cm_id.qp = MOCK_QP; return 0; } static const struct CMUnitTest tests_write[] = { /* rpma_write_with_imm() unit tests */ cmocka_unit_test(write_with_imm__conn_NULL), cmocka_unit_test(write_with_imm__dst_NULL), cmocka_unit_test(write_with_imm__src_NULL_dst_not_NULL), cmocka_unit_test(write_with_imm__src_NULL_dst_offset_not_NULL), cmocka_unit_test(write_with_imm__src_NULL_src_offset_not_NULL), cmocka_unit_test(write_with_imm__src_NULL_len_not_NULL), 
cmocka_unit_test(write_with_imm__src_NULL_dst_offsets_len_not_NULL), cmocka_unit_test(write_with_imm__flags_0), cmocka_unit_test(write_with_imm__conn_dst_NULL_flags_0), cmocka_unit_test_setup_teardown(write_with_imm__success, setup__conn_new, teardown__conn_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_write, group_setup_write, NULL); } rpma-1.3.0/tests/unit/conn_cfg/000077500000000000000000000000001443364775400163705ustar00rootroot00000000000000rpma-1.3.0/tests/unit/conn_cfg/CMakeLists.txt000066400000000000000000000016021443364775400211270ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021-2022, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_conn_cfg name) set(src_name conn_cfg-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c conn_cfg-common.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/conn_cfg.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_conn_cfg(compl_channel) add_test_conn_cfg(cqe) add_test_conn_cfg(cq_size) add_test_conn_cfg(delete) add_test_conn_cfg(new) add_test_conn_cfg(rcqe) add_test_conn_cfg(rcq_size) add_test_conn_cfg(rq_size) add_test_conn_cfg(sq_size) add_test_conn_cfg(srq) add_test_conn_cfg(timeout) rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-common.c000066400000000000000000000016631443364775400216040ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-common.c -- the conn_cfg unit tests common functions */ #include "conn_cfg-common.h" /* * setup__conn_cfg -- prepare a new rpma_conn_cfg */ int setup__conn_cfg(void **cstate_ptr) { static struct conn_cfg_test_state cstate = {0}; /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); /* prepare an object */ int ret = rpma_conn_cfg_new(&cstate.cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate.cfg); *cstate_ptr = &cstate; return 0; } /* * teardown__conn_cfg -- delete the rpma_conn_cfg */ int teardown__conn_cfg(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* prepare an object */ int ret = rpma_conn_cfg_delete(&cstate->cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->cfg); *cstate_ptr = NULL; return 0; } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-common.h000066400000000000000000000010431443364775400216010ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * conn_cfg-common.h -- the conn_cfg unit tests common definitions */ #ifndef CONN_CFG_COMMON #define CONN_CFG_COMMON #include "cmocka_headers.h" #include "test-common.h" #include "conn_req.h" /* * All the resources used between setup__conn_cfg_new and teardown__conn_cfg_new */ struct conn_cfg_test_state { struct rpma_conn_cfg *cfg; }; int setup__conn_cfg(void **cstate_ptr); int teardown__conn_cfg(void **cstate_ptr); #endif /* CONN_CFG_COMMON */ rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-compl_channel.c000066400000000000000000000050401443364775400231070ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* * conn_cfg-compl_channel.c -- the rpma_conn_cfg_set/get_compl_channel() * unit tests * * APIs covered: * - 
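The setup__conn_cfg()/teardown__conn_cfg() fixtures above wrap the public configuration lifecycle used throughout the conn_cfg groups that follow. As a hedged illustration only — make_cfg() is a hypothetical helper name and 32 is an arbitrary example queue size — application code would typically pair the calls like this:

#include <stdbool.h>
#include <librpma.h>

/*
 * make_cfg -- hypothetical helper (not part of the test sources) creating a connection
 * configuration with a custom CQ size and a shared completion channel
 */
static int
make_cfg(struct rpma_conn_cfg **cfg_ptr)
{
	int ret = rpma_conn_cfg_new(cfg_ptr);
	if (ret)
		return ret; /* e.g. RPMA_E_NOMEM when the allocation fails */

	ret = rpma_conn_cfg_set_cq_size(*cfg_ptr, 32);
	if (ret == 0)
		ret = rpma_conn_cfg_set_compl_channel(*cfg_ptr, true);

	/* on any failure release the configuration; the pointer is zeroed by the delete */
	if (ret)
		(void) rpma_conn_cfg_delete(cfg_ptr);

	return ret;
}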
rpma_conn_cfg_set_compl_channel() * - rpma_conn_cfg_get_compl_channel() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_compl_channel(NULL, true); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ bool shared; int ret = rpma_conn_cfg_get_compl_channel(NULL, &shared); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__shared_NULL -- NULL shared is invalid */ static void get__shared_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_compl_channel(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_default__success -- get the default value */ static void get_default__success(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ bool shared; int ret = rpma_conn_cfg_get_compl_channel(cstate->cfg, &shared); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(shared, false); } /* * compl_channel__lifecycle -- happy day scenario */ static void compl_channel__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_compl_channel(cstate->cfg, true); /* verify the results */ assert_int_equal(ret, MOCK_OK); bool shared; ret = rpma_conn_cfg_get_compl_channel(cstate->cfg, &shared); assert_int_equal(ret, MOCK_OK); assert_int_equal(shared, true); } static const struct CMUnitTest test_compl_channel[] = { /* rpma_conn_cfg_set_compl_channel() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_compl_channel() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__shared_NULL, setup__conn_cfg, teardown__conn_cfg), cmocka_unit_test_setup_teardown(get_default__success, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_compl_channel() lifecycle */ cmocka_unit_test_setup_teardown(compl_channel__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_compl_channel, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-cq_size.c000066400000000000000000000040321443364775400217420ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-cq_size.c -- the rpma_conn_cfg_set/get_cq_size() unit tests * * APIs covered: * - rpma_conn_cfg_set_cq_size() * - rpma_conn_cfg_get_cq_size() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_cq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t cq_size; int ret = rpma_conn_cfg_get_cq_size(NULL, &cq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cq_size_NULL -- NULL cq_size is invalid */ static void get__cq_size_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_cq_size(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * cq_size__lifecycle -- happy day scenario */ static void 
cq_size__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_cq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); uint32_t cq_size; ret = rpma_conn_cfg_get_cq_size(cstate->cfg, &cq_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(cq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_cq_size[] = { /* rpma_conn_cfg_set_cq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_cq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__cq_size_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_cq_size() lifecycle */ cmocka_unit_test_setup_teardown(cq_size__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_cq_size, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-cqe.c000066400000000000000000000026471443364775400210660ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * conn_cfg-cqe.c -- the rpma_conn_cfg_get_cqe() unit tests * * APIs covered: * - rpma_conn_cfg_get_cqe() */ #include <limits.h> #include "conn_cfg.h" #include "conn_cfg-common.h" #include "test-common.h" /* * cqe__lifecycle -- happy day scenario */ static void cqe__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_cq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); int cqe; rpma_conn_cfg_get_cqe(cstate->cfg, &cqe); assert_int_equal(cqe, MOCK_Q_SIZE); } /* * cqe__clipped -- cq_size > INT_MAX => cqe = INT_MAX */ static void cqe__clipped(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_cq_size(cstate->cfg, (uint32_t)INT_MAX + 1); /* verify the results */ assert_int_equal(ret, MOCK_OK); int cqe; rpma_conn_cfg_get_cqe(cstate->cfg, &cqe); assert_int_equal(cqe, INT_MAX); } static const struct CMUnitTest test_cqe[] = { /* rpma_conn_cfg_set/get_cq_size() lifecycle */ cmocka_unit_test_setup_teardown(cqe__lifecycle, setup__conn_cfg, teardown__conn_cfg), cmocka_unit_test_setup_teardown(cqe__clipped, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_cqe, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-delete.c000066400000000000000000000017561443364775400215570ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-delete.c -- the rpma_conn_cfg_delete() unit tests * * APIs covered: * - rpma_conn_cfg_delete() */ #include "conn_cfg-common.h" /* * delete__cfg_ptr_NULL -- NULL cfg_ptr is invalid */ static void delete__cfg_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__cfg_NULL -- NULL cfg is valid - quick exit */ static void delete__cfg_NULL(void **unused) { /* run test */ struct rpma_conn_cfg *cfg = NULL; int ret = rpma_conn_cfg_delete(&cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cfg); } static const struct CMUnitTest test_delete[] = { /* rpma_conn_cfg_delete() unit tests */ cmocka_unit_test(delete__cfg_ptr_NULL), cmocka_unit_test(delete__cfg_NULL), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_delete, NULL, NULL); } 
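The cqe__clipped case in conn_cfg-cqe.c above pins down the rule used when the user-supplied uint32_t queue size is handed to the verbs layer as an int cqe value: anything above INT_MAX is clamped to INT_MAX. A minimal restatement of that rule is sketched below; clip_to_int() is a hypothetical helper, not the library's internal code.

#include <limits.h>
#include <stdint.h>

/* clip_to_int -- hypothetical helper restating the clamping rule the tests above expect */
static int
clip_to_int(uint32_t cq_size)
{
	return (cq_size > INT_MAX) ? INT_MAX : (int)cq_size;
}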
rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-new.c000066400000000000000000000045451443364775400211070ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_cfg-new.c -- the rpma_conn_cfg_new() unit tests * * API covered: * - rpma_conn_cfg_new() */ #include "conn_cfg.h" #include "conn_cfg-common.h" /* * new__cfg_ptr_NULL -- NULL cfg_ptr is invalid */ static void new__cfg_ptr_NULL(void **unused) { /* configure mocks */ will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ int ret = rpma_conn_cfg_new(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_conn_cfg *cfg = NULL; int ret = rpma_conn_cfg_new(&cfg); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(cfg); } /* * new__success -- all is OK */ static void new__success(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* get the default configuration for comparison */ struct rpma_conn_cfg *cfg_default = rpma_conn_cfg_default(); int a; int b; uint32_t ua; uint32_t ub; int ret; /* collect values and compare to defaults */ ret = rpma_conn_cfg_get_timeout(cstate->cfg, &a); assert_int_equal(ret, MOCK_OK); ret = rpma_conn_cfg_get_timeout(cfg_default, &b); assert_int_equal(ret, MOCK_OK); assert_int_equal(a, b); ret = rpma_conn_cfg_get_cq_size(cstate->cfg, &ua); assert_int_equal(ret, MOCK_OK); ret = rpma_conn_cfg_get_cq_size(cfg_default, &ub); assert_int_equal(ret, MOCK_OK); assert_int_equal(ua, ub); ret = rpma_conn_cfg_get_sq_size(cstate->cfg, &ua); assert_int_equal(ret, MOCK_OK); ret = rpma_conn_cfg_get_sq_size(cfg_default, &ub); assert_int_equal(ret, MOCK_OK); assert_int_equal(ua, ub); ret = rpma_conn_cfg_get_rq_size(cstate->cfg, &ua); assert_int_equal(ret, MOCK_OK); ret = rpma_conn_cfg_get_rq_size(cfg_default, &ub); assert_int_equal(ret, MOCK_OK); assert_int_equal(ua, ub); } static const struct CMUnitTest test_new[] = { /* rpma_conn_cfg_new() unit tests */ cmocka_unit_test(new__cfg_ptr_NULL), cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test_setup_teardown(new__success, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_new, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-rcq_size.c000066400000000000000000000041251443364775400221270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Fujitsu */ /* * conn_cfg-cq_size.c -- the rpma_conn_cfg_set/get_rcq_size() unit tests * * APIs covered: * - rpma_conn_cfg_set_rcq_size() * - rpma_conn_cfg_get_rcq_size() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_rcq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t rcq_size; int ret = rpma_conn_cfg_get_rcq_size(NULL, &rcq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__rcq_size_NULL -- NULL rcq_size is invalid */ static void get__rcq_size_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_rcq_size(cstate->cfg, 
NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * rcq_size__lifecycle -- happy day scenario */ static void rcq_size__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_rcq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); /* run test */ uint32_t rcq_size; ret = rpma_conn_cfg_get_rcq_size(cstate->cfg, &rcq_size); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(rcq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_rcq_size[] = { /* rpma_conn_cfg_set_rcq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_rcq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__rcq_size_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_rcq_size() lifecycle */ cmocka_unit_test_setup_teardown(rcq_size__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rcq_size, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-rcqe.c000066400000000000000000000027311443364775400212430ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_cfg-rcqe.c -- the rpma_conn_cfg_get_rcqe() unit tests * * API covered: * - rpma_conn_cfg_get_rcqe() */ #include #include "conn_cfg.h" #include "conn_cfg-common.h" #include "test-common.h" /* * rcqe__lifecycle -- happy day scenario */ static void rcqe__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_rcq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); int rcqe; rpma_conn_cfg_get_rcqe(cstate->cfg, &rcqe); assert_int_equal(rcqe, MOCK_Q_SIZE); } /* * rcqe__clipped -- rcq_size > INT_MAX => rcqe = INT_MAX */ static void rcqe__clipped(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_rcq_size(cstate->cfg, (uint32_t)INT_MAX + 1); /* verify the results */ assert_int_equal(ret, MOCK_OK); int rcqe; rpma_conn_cfg_get_rcqe(cstate->cfg, &rcqe); assert_int_equal(rcqe, INT_MAX); } static const struct CMUnitTest test_rcqe[] = { /* rpma_conn_cfg_set/get_cq_size() lifecycle */ cmocka_unit_test_setup_teardown(rcqe__lifecycle, setup__conn_cfg, teardown__conn_cfg), cmocka_unit_test_setup_teardown(rcqe__clipped, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rcqe, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-rq_size.c000066400000000000000000000040321443364775400217610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-rq_size.c -- the rpma_conn_cfg_set/get_rq_size() unit tests * * APIs covered: * - rpma_conn_cfg_set_rq_size() * - rpma_conn_cfg_get_rq_size() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_rq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t rq_size; int ret = rpma_conn_cfg_get_rq_size(NULL, &rq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * 
get__rq_size_NULL -- NULL rq_size is invalid */ static void get__rq_size_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_rq_size(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * rq_size__lifecycle -- happy day scenario */ static void rq_size__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_rq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); uint32_t rq_size; ret = rpma_conn_cfg_get_rq_size(cstate->cfg, &rq_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(rq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_rq_size[] = { /* rpma_conn_cfg_set_rq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_rq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__rq_size_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_rq_size() lifecycle */ cmocka_unit_test_setup_teardown(rq_size__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rq_size, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-sq_size.c000066400000000000000000000040321443364775400217620ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-sq_size.c -- the rpma_conn_cfg_set/get_sq_size() unit tests * * APIs covered: * - rpma_conn_cfg_set_sq_size() * - rpma_conn_cfg_get_sq_size() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_sq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t sq_size; int ret = rpma_conn_cfg_get_sq_size(NULL, &sq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__sq_size_NULL -- NULL sq_size is invalid */ static void get__sq_size_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_sq_size(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * sq_size__lifecycle -- happy day scenario */ static void sq_size__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_sq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); uint32_t sq_size; ret = rpma_conn_cfg_get_sq_size(cstate->cfg, &sq_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(sq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_sq_size[] = { /* rpma_conn_cfg_set_sq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_sq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__sq_size_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_sq_size() lifecycle */ cmocka_unit_test_setup_teardown(sq_size__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_sq_size, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-srq.c000066400000000000000000000037571443364775400211270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* 
Copyright 2022, Fujitsu */ /* * conn_cfg-srq.c -- the rpma_conn_cfg_set/get_srq() unit tests * * APIs covered: * - rpma_conn_cfg_set_srq() * - rpma_conn_cfg_get_srq() */ #include "conn_cfg-common.h" #include "test-common.h" #include "mocks-rpma-srq.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_srq(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ struct rpma_srq *srq = NULL; int ret = rpma_conn_cfg_get_srq(NULL, &srq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__srq_ptr_NULL -- NULL srq_ptr is invalid */ static void get__srq_ptr_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_get_srq(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * srq__lifecycle -- happy day scenario */ static void srq__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_srq(cstate->cfg, MOCK_RPMA_SRQ); /* verify the results */ assert_int_equal(ret, MOCK_OK); struct rpma_srq *srq = NULL; ret = rpma_conn_cfg_get_srq(cstate->cfg, &srq); assert_int_equal(ret, MOCK_OK); assert_int_equal(srq, MOCK_RPMA_SRQ); } static const struct CMUnitTest test_srq[] = { /* rpma_conn_cfg_set_srq() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_conn_cfg_get_srq() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__srq_ptr_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_srq() lifecycle */ cmocka_unit_test_setup_teardown(srq__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_srq, NULL, NULL); } rpma-1.3.0/tests/unit/conn_cfg/conn_cfg-timeout.c000066400000000000000000000053141443364775400217770ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * conn_cfg-timeout.c -- the rpma_conn_cfg_set/get_timeout() unit tests * * APIs covered: * - rpma_conn_cfg_set_timeout() * - rpma_conn_cfg_get_timeout() */ #include "conn_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_conn_cfg_set_timeout(NULL, MOCK_TIMEOUT_MS); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * set__timeout_negative -- timeout_ms < 0 is invalid */ static void set__timeout_negative(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* cache the before value */ int before; int ret = rpma_conn_cfg_get_timeout(cstate->cfg, &before); assert_int_equal(ret, MOCK_OK); /* run test */ ret = rpma_conn_cfg_set_timeout(cstate->cfg, -1); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); int after; ret = rpma_conn_cfg_get_timeout(cstate->cfg, &after); assert_int_equal(ret, MOCK_OK); assert_int_equal(before, after); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ int timeout_ms; int ret = rpma_conn_cfg_get_timeout(NULL, &timeout_ms); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__timeout_NULL -- NULL timeout_ms is invalid */ static void get__timeout_NULL(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = 
rpma_conn_cfg_get_timeout(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * timeout__lifecycle -- happy day scenario */ static void timeout__lifecycle(void **cstate_ptr) { struct conn_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_conn_cfg_set_timeout(cstate->cfg, MOCK_TIMEOUT_MS); /* verify the results */ assert_int_equal(ret, MOCK_OK); int timeout_ms; ret = rpma_conn_cfg_get_timeout(cstate->cfg, &timeout_ms); assert_int_equal(ret, MOCK_OK); assert_int_equal(timeout_ms, MOCK_TIMEOUT_MS); } static const struct CMUnitTest test_timeout[] = { /* rpma_conn_cfg_set_timeout() unit tests */ cmocka_unit_test(set__cfg_NULL), cmocka_unit_test_setup_teardown(set__timeout_negative, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_get_timeout() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__timeout_NULL, setup__conn_cfg, teardown__conn_cfg), /* rpma_conn_cfg_set/get_timeout() lifecycle */ cmocka_unit_test_setup_teardown(timeout__lifecycle, setup__conn_cfg, teardown__conn_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_timeout, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/000077500000000000000000000000001443364775400164205ustar00rootroot00000000000000rpma-1.3.0/tests/unit/conn_req/CMakeLists.txt000066400000000000000000000025241443364775400211630ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_conn_req name) set(src_name conn_req-${name}) set(name ut-${name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c conn_req-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rdma_cm.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-conn.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-conn_cfg.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-cq.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-info.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-mr.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-peer.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-private_data.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-srq.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${TEST_UNIT_COMMON_DIR}/mocks-stdio.c ${LIBRPMA_SOURCE_DIR}/conn_req.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc,--wrap=snprintf,--wrap=vsnprintf") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_conn_req(connect) add_test_conn_req(delete) add_test_conn_req(new_from_cm_event) add_test_conn_req(new) add_test_conn_req(private_data) add_test_conn_req(recv) rpma-1.3.0/tests/unit/conn_req/conn_req-common.c000066400000000000000000000230511443364775400216570ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * conn_req-common.c -- the conn_req unit tests common functions */ #include "conn_req-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "mocks-rpma-conn_cfg.h" #include "mocks-rpma-srq.h" const char Private_data[] = "Random data"; struct conn_req_new_test_state Conn_req_new_conn_cfg_default = { .get_args.cfg = MOCK_CONN_CFG_DEFAULT, .get_args.timeout_ms = RPMA_DEFAULT_TIMEOUT_MS, .get_args.cq_size = MOCK_CQ_SIZE_DEFAULT, .get_args.rcq_size = MOCK_RCQ_SIZE_DEFAULT, .get_args.shared = MOCK_SHARED_DEFAULT, .get_args.srq = NULL, .get_args.srq_rcq = 
NULL }; struct conn_req_new_test_state Conn_req_new_conn_cfg_custom = { .get_args.cfg = MOCK_CONN_CFG_CUSTOM, .get_args.timeout_ms = MOCK_TIMEOUT_MS_CUSTOM, .get_args.cq_size = MOCK_CQ_SIZE_CUSTOM, .get_args.rcq_size = MOCK_RCQ_SIZE_CUSTOM, .get_args.shared = MOCK_SHARED_CUSTOM, .get_args.srq = NULL, .get_args.srq_rcq = NULL }; struct conn_req_new_test_state Conn_req_new_conn_cfg_custom_without_srq_rcq = { .get_args.cfg = MOCK_CONN_CFG_CUSTOM, .get_args.timeout_ms = MOCK_TIMEOUT_MS_CUSTOM, .get_args.cq_size = MOCK_CQ_SIZE_CUSTOM, .get_args.rcq_size = MOCK_RCQ_SIZE_CUSTOM, .get_args.shared = MOCK_SHARED_CUSTOM, .get_args.srq = MOCK_RPMA_SRQ, .get_args.srq_rcq = NULL }; struct conn_req_new_test_state Conn_req_new_conn_cfg_default_with_srq_rcq = { .get_args.cfg = MOCK_CONN_CFG_DEFAULT, .get_args.timeout_ms = RPMA_DEFAULT_TIMEOUT_MS, .get_args.cq_size = MOCK_CQ_SIZE_DEFAULT, .get_args.rcq_size = MOCK_RCQ_SIZE_DEFAULT, .get_args.shared = MOCK_SHARED_DEFAULT, .get_args.srq = MOCK_RPMA_SRQ, .get_args.srq_rcq = MOCK_RPMA_SRQ_RCQ }; struct conn_req_test_state Conn_req_conn_cfg_default = { .get_args.cfg = MOCK_CONN_CFG_DEFAULT, .get_args.cq_size = MOCK_CQ_SIZE_DEFAULT, .get_args.rcq_size = MOCK_RCQ_SIZE_DEFAULT, .get_args.shared = MOCK_SHARED_DEFAULT, .get_args.srq = NULL, .get_args.srq_rcq = NULL }; struct conn_req_test_state Conn_req_conn_cfg_custom = { .get_args.cfg = MOCK_CONN_CFG_CUSTOM, .get_args.cq_size = MOCK_CQ_SIZE_CUSTOM, .get_args.rcq_size = MOCK_RCQ_SIZE_CUSTOM, .get_args.shared = MOCK_SHARED_CUSTOM, .get_args.srq = NULL, .get_args.srq_rcq = NULL }; struct conn_req_test_state Conn_req_conn_cfg_custom_without_srq_rcq = { .get_args.cfg = MOCK_CONN_CFG_CUSTOM, .get_args.cq_size = MOCK_CQ_SIZE_CUSTOM, .get_args.rcq_size = MOCK_RCQ_SIZE_CUSTOM, .get_args.shared = MOCK_SHARED_CUSTOM, .get_args.srq = MOCK_RPMA_SRQ, .get_args.srq_rcq = NULL }; struct conn_req_test_state Conn_req_conn_cfg_default_with_srq_rcq = { .get_args.cfg = MOCK_CONN_CFG_DEFAULT, .get_args.cq_size = MOCK_CQ_SIZE_DEFAULT, .get_args.rcq_size = MOCK_RCQ_SIZE_DEFAULT, .get_args.shared = MOCK_SHARED_DEFAULT, .get_args.srq = MOCK_RPMA_SRQ, .get_args.srq_rcq = MOCK_RPMA_SRQ_RCQ }; /* * configure_conn_req_new -- configure prestate for rpma_conn_req_new() */ void configure_conn_req_new(void **cstate_ptr) { /* the default is Conn_req_new_conn_cfg_default */ struct conn_req_new_test_state *cstate = *cstate_ptr ? *cstate_ptr : &Conn_req_new_conn_cfg_default; cstate->id.verbs = MOCK_VERBS; cstate->id.qp = MOCK_QP; cstate->id.route.path_rec = MOCK_PATH_REC; *cstate_ptr = cstate; } /* * configure_conn_req -- configure prestate for rpma_conn_req_connect() */ void configure_conn_req(void **cstate_ptr) { /* the default is Conn_req_conn_cfg_default */ struct conn_req_test_state *cstate = *cstate_ptr ? 
*cstate_ptr : &Conn_req_conn_cfg_default; cstate->event.event = RDMA_CM_EVENT_CONNECT_REQUEST; cstate->id.verbs = MOCK_VERBS; cstate->id.route.path_rec = MOCK_PATH_REC; cstate->event.id = &cstate->id; *cstate_ptr = cstate; } /* * setup__conn_req_new_from_cm_event -- prepare a valid rpma_conn_req object from CM * event */ int setup__conn_req_new_from_cm_event(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); will_return_maybe(__wrap_snprintf, MOCK_OK); will_return(rpma_private_data_store, MOCK_PRIVATE_DATA); /* run test */ int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &cstate->req); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate->req); *cstate_ptr = cstate; return 0; } /* * teardown__conn_req_new_from_cm_event -- delete the rpma_conn_req object created * from a CM event */ int teardown__conn_req_new_from_cm_event(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->req); *cstate_ptr = NULL; return 0; } /* * setup__conn_req_new -- prepare a new outgoing rpma_conn_req */ int setup__conn_req_new(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks for rpma_conn_req_new() */ Mock_ctrl_defer_destruction = MOCK_CTRL_DEFER; will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); expect_value(rdma_resolve_route, timeout_ms, 
cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); will_return_maybe(__wrap_snprintf, MOCK_STDIO_ERROR); /* run test */ int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &cstate->req); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate->req); *cstate_ptr = cstate; /* restore default mock configuration */ Mock_ctrl_defer_destruction = MOCK_CTRL_NO_DEFER; return 0; } /* * teardown__conn_req_new -- delete the outgoing rpma_conn_req object */ int teardown__conn_req_new(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, 0); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->req); *cstate_ptr = NULL; return 0; } rpma-1.3.0/tests/unit/conn_req/conn_req-common.h000066400000000000000000000104771443364775400216740ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_req-common.h -- the conn_req unit tests common definitions */ #ifndef CONN_REQ_COMMON #define CONN_REQ_COMMON #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h" #include "mocks-rpma-cq.h" #include "mocks-rpma-srq.h" #include "mocks-stdio.h" #define DEFAULT_VALUE "The default one" #define DEFAULT_LEN (strlen(DEFAULT_VALUE) + 1) #define MOCK_CONN_REQ (struct rpma_conn_req *)0xC410 #define MOCK_GET_RCQ(cstate) \ ((cstate)->get_args.srq_rcq ? MOCK_RPMA_SRQ_RCQ : \ ((cstate)->get_args.rcq_size ? MOCK_RPMA_RCQ : NULL)) #define MOCK_GET_RCQ_DEL(cstate) \ ((!(cstate)->get_args.srq_rcq && (cstate)->get_args.rcq_size) ? MOCK_RPMA_RCQ : NULL) #define MOCK_GET_CHANNEL(cstate) \ ((!(cstate)->get_args.srq_rcq && (cstate)->get_args.shared) ? 
\ MOCK_COMP_CHANNEL : NULL) #define MOCK_GET_CONN_CFG(cstate) \ ((cstate)->get_args.cfg == MOCK_CONN_CFG_DEFAULT ? \ NULL : (cstate)->get_args.cfg) #define CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ(test_func, \ setup_func, teardown_func) \ {#test_func "__without_rcq", (test_func), (setup_func), \ (teardown_func), &Conn_req_new_conn_cfg_default}, \ {#test_func "__with_rcq", (test_func), (setup_func), \ (teardown_func), &Conn_req_new_conn_cfg_custom} #define CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ(test_func) \ CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ(test_func, \ NULL, NULL) #define CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ(test_func, \ setup_func, teardown_func) \ {#test_func "__without_srq_rcq", (test_func), (setup_func), (teardown_func), \ &Conn_req_new_conn_cfg_custom_without_srq_rcq}, \ {#test_func "__with_srq_rcq", (test_func), (setup_func), (teardown_func), \ &Conn_req_new_conn_cfg_default_with_srq_rcq} #define CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ(test_func) \ CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ(test_func, NULL, NULL) #define CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ(test_func, \ setup_func, teardown_func) \ {#test_func "__without_rcq", (test_func), (setup_func), \ (teardown_func), &Conn_req_conn_cfg_default}, \ {#test_func "__with_rcq", (test_func), (setup_func), \ (teardown_func), &Conn_req_conn_cfg_custom} #define CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(test_func) \ CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ(test_func, \ NULL, NULL) #define CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ(test_func, \ setup_func, teardown_func) \ {#test_func "__without_srq_rcq", (test_func), (setup_func), (teardown_func), \ &Conn_req_conn_cfg_custom_without_srq_rcq}, \ {#test_func "__with_rcq", (test_func), (setup_func), (teardown_func), \ &Conn_req_conn_cfg_default_with_srq_rcq} #define CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ(test_func) \ CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ(test_func, NULL, NULL) /* * All the resources used between setup__conn_req_new_from_cm_event and * teardown__conn_req_new_from_cm_event */ struct conn_req_test_state { struct conn_cfg_get_mock_args get_args; struct rdma_cm_event event; struct rdma_cm_id id; struct rpma_conn_req *req; }; extern struct conn_req_test_state Conn_req_conn_cfg_default; extern struct conn_req_test_state Conn_req_conn_cfg_custom; extern struct conn_req_test_state Conn_req_conn_cfg_custom_without_srq_rcq; extern struct conn_req_test_state Conn_req_conn_cfg_default_with_srq_rcq; int setup__conn_req_new_from_cm_event(void **cstate_ptr); int teardown__conn_req_new_from_cm_event(void **cstate_ptr); /* * All the resources used between setup__conn_req_new and teardown__conn_req_new */ struct conn_req_new_test_state { struct conn_cfg_get_mock_args get_args; struct rdma_cm_id id; struct rpma_conn_req *req; }; extern struct conn_req_new_test_state Conn_req_new_conn_cfg_default; extern struct conn_req_new_test_state Conn_req_new_conn_cfg_custom; extern struct conn_req_new_test_state Conn_req_new_conn_cfg_custom_without_srq_rcq; extern struct conn_req_new_test_state Conn_req_new_conn_cfg_default_with_srq_rcq; int setup__conn_req_new(void **cstate_ptr); int teardown__conn_req_new(void **cstate_ptr); void configure_conn_req_new(void **cstate_ptr); void configure_conn_req(void **cstate_ptr); #endif /* CONN_REQ_COMMON */ rpma-1.3.0/tests/unit/conn_req/conn_req-connect.c000066400000000000000000000467331443364775400220340ustar00rootroot00000000000000// 
SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_req-connect.c -- the rpma_conn_req_connect() unit tests * * API covered: * - rpma_conn_req_connect() */ #include "conn_req-common.h" #include "test-common.h" /* * connect__req_ptr_NULL -- NULL req_ptr is invalid */ static void connect__req_ptr_NULL(void **unused) { /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(NULL, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(conn); } /* * connect__req_NULL -- NULL *req_ptr is invalid */ static void connect__req_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(conn); assert_null(req); } /* * configure_mocks_conn_req_delete -- configure mocks for rpma_conn_req_delete() */ static void configure_mocks_conn_req_delete(struct conn_req_test_state *cstate) { /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); } /* * connect__conn_ptr_NULL -- NULL conn_ptr is invalid */ static void connect__conn_ptr_NULL(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks for rpma_conn_req_delete() */ configure_mocks_conn_req_delete(cstate); /* run test */ int ret = rpma_conn_req_connect(&cstate->req, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cstate->req); } /* * connect__pdata_NULL_pdata_ptr_NULL -- pdata->ptr == NULL is invalid */ static void connect__pdata_NULL_pdata_ptr_NULL(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks for rpma_conn_req_delete() */ configure_mocks_conn_req_delete(cstate); /* run test */ struct rpma_conn *conn = NULL; struct rpma_conn_private_data pdata = {NULL, 1}; int ret = rpma_conn_req_connect(&cstate->req, &pdata, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cstate->req); assert_null(conn); } /* * connect__pdata_NULL_pdata_len_0 -- pdata->len == 0 is invalid */ static void connect__pdata_NULL_pdata_len_0(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks for rpma_conn_req_delete() */ configure_mocks_conn_req_delete(cstate); /* run test */ char buff = 0; struct rpma_conn *conn = NULL; struct rpma_conn_private_data pdata = {&buff, 0}; int ret = rpma_conn_req_connect(&cstate->req, &pdata, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cstate->req); assert_null(conn); } /* * connect__pdata_NULL_pdata_ptr_NULL_len_0 -- pdata->ptr == NULL and * pdata->len == 0 are invalid 
*/ static void connect__pdata_NULL_pdata_ptr_NULL_len_0(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks for rpma_conn_req_delete() */ configure_mocks_conn_req_delete(cstate); /* run test */ struct rpma_conn *conn = NULL; struct rpma_conn_private_data pdata = {NULL, 0}; int ret = rpma_conn_req_connect(&cstate->req, &pdata, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(cstate->req); assert_null(conn); } /* * connect_via_accept__accept_ERRNO -- rdma_accept() fails with MOCK_ERRNO */ static void connect_via_accept__accept_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_accept, id, &cstate->id); will_return(rdma_accept, MOCK_ERRNO); expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_accept__accept_ERRNO_subsequent_ERRNO2 -- rdma_accept() * fails with MOCK_ERRNO whereas subsequent (rdma_ack_cm_event(), * rpma_cq_delete(&req->rcq), rpma_cq_delete(&req->cq)) fail with MOCK_ERRNO2 */ static void connect_via_accept__accept_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_accept, id, &cstate->id); will_return(rdma_accept, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); if (cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* third error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* third or fourth error */ if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_accept__conn_new_ERRNO -- rpma_conn_new() fails with * MOCK_ERRNO */ static void connect_via_accept__conn_new_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_accept, id, &cstate->id); will_return(rdma_accept, MOCK_OK); expect_value(rpma_conn_new, id, &cstate->id); 
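/* the mock also verifies that rpma_conn_new() receives the rcq and the completion channel attached to the request, and then it is forced to fail with MOCK_ERRNO */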
expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, NULL); will_return(rpma_conn_new, RPMA_E_PROVIDER); will_return(rpma_conn_new, MOCK_ERRNO); expect_value(rdma_disconnect, id, &cstate->id); will_return(rdma_disconnect, MOCK_OK); expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_accept__conn_new_ERRNO_subsequent_ERRNO2 -- * rpma_conn_new() fails with MOCK_ERRNO whereas subsequent (rdma_disconnect(), * rpma_cq_delete(&req->rcq), rpma_cq_delete(&req->cq)) fail with MOCK_ERRNO2 */ static void connect_via_accept__conn_new_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_accept, id, &cstate->id); will_return(rdma_accept, MOCK_OK); expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, NULL); will_return(rpma_conn_new, RPMA_E_PROVIDER); will_return(rpma_conn_new, MOCK_ERRNO); /* first error */ expect_value(rdma_disconnect, id, &cstate->id); will_return(rdma_disconnect, MOCK_ERRNO2); /* second error */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); if (cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* third error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* third or fourth error */ if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_accept__success_incoming -- rpma_conn_req_connect() * success (using an incoming connection request) */ static void connect_via_accept__success_incoming(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_accept, id, &cstate->id); will_return(rdma_accept, MOCK_OK); expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, MOCK_CONN); expect_value(rpma_conn_transfer_private_data, conn, MOCK_CONN); expect_value(rpma_conn_transfer_private_data, pdata->ptr, 
MOCK_PRIVATE_DATA); expect_value(rpma_conn_transfer_private_data, pdata->len, MOCK_PDATA_LEN); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->req); assert_int_equal(conn, MOCK_CONN); } /* * connect_via_connect__connect_ERRNO -- rdma_connect() fails with MOCK_ERRNO */ static void connect_via_connect__connect_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, MOCK_CONN); expect_value(rdma_connect, id, &cstate->id); will_return(rdma_connect, MOCK_ERRNO); expect_value(rpma_conn_delete, conn, MOCK_CONN); will_return(rpma_conn_delete, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_connect__connect_ERRNO_subsequent_ERRNO2 -- rpma_conn_delete() * fails with MOCK_ERRNO2 after rdma_connect() failed with MOCK_ERRNO */ static void connect_via_connect__connect_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, MOCK_CONN); expect_value(rdma_connect, id, &cstate->id); will_return(rdma_connect, MOCK_ERRNO); /* first error */ expect_value(rpma_conn_delete, conn, MOCK_CONN); will_return(rpma_conn_delete, RPMA_E_PROVIDER); will_return(rpma_conn_delete, MOCK_ERRNO2); /* second error */ /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_connect__conn_new_ERRNO -- rpma_conn_new() fails with * MOCK_ERRNO */ static void connect_via_connect__conn_new_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, NULL); will_return(rpma_conn_new, RPMA_E_PROVIDER); will_return(rpma_conn_new, MOCK_ERRNO); expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ 
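/* on success the request object is consumed (req is set to NULL) and the established connection is returned via conn */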
assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_connect__conn_new_ERRNO_subsequent_ERRNO2 -- * rpma_conn_new() fails with MOCK_ERRNO whereas subsequent * (rdma_disconnect(), rpma_cq_delete(&req->rcq), rpma_cq_delete(&req->cq), * rdma_destroy_id()) fail with MOCK_ERRNO2 */ static void connect_via_connect__conn_new_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, NULL); will_return(rpma_conn_new, RPMA_E_PROVIDER); will_return(rpma_conn_new, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); if (cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third or fourth error */ if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); assert_null(conn); } /* * connect_via_connect__success_outgoing -- rpma_conn_req_connect() * success (using an outgoing connection request) */ static void connect_via_connect__success_outgoing(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_connect, id, &cstate->id); will_return(rdma_connect, MOCK_OK); expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, MOCK_CONN); /* run test */ struct rpma_conn *conn = NULL; int ret = rpma_conn_req_connect(&cstate->req, NULL, &conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->req); assert_int_equal(conn, MOCK_CONN); } /* * connect_via_connect_with_pdata__success_outgoing -- rpma_conn_req_connect() * success (using an outgoing connection request) */ static void connect_via_connect_with_pdata__success_outgoing(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_connect, id, &cstate->id); will_return(rdma_connect, MOCK_OK); expect_value(rpma_conn_new, id, &cstate->id); expect_value(rpma_conn_new, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_conn_new, channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_conn_new, MOCK_CONN); /* run test */ char buff[] = DEFAULT_VALUE; struct rpma_conn *conn = NULL; struct rpma_conn_private_data pdata = {&buff, DEFAULT_LEN}; int ret = 
rpma_conn_req_connect(&cstate->req, &pdata, &conn); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->req); assert_int_equal(conn, MOCK_CONN); } static const struct CMUnitTest test_connect[] = { /* rpma_conn_req_connect() unit tests */ cmocka_unit_test(connect__req_ptr_NULL), cmocka_unit_test(connect__req_NULL), cmocka_unit_test(connect__conn_ptr_NULL), cmocka_unit_test(connect__pdata_NULL_pdata_ptr_NULL), cmocka_unit_test(connect__pdata_NULL_pdata_len_0), cmocka_unit_test(connect__pdata_NULL_pdata_ptr_NULL_len_0), /* connect via rdma_accept() */ CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(connect_via_accept__accept_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( connect_via_accept__accept_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(connect_via_accept__conn_new_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( connect_via_accept__conn_new_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( connect_via_accept__success_incoming), /* connect via rdma_connect() */ CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect__connect_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect__connect_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect__conn_new_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect__conn_new_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect__success_outgoing), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( connect_via_connect_with_pdata__success_outgoing), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_connect, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/conn_req-delete.c000066400000000000000000000442471443364775400216430ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_req-delete.c -- the rpma_conn_req_delete() unit tests * * API covered: * - rpma_conn_req_delete() */ #include "conn_req-common.h" /* * delete__req_ptr_NULL -- NULL req_ptr is invalid */ static void delete__req_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__req_NULL -- NULL req is valid - quick exit */ static void delete__req_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_delete(&req); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * delete_via_reject__rcq_delete_ERRNO - rpma_cq_delete(&req->rcq) * fails with MOCK_ERRNO */ static void delete_via_reject__rcq_delete_ERRNO(void **unused) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = &Conn_req_conn_cfg_custom; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * 
delete_via_reject__rcq_delete_ERRNO_subsequent_ERRNO2 - * rpma_cq_delete(&req->rcq) fails with MOCK_ERRNO whereas subsequent * (rpma_cq_delete(&req->cq), rdma_reject(), rdma_ack_cm_event()) * fail with MOCK_ERRNO2 */ static void delete_via_reject__rcq_delete_ERRNO_subsequent_ERRNO2(void **unused) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = &Conn_req_conn_cfg_custom; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_ERRNO2); /* third error */ expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__cq_delete_ERRNO - rpma_cq_delete(&req->cq) * fails with MOCK_ERRNO */ static void delete_via_reject__cq_delete_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__cq_delete_ERRNO_subsequent_ERRNO2 - * rpma_cq_delete() fails with MOCK_ERRNO whereas subsequent * (rdma_reject(), rdma_ack_cm_event()) fail with MOCK_ERRNO2 */ static void delete_via_reject__cq_delete_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_ERRNO2); /* second error */ expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__reject_ERRNO -- rdma_reject() fails with MOCK_ERRNO */ static void delete_via_reject__reject_ERRNO(void 
**cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_ERRNO); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__reject_ERRNO_ack_ERRNO2 - rdma_ack_cm_event() * fails with MOCK_ERRNO2 after rdma_reject() failed with MOCK_ERRNO */ static void delete_via_reject__reject_ERRNO_ack_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_ERRNO); /* first error */ expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__ibv_destroy_comp_channel_ERRNO - * ibv_destroy_comp_channel() fails with MOCK_ERRNO */ static void delete_via_reject__ibv_destroy_comp_channel_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); expect_function_call(rpma_private_data_delete); will_return(ibv_destroy_comp_channel, MOCK_ERRNO); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_reject__ack_ERRNO_ibv_ERRNO2 - ibv_destroy_comp_channel() * fails with MOCK_ERRNO2 after rdma_ack_cm_event() failed with MOCK_ERRNO */ static void delete_via_reject__ack_ERRNO_ibv_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, 
MOCK_OK); expect_function_call(rpma_private_data_delete); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* second error */ /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__rcq_delete_ERRNO - rpma_cq_delete(&req->rcq) * fails with MOCK_ERRNO */ static void delete_via_destroy__rcq_delete_ERRNO(void **unused) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = &Conn_req_new_conn_cfg_custom; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_OK); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__rcq_delete_ERRNO_subsequent_ERRNO2 -- * rpma_cq_delete(&req->rcq) fails with MOCK_ERRNO whereas subsequent * (rpma_cq_delete(&req->cq), rdma_destroy_id()) fail with MOCK_ERRNO2 */ static void delete_via_destroy__rcq_delete_ERRNO_subsequent_ERRNO2(void **unused) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = &Conn_req_new_conn_cfg_custom; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third error */ expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__cq_delete_ERRNO - rpma_cq_delete(&req->cq) * fails with MOCK_ERRNO */ static void delete_via_destroy__cq_delete_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_OK); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); 
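	/* even when deletion reports a provider error, the request handle must be zeroed */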
assert_null(cstate->req); } /* * delete_via_destroy__cq_delete_ERRNO_subsequent_ERRNO2 -- * rdma_destroy_id() fails with MOCK_ERRNO2 after rpma_cq_delete(&req->cq) * failed with MOCK_ERRNO */ static void delete_via_destroy__cq_delete_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__destroy_id_ERRNO - rdma_destroy_id() * fails with MOCK_ERRNO */ static void delete_via_destroy__destroy_id_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO); expect_function_call(rpma_private_data_delete); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__ibv_destroy_comp_channel_ERRNO - * ibv_destroy_comp_channel() fails with MOCK_ERRNO */ static void delete_via_destroy__ibv_destroy_comp_channel_ERRNO(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_OK); expect_function_call(rpma_private_data_delete); will_return(ibv_destroy_comp_channel, MOCK_ERRNO); /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } /* * delete_via_destroy__rdma_ERRNO_ibv_ERRNO2 - ibv_destroy_comp_channel() * fails with MOCK_ERRNO2 after rdma_destroy_id() failed with MOCK_ERRNO */ static void delete_via_destroy__rdma_ERRNO_ibv_ERRNO2(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_new_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); 
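	/* mocked teardown order: QP, receive CQ, CQ, CM id, private data, completion channel */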
expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO); /* first error */ expect_function_call(rpma_private_data_delete); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* second error */ /* run test */ int ret = rpma_conn_req_delete(&cstate->req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->req); } static const struct CMUnitTest test_delete[] = { /* rpma_conn_req_delete() unit tests */ cmocka_unit_test(delete__req_ptr_NULL), cmocka_unit_test(delete__req_NULL), /* delete via rdma_reject() */ cmocka_unit_test(delete_via_reject__rcq_delete_ERRNO), cmocka_unit_test( delete_via_reject__rcq_delete_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( delete_via_reject__cq_delete_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( delete_via_reject__cq_delete_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(delete_via_reject__reject_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( delete_via_reject__reject_ERRNO_ack_ERRNO2), cmocka_unit_test_prestate( delete_via_reject__ibv_destroy_comp_channel_ERRNO, &Conn_req_conn_cfg_custom), cmocka_unit_test_prestate(delete_via_reject__ack_ERRNO_ibv_ERRNO2, &Conn_req_conn_cfg_custom), /* delete via rdma_destroy_id() */ cmocka_unit_test(delete_via_destroy__rcq_delete_ERRNO), cmocka_unit_test( delete_via_destroy__rcq_delete_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( delete_via_destroy__cq_delete_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( delete_via_destroy__cq_delete_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( delete_via_destroy__destroy_id_ERRNO), cmocka_unit_test_prestate( delete_via_destroy__ibv_destroy_comp_channel_ERRNO, &Conn_req_new_conn_cfg_custom), cmocka_unit_test_prestate(delete_via_destroy__rdma_ERRNO_ibv_ERRNO2, &Conn_req_new_conn_cfg_custom), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_delete, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/conn_req-new.c000066400000000000000000000744661443364775400212000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * conn_req-new.c -- the rpma_conn_req_new() unit tests * * API covered: * - rpma_conn_req_new() */ #include "conn_req-common.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h" /* * new__peer_NULL -- NULL peer is invalid */ static void new__peer_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(NULL, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * new__addr_NULL -- NULL addr is invalid */ static void new__addr_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, NULL, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * new__port_NULL -- NULL port is invalid */ static void new__port_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, NULL, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * new__req_ptr_NULL -- NULL req_ptr is invalid */ static void 
new__req_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__all_NULL -- all NULL arguments are invalid */ static void new__all_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_new(NULL, NULL, NULL, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__info_new_ERRNO -- rpma_info_new() fails with MOCK_ERRNO */ static void new__info_new_ERRNO(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, NULL); will_return(rpma_info_new, RPMA_E_PROVIDER); will_return(rpma_info_new, MOCK_ERRNO); will_return_maybe(rdma_create_id, &cstate->id); will_return_maybe(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__create_id_ERRNO -- rdma_create_id() fails with MOCK_ERRNO */ static void new__create_id_ERRNO(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rdma_create_id, NULL); will_return(rdma_create_id, MOCK_ERRNO); will_return_maybe(rpma_info_new, MOCK_INFO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__resolve_addr_ERRNO -- rpma_info_resolve_addr() fails * with MOCK_ERRNO */ static void new__resolve_addr_ERRNO(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rpma_info_resolve_addr, RPMA_E_PROVIDER); will_return(rpma_info_resolve_addr, MOCK_ERRNO); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__resolve_addr_ERRNO_subsequent_ERRNO2 -- rdma_destroy_id() fails * with MOCK_ERRNO2 after rpma_info_resolve_addr() failed with MOCK_ERRNO */ static void new__resolve_addr_ERRNO_subsequent_ERRNO2(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rpma_info_resolve_addr, RPMA_E_PROVIDER); will_return(rpma_info_resolve_addr, MOCK_ERRNO); /* first error */ will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, 
&req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__resolve_route_ERRNO -- rdma_resolve_route() fails with MOCK_ERRNO */ static void new__resolve_route_ERRNO(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. */ expect_value(rdma_resolve_route, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rdma_resolve_route, MOCK_ERRNO); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__resolve_route_ERRNO_subsequent_ERRNO2 -- rdma_destroy_id() fails with * MOCK_ERRNO2 after rdma_resolve_route() failed with MOCK_ERRNO */ static void new__resolve_route_ERRNO_subsequent_ERRNO2(void **unused) { struct conn_req_new_test_state *cstate = NULL; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. */ expect_value(rdma_resolve_route, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rdma_resolve_route, MOCK_ERRNO); /* first error */ will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__shared_true_srq_rcq_not_NULL -- true shared and non-NULL srq_rcq are invalid */ static void new__shared_true_srq_rcq_not_NULL(void **unused) { struct conn_req_new_test_state *cstate = &Conn_req_new_conn_cfg_custom_without_srq_rcq; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, MOCK_RPMA_SRQ_RCQ); } will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * new__cq_new_ERRNO -- rpma_cq_new(cqe) fails with MOCK_ERRNO */ static void new__cq_new_ERRNO(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. */ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__cq_new_ERRNO_subsequent_ERRNO2 -- rdma_destroy_id() fails with * MOCK_ERRNO2 after rpma_cq_new(cqe) failed with MOCK_ERRNO */ static void new__cq_new_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); /* first error */ if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__rcq_new_ERRNO -- rpma_cq_new(rcqe) fails with MOCK_ERRNO */ static void new__rcq_new_ERRNO(void **unused) { struct conn_req_new_test_state *cstate = &Conn_req_new_conn_cfg_custom; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__rcq_new_ERRNO_subsequent_ERRNO2 -- rpma_cq_new(rcqe) fails with * MOCK_ERRNO whereas subsequent (rpma_cq_delete(&cq), rdma_destroy_id()) * fail with MOCK_ERRNO2 */ static void new__rcq_new_ERRNO_subsequent_ERRNO2(void **unused) { struct conn_req_new_test_state *cstate = &Conn_req_new_conn_cfg_custom; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__peer_create_qp_ERRNO -- rpma_peer_setup_qp() fails * with MOCK_ERRNO */ static void new__peer_create_qp_ERRNO(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); will_return(rpma_peer_setup_qp, RPMA_E_PROVIDER); will_return(rpma_peer_setup_qp, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__peer_create_qp_ERRNO_subsequent_ERRNO2 -- rpma_peer_setup_qp() * fails with MOCK_ERRNO whereas subsequent (rpma_cq_delete(&rcq), * rpma_cq_delete(&cq), rdma_destroy_id()) fail with MOCK_ERRNO2 */ static void new__peer_create_qp_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); will_return(rpma_peer_setup_qp, RPMA_E_PROVIDER); will_return(rpma_peer_setup_qp, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third or fourth error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * new__malloc_ERRNO_subsequent_ERRNO2 -- malloc() fails with MOCK_ERRNO * whereas subsequent (rpma_cq_delete(&rcq), rpma_cq_delete(&cq), * rdma_destroy_id()) fail with MOCK_ERRNO2 */ static void new__malloc_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; configure_conn_req_new((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_timeout, &cstate->get_args); will_return(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, &cstate->id); expect_value(rpma_info_resolve_addr, id, &cstate->id); expect_value(rpma_info_resolve_addr, timeout_ms, cstate->get_args.timeout_ms); will_return(rpma_info_resolve_addr, MOCK_OK); /* * XXX rdma_resolve_route() mock assumes all its expects comes from * another mock. The following expect breaks this assumption. 
*/ expect_value(rdma_resolve_route, timeout_ms, cstate->get_args.timeout_ms); will_return(rdma_resolve_route, MOCK_OK); will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third or fourth error */ /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, MOCK_GET_CONN_CFG(cstate), &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * new__success -- all is OK */ static void new__success(void **unused) { /* * The thing is done by setup__conn_req_new() and * teardown__conn_req_new(). 
*/ } static const struct CMUnitTest test_new[] = { /* rpma_conn_req_new() unit tests */ cmocka_unit_test(new__peer_NULL), cmocka_unit_test(new__addr_NULL), cmocka_unit_test(new__port_NULL), cmocka_unit_test(new__req_ptr_NULL), cmocka_unit_test(new__all_NULL), cmocka_unit_test(new__info_new_ERRNO), cmocka_unit_test(new__create_id_ERRNO), cmocka_unit_test(new__resolve_addr_ERRNO), cmocka_unit_test(new__resolve_addr_ERRNO_subsequent_ERRNO2), cmocka_unit_test(new__resolve_route_ERRNO), cmocka_unit_test(new__resolve_route_ERRNO_subsequent_ERRNO2), cmocka_unit_test(new__shared_true_srq_rcq_not_NULL), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ(new__cq_new_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ(new__cq_new_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( new__cq_new_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ( new__cq_new_ERRNO_subsequent_ERRNO2), cmocka_unit_test(new__rcq_new_ERRNO), cmocka_unit_test(new__rcq_new_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ(new__peer_create_qp_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ(new__peer_create_qp_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( new__peer_create_qp_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ( new__peer_create_qp_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ(new__malloc_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ(new__malloc_ERRNO), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_RCQ( new__malloc_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_WITH_AND_WITHOUT_SRQ_RCQ(new__malloc_ERRNO_subsequent_ERRNO2), CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ( new__success, setup__conn_req_new, teardown__conn_req_new), CONN_REQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ( new__success, setup__conn_req_new, teardown__conn_req_new), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_new, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/conn_req-new_from_cm_event.c000066400000000000000000000601421443364775400240650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021-2022, Fujitsu */ /* * conn_req-new_from_cm_event.c -- the rpma_conn_req_new_from_cm_event() unit tests * * API covered: * - rpma_conn_req_new_from_cm_event() */ #include "conn_req-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" #include "mocks-rpma-conn_cfg.h" /* * from_cm_event__peer_NULL -- NULL peer is invalid */ static void from_cm_event__peer_NULL(void **unused) { /* run test */ struct rdma_cm_event event = CM_EVENT_CONNECTION_REQUEST_INIT; struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(NULL, &event, MOCK_CONN_CFG_DEFAULT, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * from_cm_event__edata_NULL -- NULL edata is invalid */ static void from_cm_event__edata_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, NULL, MOCK_CONN_CFG_DEFAULT, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * from_cm_event__req_ptr_NULL -- NULL req_ptr is invalid */ static void from_cm_event__req_ptr_NULL(void **unused) { /* run test */ struct rdma_cm_event event = CM_EVENT_CONNECTION_REQUEST_INIT; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &event, MOCK_CONN_CFG_DEFAULT, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * 
from_cm_event__peer_NULL_edata_NULL_req_ptr_NULL -- NULL peer, * NULL edata and NULL req_ptr are not valid */ static void from_cm_event__peer_NULL_edata_NULL_req_ptr_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_new_from_cm_event(NULL, NULL, MOCK_CONN_CFG_DEFAULT, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * from_cm_event__RDMA_CM_EVENT_CONNECT_ERROR -- edata of type * RDMA_CM_EVENT_CONNECT_ERROR */ static void from_cm_event__RDMA_CM_EVENT_CONNECT_ERROR(void **unused) { /* run test */ struct rdma_cm_event event = CM_EVENT_CONNECT_ERROR_INIT; struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &event, MOCK_CONN_CFG_DEFAULT, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * from_cm_event__shared_true_srq_rcq_not_NULL -- * true shared and non-NULL srq_rcq are invalid */ static void from_cm_event__shared_true_srq_rcq_not_NULL(void **unused) { struct conn_req_test_state *cstate = &Conn_req_conn_cfg_custom_without_srq_rcq; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, MOCK_RPMA_SRQ_RCQ); } /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * from_cm_event__ibv_create_comp_channel_ERRNO -- * ibv_create_comp_channel() fails with MOCK_ERRNO */ static void from_cm_event__ibv_create_comp_channel_ERRNO(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); will_return(ibv_create_comp_channel, NULL); will_return(ibv_create_comp_channel, MOCK_ERRNO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__ibv_create_comp_channel_ERRNO_rdma_ack_cm_event_ERRNO2 -- * ibv_create_comp_channel() fails with MOCK_ERRNO and rdma_ack_cm_event fails with MOCK_ERRNO2 */ static void from_cm_event__ibv_create_comp_channel_ERRNO_rdma_ack_cm_event_ERRNO2(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); will_return(ibv_create_comp_channel, NULL); will_return(ibv_create_comp_channel, MOCK_ERRNO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__cq_new_ERRNO -- 
rpma_cq_new(cqe) fails with MOCK_ERRNO */ static void from_cm_event__cq_new_ERRNO(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__rcq_new_ERRNO -- rpma_cq_new(rcqe) fails with MOCK_ERRNO */ static void from_cm_event__rcq_new_ERRNO(void **unused) { /* rpma_cq_new(rcqe) is called only when rcqe > 0 */ struct conn_req_test_state *cstate = &Conn_req_conn_cfg_custom; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__peer_create_qp_ERRNO -- rpma_peer_setup_qp() * fails with MOCK_ERRNO */ static void from_cm_event__peer_create_qp_ERRNO(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); 
expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, RPMA_E_PROVIDER); will_return(rpma_peer_setup_qp, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__create_qp_ERRNO_subsequent_ERRNO2 -- rpma_peer_setup_qp() * fails with MOCK_ERRNO whereas subsequent (rpma_cq_delete(&rcq), * rpma_cq_delete(&cq)) fail with MOCK_ERRNO2 */ static void from_cm_event__create_qp_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, RPMA_E_PROVIDER); will_return(rpma_peer_setup_qp, MOCK_ERRNO); /* first error */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * from_cm_event__malloc_ERRNO -- 
malloc() fails with MOCK_ERRNO */ static void from_cm_event__malloc_ERRNO(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * from_cm_event__malloc_ERRNO_subsequent_ERRNO2 -- malloc() fails * with MOCK_ERRNO whereas subsequent (rpma_cq_delete(&rcq), * rpma_cq_delete(&cq)) fail with MOCK_ERRNO2 */ static void from_cm_event__malloc_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); /* first error */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, 
MOCK_GET_RCQ_DEL(cstate)); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * from_cm_event__private_data_store_E_NOMEM -- rpma_private_data_store() * fails with RPMA_E_NOMEM */ static void from_cm_event__private_data_store_E_NOMEM(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); will_return_maybe(__wrap_snprintf, MOCK_OK); will_return(rpma_private_data_store, NULL); /* RPMA_E_NOMEM */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_OK); expect_function_call(rpma_private_data_delete); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * from_cm_event__private_data_store_E_NOMEM_subsequent_ERRNO2 -- * rpma_private_data_store() fails with RPMA_E_NOMEM whereas subsequent * (rpma_cq_delete(&rcq), rpma_cq_delete(&cq), rdma_destroy_id()) fail * with MOCK_ERRNO2 */ static void from_cm_event__private_data_store_E_NOMEM_subsequent_ERRNO2(void **cstate_ptr) { struct conn_req_test_state *cstate = *cstate_ptr; configure_conn_req((void **)&cstate); /* configure mocks */ will_return(rpma_conn_cfg_get_cqe, &cstate->get_args); 
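	/* all rpma_conn_cfg getters read their values from the same get_args prestate */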
will_return(rpma_conn_cfg_get_rcqe, &cstate->get_args); will_return(rpma_conn_cfg_get_compl_channel, &cstate->get_args); will_return(rpma_conn_cfg_get_srq, &cstate->get_args); if (cstate->get_args.srq) { expect_value(rpma_srq_get_rcq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_rcq, cstate->get_args.srq_rcq); } if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(rpma_cq_new, cqe, cstate->get_args.cq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_CQ); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { expect_value(rpma_cq_new, cqe, cstate->get_args.rcq_size); expect_value(rpma_cq_new, shared_channel, MOCK_GET_CHANNEL(cstate)); will_return(rpma_cq_new, MOCK_RPMA_RCQ); } expect_value(rpma_peer_setup_qp, id, &cstate->id); expect_value(rpma_peer_setup_qp, rcq, MOCK_GET_RCQ(cstate)); expect_value(rpma_peer_setup_qp, cfg, cstate->get_args.cfg); will_return(rpma_peer_setup_qp, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); will_return_maybe(__wrap_snprintf, MOCK_STDIO_ERROR); will_return(rpma_private_data_store, NULL); /* first RPMA_E_NOMEM */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ_DEL(cstate)); if (!cstate->get_args.srq_rcq && cstate->get_args.rcq_size) { will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second error */ } else { /* rcq == NULL cannot fail */ will_return(rpma_cq_delete, MOCK_OK); } expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); /* second or third error */ expect_value(rdma_destroy_id, id, &cstate->id); will_return(rdma_destroy_id, MOCK_ERRNO2); /* third or fourth error */ expect_function_call(rpma_private_data_delete); if (!cstate->get_args.srq_rcq && cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_conn_req_new_from_cm_event(MOCK_PEER, &cstate->event, cstate->get_args.cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * conn_req_new_from_cm__lifecycle - happy day scenario */ static void conn_req_new_from_cm__lifecycle(void **unused) { /* * The thing is done by setup__conn_req_new_from_cm_event() and * teardown__conn_req_new_from_cm_event(). 
*/ } static const struct CMUnitTest test_from_cm_event[] = { /* rpma_conn_req_new_from_cm_event() unit tests */ cmocka_unit_test(from_cm_event__peer_NULL), cmocka_unit_test(from_cm_event__edata_NULL), cmocka_unit_test(from_cm_event__req_ptr_NULL), cmocka_unit_test( from_cm_event__peer_NULL_edata_NULL_req_ptr_NULL), cmocka_unit_test( from_cm_event__RDMA_CM_EVENT_CONNECT_ERROR), cmocka_unit_test( from_cm_event__shared_true_srq_rcq_not_NULL), cmocka_unit_test_prestate( from_cm_event__ibv_create_comp_channel_ERRNO, &Conn_req_conn_cfg_custom), cmocka_unit_test_prestate( from_cm_event__ibv_create_comp_channel_ERRNO_rdma_ack_cm_event_ERRNO2, &Conn_req_conn_cfg_custom), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(from_cm_event__cq_new_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ(from_cm_event__cq_new_ERRNO), cmocka_unit_test(from_cm_event__rcq_new_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( from_cm_event__peer_create_qp_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ( from_cm_event__peer_create_qp_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( from_cm_event__create_qp_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ( from_cm_event__create_qp_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(from_cm_event__malloc_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ(from_cm_event__malloc_ERRNO), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( from_cm_event__malloc_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ( from_cm_event__malloc_ERRNO_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( from_cm_event__private_data_store_E_NOMEM), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ(from_cm_event__private_data_store_E_NOMEM), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ( from_cm_event__private_data_store_E_NOMEM_subsequent_ERRNO2), CONN_REQ_TEST_WITH_AND_WITHOUT_SRQ_RCQ( from_cm_event__private_data_store_E_NOMEM_subsequent_ERRNO2), /* rpma_conn_req_new_from_cm_event()/_delete() lifecycle */ CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_RCQ( conn_req_new_from_cm__lifecycle, setup__conn_req_new_from_cm_event, teardown__conn_req_new_from_cm_event), CONN_REQ_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_SRQ_RCQ( conn_req_new_from_cm__lifecycle, setup__conn_req_new_from_cm_event, teardown__conn_req_new_from_cm_event), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_from_cm_event, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/conn_req-private_data.c000066400000000000000000000057711443364775400230430ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_req-private_data.c -- the rpma_conn_req_get_private_data() unit tests * * API covered: * - rpma_conn_req_get_private_data() */ #include "conn_req-common.h" /* * get_private_data__success -- happy day scenario */ static void get_private_data__success(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ struct rpma_conn_private_data 
data; int ret = rpma_conn_req_get_private_data(cstate->req, &data); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(data.ptr, MOCK_PRIVATE_DATA); assert_int_equal(data.len, MOCK_PDATA_LEN); /* clean up test */ rpma_conn_req_delete(&cstate->req); } /* * get_private_data__conn_req_NULL -- conn_req NULL is invalid */ static void get_private_data__conn_req_NULL(void **unused) { /* run test */ struct rpma_conn_private_data data; int ret = rpma_conn_req_get_private_data(NULL, &data); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_private_data__pdata_NULL -- pdata NULL is invalid */ static void get_private_data__pdata_NULL(void **cstate_ptr) { /* WA for cmocka/issues#47 */ struct conn_req_test_state *cstate = *cstate_ptr; assert_int_equal(setup__conn_req_new_from_cm_event((void **)&cstate), 0); assert_non_null(cstate); /* configure mocks */ expect_value(rdma_destroy_qp, id, &cstate->id); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_CQ); will_return(rpma_cq_delete, MOCK_OK); expect_value(rdma_reject, id, &cstate->id); will_return(rdma_reject, MOCK_OK); if (cstate->get_args.shared) will_return(ibv_destroy_comp_channel, MOCK_OK); expect_function_call(rpma_private_data_delete); /* run test */ int ret = rpma_conn_req_get_private_data(cstate->req, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); /* clean up test */ rpma_conn_req_delete(&cstate->req); } static const struct CMUnitTest test_private_data[] = { cmocka_unit_test(get_private_data__conn_req_NULL), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(get_private_data__success), CONN_REQ_TEST_WITH_AND_WITHOUT_RCQ(get_private_data__pdata_NULL), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_private_data, NULL, NULL); } rpma-1.3.0/tests/unit/conn_req/conn_req-recv.c000066400000000000000000000042261443364775400213310ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * conn_req-recv.c -- the rpma_conn_req_recv() unit tests * * API covered: * - rpma_conn_req_recv() */ #include "conn_req-common.h" #include "mocks-ibverbs.h" #include "mocks-rdma_cm.h" /* * recv__req_NULL - NULL req is invalid */ static void recv__req_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_recv(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL - NULL dst is invalid */ static void recv__dst_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_recv(MOCK_CONN_REQ, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__req_dst_NULL - NULL req and dst are invalid */ static void recv__req_dst_NULL(void **unused) { /* run test */ int ret = rpma_conn_req_recv(NULL, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__success - happy day scenario */ static void recv__success(void **cstate_ptr) { struct conn_req_new_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_recv, qp, MOCK_QP); expect_value(rpma_mr_recv, dst, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_recv, offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_recv, len, MOCK_LEN); expect_value(rpma_mr_recv, op_context, MOCK_OP_CONTEXT); 
will_return(rpma_mr_recv, MOCK_OK); /* run test */ int ret = rpma_conn_req_recv(cstate->req, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } static const struct CMUnitTest tests_recv[] = { /* rpma_conn_req_recv() unit tests */ cmocka_unit_test(recv__req_NULL), cmocka_unit_test(recv__dst_NULL), cmocka_unit_test(recv__req_dst_NULL), cmocka_unit_test_setup_teardown(recv__success, setup__conn_req_new, teardown__conn_req_new), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_recv, NULL, NULL); } rpma-1.3.0/tests/unit/cq/000077500000000000000000000000001443364775400152175ustar00rootroot00000000000000rpma-1.3.0/tests/unit/cq/CMakeLists.txt000066400000000000000000000014051443364775400177570ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # Copyright 2021-2022, Fujitsu # include(../../cmake/ctest_helpers.cmake) function (add_test_cq name) set(src_name cq-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c cq-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${LIBRPMA_SOURCE_DIR}/cq.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_cq(get_fd) add_test_cq(get_ibv_cq) add_test_cq(get_wc) add_test_cq(new_delete) add_test_cq(wait) rpma-1.3.0/tests/unit/cq/cq-common.c000066400000000000000000000037761443364775400172710ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-common.c -- the rpma_cq unit tests common functions */ #include #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h" #include "cq-common.h" struct cq_test_state CQ_without_channel = { .shared_channel = NULL }; struct cq_test_state CQ_with_channel = { .shared_channel = MOCK_COMP_CHANNEL }; /* * setup__cq_new -- prepare a valid cq object */ int setup__cq_new(void **cq_ptr) { /* the default is CQ_without_channel */ struct cq_test_state *cstate = *cq_ptr ? 
*cq_ptr : &CQ_without_channel; /* configure mocks */ if (!cstate->shared_channel) will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_cq *cq = NULL; int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, cstate->shared_channel, &cq); /* verify the result */ assert_int_equal(ret, MOCK_OK); cstate->cq = cq; *cq_ptr = cstate; return 0; } /* * teardown__cq_delete -- destroy the cq object */ int teardown__cq_delete(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mocks */ will_return(ibv_destroy_cq, MOCK_OK); if (!cstate->shared_channel) will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_cq_delete(&cq); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_null(cq); return 0; } /* * group_setup_common_cq -- prepare common resources * for all tests in the group */ int group_setup_common_cq(void **unused) { /* set the req_notify_cq callback in mock of IBV CQ */ MOCK_VERBS->ops.req_notify_cq = ibv_req_notify_cq_mock; Ibv_cq.context = MOCK_VERBS; return 0; } rpma-1.3.0/tests/unit/cq/cq-common.h000066400000000000000000000013001443364775400172530ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-common.h -- the rpma_cq unit tests common definitions */ #ifndef CQ_COMMON #define CQ_COMMON #include "test-common.h" #include "cq.h" #define MOCK_WC_STATUS_ERROR (int)0x51A5 /* all the resources used between setup__cq_new and teardown__cq_delete */ struct cq_test_state { struct ibv_comp_channel *shared_channel; struct rpma_cq *cq; }; extern struct cq_test_state CQ_without_channel; extern struct cq_test_state CQ_with_channel; int setup__cq_new(void **cq_ptr); int teardown__cq_delete(void **cq_ptr); int group_setup_common_cq(void **unused); #endif /* CQ_COMMON */ rpma-1.3.0/tests/unit/cq/cq-get_fd.c000066400000000000000000000034241443364775400172170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-get_fd.c -- the rpma_cq_get_fd() unit tests * * API covered: * - rpma_cq_get_fd() */ #include "librpma.h" #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "cq-common.h" /* * get_fd__cq_NULL -- cq NULL is invalid */ static void get_fd__cq_NULL(void **unused) { /* run test */ int fd = 0; int ret = rpma_cq_get_fd(NULL, &fd); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_fd__fd_NULL -- fd NULL is invalid */ static void get_fd__fd_NULL(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ int ret = rpma_cq_get_fd(cq, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_fd__success -- happy day scenario */ static void get_fd__success(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ int fd = 0; int ret = rpma_cq_get_fd(cq, &fd); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(fd, MOCK_COMPLETION_FD); } /* * group_setup_get_fd -- prepare resources for all tests in the group */ static int group_setup_get_fd(void **unused) { Ibv_comp_channel.fd = MOCK_COMPLETION_FD; return 
group_setup_common_cq(NULL); } static const struct CMUnitTest tests_get_fd[] = { /* rpma_cq_get_fd() unit tests */ cmocka_unit_test(get_fd__cq_NULL), cmocka_unit_test_setup_teardown( get_fd__fd_NULL, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown( get_fd__success, setup__cq_new, teardown__cq_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_fd, group_setup_get_fd, NULL); } rpma-1.3.0/tests/unit/cq/cq-get_ibv_cq.c000066400000000000000000000017041443364775400200700ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-get_ibv_cq.c -- the rpma_cq_get_ibv_cq() unit tests * * API covered: * - rpma_cq_get_ibv_cq() */ #include "librpma.h" #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "cq-common.h" /* * get_ibv_cq__success -- happy day scenario */ static void get_ibv_cq__success(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ struct ibv_cq *ibv_cq = rpma_cq_get_ibv_cq(cq); /* verify the results */ assert_int_equal(ibv_cq, MOCK_IBV_CQ); } static const struct CMUnitTest tests_get_ibv_cq[] = { /* rpma_cq_get_ibv_cq() unit tests */ cmocka_unit_test_setup_teardown( get_ibv_cq__success, setup__cq_new, teardown__cq_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_ibv_cq, group_setup_common_cq, NULL); } rpma-1.3.0/tests/unit/cq/cq-get_wc.c000066400000000000000000000174471443364775400172450ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2022, Fujitsu */ /* * cq-get_wc.c -- the rpma_cq_get_wc() unit tests * * API covered: * - rpma_cq_get_wc() */ #include <arpa/inet.h> /* htonl() */ #include <string.h> /* memcpy(), memset() */ #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "cq-common.h" static enum ibv_wc_opcode opcodes[] = { IBV_WC_RDMA_READ, IBV_WC_RDMA_WRITE, IBV_WC_SEND, IBV_WC_RECV, IBV_WC_RECV, IBV_WC_RECV_RDMA_WITH_IMM }; static unsigned flags[] = { 0, 0, 0, 0, IBV_WC_WITH_IMM, IBV_WC_WITH_IMM }; static int All_values = sizeof(opcodes) / sizeof(opcodes[0]); /* * poll_cq -- mock of ibv_poll_cq() */ int poll_cq(struct ibv_cq *cq, int num_entries, struct ibv_wc *wc) { check_expected_ptr(cq); check_expected(num_entries); assert_non_null(wc); int result = mock_type(int); if (result < 1 || result > num_entries) return result; struct ibv_wc *wc_ret = mock_type(struct ibv_wc *); memcpy(wc, wc_ret, sizeof(struct ibv_wc) * (size_t)result); return result; } /* * get_wc__cq_NULL - cq NULL is invalid */ static void get_wc__cq_NULL(void **unused) { /* run test */ struct ibv_wc wc = {0}; int ret = rpma_cq_get_wc(NULL, 1, &wc, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_wc__num_entries_non_positive - num_entries < 1 is invalid */ static void get_wc__num_entries_non_positive(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ struct ibv_wc wc = {0}; int ret = rpma_cq_get_wc(cq, -1, &wc, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_wc__wc_NULL - wc NULL is invalid */ static void get_wc__wc_NULL(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ int ret = rpma_cq_get_wc(cq, 1, NULL, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_wc__num_entries_2_num_entries_got_NULL - num_entries > 1 *
and num_entries_got NULL are invalid */ static void get_wc__num_entries_2_num_entries_got_NULL(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ struct ibv_wc wc[2]; memset(wc, 0, sizeof(wc)); int ret = rpma_cq_get_wc(cq, 2, wc, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_wc__poll_cq_fail - ibv_poll_cq() returns -1 */ static void get_wc__poll_cq_fail(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mock */ expect_value(poll_cq, cq, MOCK_IBV_CQ); expect_value(poll_cq, num_entries, 1); will_return(poll_cq, -1); /* run test */ struct ibv_wc wc = {0}; int ret = rpma_cq_get_wc(cq, 1, &wc, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * get_wc__poll_cq_no_data - ibv_poll_cq() returns 0 (no data) */ static void get_wc__poll_cq_no_data(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mock */ expect_value(poll_cq, cq, MOCK_IBV_CQ); expect_value(poll_cq, num_entries, 1); will_return(poll_cq, 0); /* run test */ struct ibv_wc wc = {0}; int ret = rpma_cq_get_wc(cq, 1, &wc, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_NO_COMPLETION); } /* * get_wc__poll_cq_more_data - ibv_poll_cq() returns 3 (more than * num_entries) which is an abnormal situation */ static void get_wc__poll_cq_more_data(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mock */ expect_value(poll_cq, cq, MOCK_IBV_CQ); expect_value(poll_cq, num_entries, 2); will_return(poll_cq, 3); /* run test */ struct ibv_wc wc[2]; memset(wc, 0, sizeof(wc)); int num_entries_got = 0; int ret = rpma_cq_get_wc(cq, 2, wc, &num_entries_got); /* verify the result */ assert_int_equal(ret, RPMA_E_UNKNOWN); assert_int_equal(num_entries_got, 0); } /* * get_wc__success_each_opcode - handle ibv_poll_cq() successfully * with each possible value of opcode */ static void get_wc__success_each_opcode(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; for (int i = 0; i < All_values; i++) { struct ibv_wc orig_wc = {0}; /* configure mock */ expect_value(poll_cq, cq, MOCK_IBV_CQ); expect_value(poll_cq, num_entries, 1); will_return(poll_cq, 1); orig_wc.wr_id = (uint64_t)MOCK_OP_CONTEXT; orig_wc.status = IBV_WC_SUCCESS; orig_wc.opcode = opcodes[i]; orig_wc.byte_len = MOCK_LEN; if (flags[i] == IBV_WC_WITH_IMM) { /* * 'wc_flags' is of 'int' type * in older versions of libibverbs. 
*/ orig_wc.wc_flags = (typeof(orig_wc.wc_flags))flags[i]; orig_wc.imm_data = htonl(MOCK_IMM_DATA); } will_return(poll_cq, &orig_wc); /* run test */ struct ibv_wc wc = {0}; int ret = 0, num_entries_got = 0; /* alternate num_entries_got and NULL */ int use_num_entries_got = i % 2; if (use_num_entries_got) ret = rpma_cq_get_wc(cq, 1, &wc, &num_entries_got); else ret = rpma_cq_get_wc(cq, 1, &wc, NULL); /* verify the result */ assert_int_equal(ret, 0); assert_int_equal((memcmp(&orig_wc, &wc, sizeof(wc))), 0); if (use_num_entries_got) assert_int_equal(num_entries_got, 1); } } /* * get_wc__success_all_opcodes - handle ibv_poll_cq() successfully * with all possible values of opcode */ static void get_wc__success_all_opcodes(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; int n_values[3] = {2, 3, All_values}; for (int i = 0; i < 3; i++) { struct ibv_wc orig_wc[n_values[i]]; /* configure mock */ expect_value(poll_cq, cq, MOCK_IBV_CQ); expect_value(poll_cq, num_entries, n_values[i]); will_return(poll_cq, n_values[i]); for (int j = 0; j < n_values[i]; j++) { orig_wc[j].wr_id = (uint64_t)MOCK_OP_CONTEXT; orig_wc[j].status = IBV_WC_SUCCESS; orig_wc[j].opcode = opcodes[j]; orig_wc[j].byte_len = MOCK_LEN; if (flags[j] == IBV_WC_WITH_IMM) { /* * 'wc_flags' is of 'int' type * in older versions of libibverbs. */ orig_wc[j].wc_flags = (typeof(orig_wc[j].wc_flags))flags[j]; orig_wc[j].imm_data = htonl(MOCK_IMM_DATA); } } will_return(poll_cq, orig_wc); /* run test */ struct ibv_wc wc[n_values[i]]; memset(wc, 0, sizeof(wc)); int num_entries_got = 0; int ret = rpma_cq_get_wc(cq, n_values[i], wc, &num_entries_got); /* verify the result */ assert_int_equal(ret, 0); assert_int_equal((memcmp(orig_wc, wc, sizeof(wc))), 0); assert_int_equal(num_entries_got, n_values[i]); } } /* * group_setup_get_wc -- prepare resources for all tests in the group */ static int group_setup_get_wc(void **unused) { /* set the poll_cq callback in mock of IBV CQ */ MOCK_VERBS->ops.poll_cq = poll_cq; return group_setup_common_cq(NULL); } static const struct CMUnitTest tests_get_wc[] = { /* rpma_cq_get_wc() unit tests */ cmocka_unit_test(get_wc__cq_NULL), cmocka_unit_test_setup_teardown(get_wc__num_entries_non_positive, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__wc_NULL, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown( get_wc__num_entries_2_num_entries_got_NULL, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__poll_cq_fail, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__poll_cq_no_data, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__poll_cq_more_data, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__success_each_opcode, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown(get_wc__success_all_opcodes, setup__cq_new, teardown__cq_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_wc, group_setup_get_wc, NULL); } rpma-1.3.0/tests/unit/cq/cq-new_delete.c000066400000000000000000000200661443364775400201030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-new_delete.c -- the rpma_cq_new/delete() unit tests * * APIs covered: * - rpma_cq_new() * - rpma_cq_delete() */ #include #include #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h"
#include "cq-common.h" /* * new__create_comp_channel_ERRNO -- ibv_create_comp_channel() * fails with MOCK_ERRNO */ static void new__create_comp_channel_ERRNO(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, NULL); will_return(ibv_create_comp_channel, MOCK_ERRNO); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__create_cq_ERRNO -- ibv_create_cq() fails with MOCK_ERRNO */ static void new__create_cq_ERRNO(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, NULL); will_return(ibv_create_cq, MOCK_ERRNO); will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__create_cq_ERRNO_subsequent_ERRNO2 -- ibv_destroy_comp_channel() * fails with MOCK_ERRNO2 after ibv_create_cq() failed with MOCK_ERRNO */ static void new__create_cq_ERRNO_subsequent_ERRNO2(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, NULL); will_return(ibv_create_cq, MOCK_ERRNO); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__req_notify_cq_ERRNO -- ibv_req_notify_cq() fails with MOCK_ERRNO */ static void new__req_notify_cq_ERRNO(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_ERRNO); will_return(ibv_destroy_cq, MOCK_OK); will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__req_notify_cq_ERRNO_subsequent_ERRNO2 -- ibv_req_notify_cq() * fails with MOCK_ERRNO whereas subsequent (ibv_destroy_cq(), * ibv_destroy_comp_channel()) fail with MOCK_ERRNO2 */ static void new__req_notify_cq_ERRNO_subsequent_ERRNO2(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_ERRNO); will_return(ibv_destroy_cq, MOCK_ERRNO2); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); 
will_return(ibv_req_notify_cq_mock, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); will_return(ibv_destroy_cq, MOCK_OK); will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); } /* * new__malloc_ERRNO_subsequent_ERRNO2 -- malloc() fails with MOCK_ERRNO * whereas subsequent (ibv_destroy_cq(), ibv_destroy_comp_channel()) fail * with MOCK_ERRNO2 */ static void new__malloc_ERRNO_subsequent_ERRNO2(void **unused) { struct rpma_cq *cq = NULL; /* configure mocks */ will_return(ibv_create_comp_channel, MOCK_COMP_CHANNEL); expect_value(ibv_create_cq, cqe, MOCK_CQ_SIZE_DEFAULT); will_return(ibv_create_cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); will_return(ibv_destroy_cq, MOCK_ERRNO2); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* run test */ int ret = rpma_cq_new(MOCK_VERBS, MOCK_CQ_SIZE_DEFAULT, NULL, &cq); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); } /* * test_lifecycle - happy day scenario */ static void test_lifecycle(void **unused) { /* * the thing is done by setup__cq_new() and teardown__cq_delete() */ } /* * delete__cq_NULL - *cq_ptr NULL should cause quick exit */ static void delete__cq_NULL(void **unused) { struct rpma_cq *cq = NULL; /* run test */ int ret = rpma_cq_delete(&cq); /* verify the results */ assert_int_equal(ret, 0); } /* * delete__destroy_cq_ERRNO -- ibv_destroy_cq() fails with MOCK_ERRNO */ static void delete__destroy_cq_ERRNO(void **unused) { struct cq_test_state *cstate = NULL; /* WA for cmocka/issues#47 */ assert_int_equal(setup__cq_new((void **)&cstate), 0); struct rpma_cq *cq = cstate->cq; /* configure mocks */ will_return(ibv_destroy_cq, MOCK_ERRNO); will_return(ibv_destroy_comp_channel, MOCK_OK); /* run test */ int ret = rpma_cq_delete(&cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * delete__destroy_cq_ERRNO_subsequent_ERRNO2 -- ibv_destroy_comp_channel() * fails with MOCK_ERRNO2 after ibv_destroy_cq() failed with MOCK_ERRNO */ static void delete__destroy_cq_ERRNO_subsequent_ERRNO2(void **unused) { struct cq_test_state *cstate = NULL; /* WA for cmocka/issues#47 */ assert_int_equal(setup__cq_new((void **)&cstate), 0); struct rpma_cq *cq = cstate->cq; /* configure mocks */ will_return(ibv_destroy_cq, MOCK_ERRNO); will_return(ibv_destroy_comp_channel, MOCK_ERRNO2); /* run test */ int ret = rpma_cq_delete(&cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * delete__destroy_comp_channel_ERRNO -- ibv_destroy_comp_channel() * fails with MOCK_ERRNO */ static void delete__destroy_comp_channel_ERRNO(void **unused) { struct cq_test_state *cstate = NULL; /* WA for cmocka/issues#47 */ assert_int_equal(setup__cq_new((void **)&cstate), 0); struct rpma_cq *cq = cstate->cq; /* configure mocks */ will_return(ibv_destroy_cq, MOCK_OK); will_return(ibv_destroy_comp_channel, MOCK_ERRNO); /* run test */ int ret = rpma_cq_delete(&cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } static const struct CMUnitTest tests_new_delete[] = { /* rpma_cq_new() unit tests */ cmocka_unit_test(new__create_comp_channel_ERRNO), cmocka_unit_test(new__create_cq_ERRNO), cmocka_unit_test(new__create_cq_ERRNO_subsequent_ERRNO2), cmocka_unit_test(new__req_notify_cq_ERRNO), cmocka_unit_test(new__req_notify_cq_ERRNO_subsequent_ERRNO2), 
cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test(new__malloc_ERRNO_subsequent_ERRNO2), /* rpma_cq_new()/delete() lifecycle */ cmocka_unit_test_setup_teardown(test_lifecycle, setup__cq_new, teardown__cq_delete), /* rpma_cq_delete() unit tests */ cmocka_unit_test(delete__cq_NULL), cmocka_unit_test(delete__destroy_cq_ERRNO), cmocka_unit_test(delete__destroy_cq_ERRNO_subsequent_ERRNO2), cmocka_unit_test(delete__destroy_comp_channel_ERRNO), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_new_delete, group_setup_common_cq, NULL); } rpma-1.3.0/tests/unit/cq/cq-wait.c000066400000000000000000000062101443364775400167270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * cq-wait.c -- the rpma_cq_wait() unit tests * * API covered: * - rpma_cq_wait() */ #include "librpma.h" #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "cq-common.h" /* * wait__cq_NULL - cq NULL is invalid */ static void wait__cq_NULL(void **unused) { /* run test */ int ret = rpma_cq_wait(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * wait__E_SHARED_CHANNEL - completion event channel is shared */ static void wait__E_SHARED_CHANNEL(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* run test */ int ret = rpma_cq_wait(cq); /* verify the result */ assert_int_equal(ret, RPMA_E_SHARED_CHANNEL); } /* * wait__get_cq_event_ERRNO - ibv_get_cq_event() fails with MOCK_ERRNO */ static void wait__get_cq_event_ERRNO(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mock */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_ERRNO); /* run test */ int ret = rpma_cq_wait(cq); /* verify the result */ assert_int_equal(ret, RPMA_E_NO_COMPLETION); } /* * wait__req_notify_cq_ERRNO - ibv_req_notify_cq() fails with MOCK_ERRNO */ static void wait__req_notify_cq_ERRNO(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mocks */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_CQ); expect_value(ibv_ack_cq_events, cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_ERRNO); /* run test */ int ret = rpma_cq_wait(cq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * wait__success - happy day scenario */ static void wait__success(void **cq_ptr) { struct cq_test_state *cstate = *cq_ptr; struct rpma_cq *cq = cstate->cq; /* configure mocks */ expect_value(ibv_get_cq_event, channel, MOCK_COMP_CHANNEL); will_return(ibv_get_cq_event, MOCK_OK); will_return(ibv_get_cq_event, MOCK_IBV_CQ); expect_value(ibv_ack_cq_events, cq, MOCK_IBV_CQ); expect_value(ibv_req_notify_cq_mock, cq, MOCK_IBV_CQ); will_return(ibv_req_notify_cq_mock, MOCK_OK); /* run test */ int ret = rpma_cq_wait(cq); /* verify the result */ assert_int_equal(ret, MOCK_OK); } static const struct CMUnitTest tests_wait[] = { /* rpma_cq_wait() unit tests */ cmocka_unit_test(wait__cq_NULL), cmocka_unit_test_prestate_setup_teardown( wait__E_SHARED_CHANNEL, setup__cq_new, teardown__cq_delete, &CQ_with_channel), cmocka_unit_test_setup_teardown( wait__get_cq_event_ERRNO, setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown( wait__req_notify_cq_ERRNO, 
setup__cq_new, teardown__cq_delete), cmocka_unit_test_setup_teardown( wait__success, setup__cq_new, teardown__cq_delete), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_wait, group_setup_common_cq, NULL); } rpma-1.3.0/tests/unit/ep/000077500000000000000000000000001443364775400152205ustar00rootroot00000000000000rpma-1.3.0/tests/unit/ep/CMakeLists.txt000066400000000000000000000013541443364775400177630ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) function(add_test_ep name) set(src_name ep-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c ep-common.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-conn_cfg.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/ep.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_ep(get_fd) add_test_ep(listen) add_test_ep(next_conn_req) rpma-1.3.0/tests/unit/ep/ep-common.c000066400000000000000000000203051443364775400172560ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * ep-common.c -- common part of the endpoint unit tests */ #include "librpma.h" #include "ep-common.h" #include "info.h" #include "cmocka_headers.h" #include "test-common.h" const struct rdma_cm_id Cmid_zero = {0}; const struct rdma_event_channel Evch_zero = {.fd = MOCK_FD}; /* * The following graph depicts the relationships between mocked function * calls: * * _create_evench_channel--->_create_id-->\ * \ * _info_new--------------------------------->_info_bind---->_listen * * malloc (may happen at any time) * * Additionally, this test assumes each successful creation of the resource will * be paired with destroying it before the end of the setup->test->teardown * sequence e.g.: * _create_event_channel -> _destroy_event_channel */ /* * Cmocka does not allow call expect_* from setup whereas check_* will be called * on teardown. So, function creating an object which is called during setup * cannot queue any expect_* regarding the function destroying the object * which will be called in the teardown. */ int Mock_ctrl_defer_destruction = MOCK_CTRL_NO_DEFER; /* * rpma_info_bind() function requires successful creation of two types of * objects so both of them have to be created before queuing any expect_* * against rpma_info_bind(). 
*/ static struct rdma_cm_id *Mock_ctrl_info_bind_id = NULL; static struct rpma_info *Mock_ctrl_info_bind_info = NULL; static void expect_info_bind() { if (!Mock_ctrl_info_bind_id || !Mock_ctrl_info_bind_info) return; expect_value(rpma_info_bind_addr, id, Mock_ctrl_info_bind_id); expect_value(rpma_info_bind_addr, info, Mock_ctrl_info_bind_info); Mock_ctrl_info_bind_id = NULL; Mock_ctrl_info_bind_info = NULL; } static void expect_info_bind_info(struct rpma_info *info) { Mock_ctrl_info_bind_info = info; expect_info_bind(); } static void expect_info_bind_id(struct rdma_cm_id *id) { Mock_ctrl_info_bind_id = id; expect_info_bind(); } /* mocks */ /* * rdma_create_event_channel -- rdma_create_event_channel() mock */ struct rdma_event_channel * rdma_create_event_channel(void) { struct rdma_event_channel *evch = mock_type(struct rdma_event_channel *); if (evch == NULL) { errno = mock_type(int); return NULL; } /* queue expects */ expect_value(rdma_create_id, channel, evch); if (!Mock_ctrl_defer_destruction) expect_value(rdma_destroy_event_channel, channel, evch); return evch; } /* * rdma_destroy_event_channel -- rdma_destroy_event_channel() mock */ void rdma_destroy_event_channel(struct rdma_event_channel *channel) { check_expected_ptr(channel); } /* * rdma_create_id -- rdma_create_id() mock */ int rdma_create_id(struct rdma_event_channel *channel, struct rdma_cm_id **id_ptr, void *context, enum rdma_port_space ps) { check_expected_ptr(channel); assert_non_null(id_ptr); assert_null(context); assert_int_equal(ps, RDMA_PS_TCP); struct rdma_cm_id *id = mock_type(struct rdma_cm_id *); if (id == NULL) { errno = mock_type(int); return -1; } *id_ptr = id; /* queue expects */ expect_info_bind_id(id); if (!Mock_ctrl_defer_destruction) expect_value(rdma_destroy_id, id, id); return 0; } /* * rdma_destroy_id -- rdma_destroy_id() mock */ int rdma_destroy_id(struct rdma_cm_id *id) { check_expected_ptr(id); Mock_ctrl_info_bind_id = NULL; errno = mock_type(int); if (errno) return -1; return 0; } /* * rpma_info_new -- rpma_info_new() mock */ int rpma_info_new(const char *addr, const char *port, enum rpma_info_side side, struct rpma_info **info_ptr) { assert_string_equal(addr, MOCK_IP_ADDRESS); assert_string_equal(port, MOCK_PORT); assert_int_equal(side, RPMA_INFO_PASSIVE); assert_non_null(info_ptr); struct rpma_info *info = mock_type(struct rpma_info *); if (info == NULL) return mock_type(int); *info_ptr = info; /* queue expects */ expect_info_bind_info(info); if (!Mock_ctrl_defer_destruction) expect_value(rpma_info_delete, *info_ptr, info); return 0; } /* * rpma_info_delete -- rpma_info_delete() mock */ int rpma_info_delete(struct rpma_info **info_ptr) { assert_non_null(info_ptr); check_expected_ptr(*info_ptr); *info_ptr = NULL; Mock_ctrl_info_bind_info = NULL; /* if arg is valid this function cannot fail otherwise */ return 0; } /* * rpma_info_bind_addr -- rpma_info_bind_addr() mock * Note: CM ID is not modified. 
*/ int rpma_info_bind_addr(const struct rpma_info *info, struct rdma_cm_id *id) { check_expected_ptr(info); check_expected_ptr(id); assert_ptr_equal(info, MOCK_INFO); /* XXX validate the errno handling */ errno = mock_type(int); if (errno) return RPMA_E_PROVIDER; expect_value(rdma_listen, id, id); return 0; } /* * rdma_listen -- rdma_listen() mock */ int rdma_listen(struct rdma_cm_id *id, int backlog) { check_expected_ptr(id); assert_int_equal(backlog, 0); errno = mock_type(int); if (errno) return -1; return 0; } /* * rdma_get_cm_event -- rdma_get_cm_event() mock */ int rdma_get_cm_event(struct rdma_event_channel *channel, struct rdma_cm_event **event_ptr) { check_expected_ptr(channel); assert_non_null(event_ptr); struct rdma_cm_event *event = mock_type(struct rdma_cm_event *); if (!event) { errno = mock_type(int); return -1; } *event_ptr = event; return 0; } /* * rpma_conn_req_new_from_cm_event -- rpma_conn_req_new_from_cm_event() mock */ int rpma_conn_req_new_from_cm_event(struct rpma_peer *peer, struct rdma_cm_event *event, const struct rpma_conn_cfg *cfg, struct rpma_conn_req **req_ptr) { check_expected_ptr(peer); check_expected_ptr(event); check_expected_ptr(cfg); assert_non_null(req_ptr); struct rpma_conn_req *req = mock_type(struct rpma_conn_req *); if (!req) return mock_type(int); *req_ptr = req; return 0; } /* * rpma_conn_req_delete -- rpma_conn_req_delete() mock */ int rpma_conn_req_delete(struct rpma_conn_req **req_ptr) { assert_non_null(req_ptr); check_expected_ptr(*req_ptr); *req_ptr = NULL; return 0; } /* * rdma_ack_cm_event -- rdma_ack_cm_event() mock */ int rdma_ack_cm_event(struct rdma_cm_event *event) { check_expected_ptr(event); return mock_type(int); } /* * rdma_event_str -- rdma_event_str() mock */ const char * rdma_event_str(enum rdma_cm_event_type event) { return ""; } /* * prestate_init -- initialize ep_listen prestate */ void prestate_init(struct ep_test_state *prestate, struct rpma_conn_cfg *cfg) { memset(prestate, 0, sizeof(struct ep_test_state)); prestate->cfg = cfg; } /* setups and teardowns */ /* * setup__ep_listen - prepare a valid rpma_ep object */ int setup__ep_listen(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; memset(&estate->cmid, 0, sizeof(struct rdma_cm_id)); estate->evch.fd = MOCK_FD; /* configure mocks: */ Mock_ctrl_defer_destruction = MOCK_CTRL_DEFER; will_return(rdma_create_event_channel, &estate->evch); will_return(rdma_create_id, &estate->cmid); will_return(rpma_info_new, MOCK_INFO); will_return(rpma_info_bind_addr, MOCK_OK); will_return(rdma_listen, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); expect_value(rpma_info_delete, *info_ptr, MOCK_INFO); /* prepare an object */ int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &estate->ep); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(estate->ep); assert_int_equal(memcmp(&estate->cmid, &Cmid_zero, sizeof(struct rdma_cm_id)), 0); assert_int_equal(memcmp(&estate->evch, &Evch_zero, sizeof(struct rdma_event_channel)), 0); /* restore default mock configuration */ Mock_ctrl_defer_destruction = MOCK_CTRL_NO_DEFER; return 0; } /* * teardown__ep_shutdown - delete the rpma_ep object */ int teardown__ep_shutdown(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; /* configure mocks: */ expect_value(rdma_destroy_id, id, &estate->cmid); will_return(rdma_destroy_id, MOCK_OK); expect_value(rdma_destroy_event_channel, channel, &estate->evch); /* delete the object */ int ret = rpma_ep_shutdown(&estate->ep); /* verify the results */ 
assert_int_equal(ret, MOCK_OK); assert_null(estate->ep); assert_int_equal(memcmp(&estate->cmid, &Cmid_zero, sizeof(estate->cmid)), 0); assert_int_equal(memcmp(&estate->evch, &Evch_zero, sizeof(estate->evch)), 0); return 0; } rpma-1.3.0/tests/unit/ep/ep-common.h000066400000000000000000000016611443364775400172670ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * ep-common.h -- header of the common part of the endpoint unit tests */ #ifndef EP_COMMON_H #define EP_COMMON_H 1 #include <rdma/rdma_cma.h> /* struct rdma_cm_id, struct rdma_event_channel */ #define MOCK_CONN_REQ (struct rpma_conn_req *)0xCFEF #define MOCK_FD 0x00FD /* mock control entities */ #define MOCK_CTRL_DEFER 1 #define MOCK_CTRL_NO_DEFER 0 extern const struct rdma_cm_id Cmid_zero; extern const struct rdma_event_channel Evch_zero; extern int Mock_ctrl_defer_destruction; int setup__ep_listen(void **estate_ptr); int teardown__ep_shutdown(void **estate_ptr); /* * All the resources used between setup__ep_listen and teardown__ep_shutdown. */ struct ep_test_state { struct rdma_event_channel evch; struct rdma_cm_id cmid; struct rpma_ep *ep; struct rpma_conn_cfg *cfg; }; void prestate_init(struct ep_test_state *prestate, struct rpma_conn_cfg *cfg); #endif /* EP_COMMON_H */ rpma-1.3.0/tests/unit/ep/ep-get_fd.c000066400000000000000000000037001443364775400172160ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * ep-get_fd.c -- the endpoint unit tests * * API covered: * - rpma_ep_get_fd() */ #include "librpma.h" #include "ep-common.h" #include "cmocka_headers.h" #include "test-common.h" /* * get_fd__ep_NULL -- ep NULL is invalid */ static void get_fd__ep_NULL(void **unused) { /* run test */ int fd = 0; int ret = rpma_ep_get_fd(NULL, &fd); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); assert_int_equal(fd, 0); } /* * get_fd__fd_NULL - fd NULL is invalid */ static void get_fd__fd_NULL(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; /* run test */ int ret = rpma_ep_get_fd(estate->ep, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_fd__ep_fd_NULL -- ep and fd NULL are invalid */ static void get_fd__ep_fd_NULL(void **unused) { /* run test */ int ret = rpma_ep_get_fd(NULL, NULL); /* verify the results */ assert_ptr_equal(ret, RPMA_E_INVAL); } /* * get_fd__success - happy day scenario */ static void get_fd__success(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; /* run test */ int fd = 0; int ret = rpma_ep_get_fd(estate->ep, &fd); /* verify the results */ assert_ptr_equal(ret, MOCK_OK); assert_ptr_equal(fd, MOCK_FD); } int main(int argc, char *argv[]) { /* prepare prestates */ struct ep_test_state prestate_conn_cfg_default; prestate_init(&prestate_conn_cfg_default, NULL); const struct CMUnitTest tests[] = { /* rpma_ep_get_fd() unit tests */ cmocka_unit_test(get_fd__ep_NULL), cmocka_unit_test_prestate_setup_teardown( get_fd__fd_NULL, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test(get_fd__ep_fd_NULL), cmocka_unit_test_prestate_setup_teardown( get_fd__success, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/ep/ep-listen.c000066400000000000000000000225201443364775400172650ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * ep-listen.c -- the endpoint unit tests * * APIs
covered: * - rpma_ep_listen() * - rpma_ep_shutdown() */ #include "librpma.h" #include "ep-common.h" #include "cmocka_headers.h" #include "test-common.h" static struct ep_test_state prestate_conn_cfg_default; /* * listen__peer_NULL - NULL peer is invalid */ static void listen__peer_NULL(void **unused) { /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(NULL, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(ep); } /* * listen__addr_NULL - NULL addr is invalid */ static void listen__addr_NULL(void **unused) { /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, NULL, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(ep); } /* * listen__port_NULL - NULL port is invalid */ static void listen__port_NULL(void **unused) { /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, NULL, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(ep); } /* * listen__ep_ptr_NULL - NULL ep_ptr is invalid */ static void listen__ep_ptr_NULL(void **unused) { /* run test */ int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * listen__peer_addr_port_ep_ptr_NULL - peer, addr, port * and ep_ptr == NULL are invalid */ static void listen__peer_addr_port_ep_ptr_NULL(void **unused) { /* run test */ int ret = rpma_ep_listen(NULL, NULL, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * listen__create_evch_ERRNO - rdma_create_event_channel() fails * with MOCK_ERRNO */ static void listen__create_evch_ERRNO(void **unused) { /* * configure mocks for: * - constructing: */ will_return(rdma_create_event_channel, NULL); will_return(rdma_create_event_channel, MOCK_ERRNO); /* - things which may happen: */ will_return_maybe(rpma_info_new, MOCK_INFO); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ep); } /* * listen__create_id_ERRNO - rdma_create_id() fails with MOCK_ERRNO */ static void listen__create_id_ERRNO(void **unused) { /* * configure mocks: * - constructing */ struct rdma_event_channel evch; will_return(rdma_create_event_channel, &evch); will_return(rdma_create_id, NULL); will_return(rdma_create_id, MOCK_ERRNO); /* - things which may happen: */ will_return_maybe(rpma_info_new, MOCK_INFO); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ep); } /* * listen__info_new_E_NOMEM - rpma_info_new() returns RPMA_E_NOMEM */ static void listen__info_new_E_NOMEM(void **unused) { /* * configure mocks for: * - constructing */ will_return(rpma_info_new, NULL); will_return(rpma_info_new, RPMA_E_NOMEM); /* - things which may happen: */ struct rdma_event_channel evch; will_return_maybe(rdma_create_event_channel, &evch); struct rdma_cm_id id; will_return_maybe(rdma_create_id, &id); will_return_maybe(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(ep); } /* * 
listen__info_bind_addr_E_PROVIDER - rpma_info_bind_addr() returns * RPMA_E_PROVIDER */ static void listen__info_bind_addr_E_PROVIDER(void **unused) { /* * configure mocks for: * - constructing */ struct rdma_event_channel evch; will_return(rdma_create_event_channel, &evch); struct rdma_cm_id id; will_return(rdma_create_id, &id); will_return(rpma_info_new, MOCK_INFO); will_return(rpma_info_bind_addr, MOCK_ERRNO); /* - deconstructing */ will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ep); } /* * listen__listen_ERRNO - rdma_listen() fails with MOCK_ERRNO */ static void listen__listen_ERRNO(void **unused) { /* * configure mocks for: * - constructing */ struct rdma_event_channel evch; will_return(rdma_create_event_channel, &evch); struct rdma_cm_id id; will_return(rdma_create_id, &id); will_return(rpma_info_new, MOCK_INFO); will_return(rpma_info_bind_addr, MOCK_OK); will_return(rdma_listen, MOCK_ERRNO); /* - deconstructing */ will_return(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ep); } /* * listen__malloc_ERRNO - malloc() fails with MOCK_ERRNO */ static void listen__malloc_ERRNO(void **unused) { /* * configure mocks for: * - constructing */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* - things which may happen: */ struct rdma_event_channel evch; will_return_maybe(rdma_create_event_channel, &evch); struct rdma_cm_id id; will_return_maybe(rdma_create_id, &id); will_return_maybe(rpma_info_new, MOCK_INFO); will_return_maybe(rpma_info_bind_addr, MOCK_OK); will_return_maybe(rdma_listen, MOCK_OK); will_return_maybe(rdma_destroy_id, MOCK_OK); /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(ep); } /* * listen__malloc_ERRNO_destroy_id_ERRNO2 - malloc() fails with MOCK_ERRNO * rdma_destroy_id() fails with MOCK_ERRNO2 consequently during the handling * of the first error * * Note: test assumes rdma_create_id() is called before the first failing * malloc() */ static void listen__malloc_ERRNO_destroy_id_ERRNO2(void **unused) { /* * configure mocks for: * - constructing */ struct rdma_event_channel evch; will_return(rdma_create_event_channel, &evch); struct rdma_cm_id id; will_return(rdma_create_id, &id); will_return(rpma_info_new, MOCK_INFO); will_return(rpma_info_bind_addr, MOCK_OK); will_return(rdma_listen, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); /* first error */ /* - deconstructing */ will_return(rdma_destroy_id, MOCK_ERRNO2); /* second error */ /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_listen(MOCK_PEER, MOCK_IP_ADDRESS, MOCK_PORT, &ep); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(ep); } /* * shutdown__ep_ptr_NULL - NULL ep_ptr is invalid */ static void shutdown__ep_ptr_NULL(void **unused) { /* run test */ int ret = rpma_ep_shutdown(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * shutdown__ep_NULL - NULL ep is valid */ static void shutdown__ep_NULL(void **unused) { /* run test */ struct rpma_ep *ep = NULL; int ret = rpma_ep_shutdown(&ep); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(ep); } /* * 
ep__lifecycle - happy day scenario */ static void ep__lifecycle(void **unused) { /* * The thing is done by setup__ep_listen() * and teardown__ep_shutdown(). */ } /* * shutdown__destroy_id_ERRNO -- rdma_destroy_id() fails with MOCK_ERRNO */ static void shutdown__destroy_id_ERRNO(void **unused) { struct ep_test_state *estate = &prestate_conn_cfg_default; setup__ep_listen((void **)&estate); /* configure mocks */ expect_value(rdma_destroy_id, id, &estate->cmid); will_return(rdma_destroy_id, MOCK_ERRNO); expect_value(rdma_destroy_event_channel, channel, &estate->evch); /* run test */ int ret = rpma_ep_shutdown(&estate->ep); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(estate->ep); assert_int_equal(memcmp(&estate->cmid, &Cmid_zero, sizeof(estate->cmid)), 0); assert_int_equal(memcmp(&estate->evch, &Evch_zero, sizeof(estate->evch)), 0); } int main(int argc, char *argv[]) { /* prepare prestates */ prestate_init(&prestate_conn_cfg_default, NULL); const struct CMUnitTest tests[] = { /* rpma_ep_listen() unit tests */ cmocka_unit_test(listen__peer_NULL), cmocka_unit_test(listen__addr_NULL), cmocka_unit_test(listen__port_NULL), cmocka_unit_test(listen__ep_ptr_NULL), cmocka_unit_test(listen__peer_addr_port_ep_ptr_NULL), cmocka_unit_test(listen__create_evch_ERRNO), cmocka_unit_test(listen__create_id_ERRNO), cmocka_unit_test(listen__info_new_E_NOMEM), cmocka_unit_test(listen__info_bind_addr_E_PROVIDER), cmocka_unit_test(listen__listen_ERRNO), cmocka_unit_test(listen__malloc_ERRNO), cmocka_unit_test(listen__malloc_ERRNO_destroy_id_ERRNO2), /* rpma_ep_listen()/_shutdown() lifecycle */ cmocka_unit_test_prestate_setup_teardown(ep__lifecycle, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), /* rpma_ep_shutdown() unit tests */ cmocka_unit_test(shutdown__ep_ptr_NULL), cmocka_unit_test(shutdown__ep_NULL), cmocka_unit_test(shutdown__destroy_id_ERRNO), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/ep/ep-next_conn_req.c000066400000000000000000000232301443364775400206300ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * ep-next_conn_req.c -- the endpoint unit tests * * API covered: * - rpma_ep_next_conn_req() */ #include "librpma.h" #include "ep-common.h" #include "cmocka_headers.h" #include "mocks-rpma-conn_cfg.h" #include "test-common.h" /* * next_conn_req__ep_NULL - NULL ep is invalid */ static void next_conn_req__ep_NULL(void **unused) { /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(NULL, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * next_conn_req__req_NULL - NULL req is invalid */ static void next_conn_req__req_NULL(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; /* run test */ int ret = rpma_ep_next_conn_req(estate->ep, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * next_conn_req__ep_NULL_req_NULL - NULL ep and NULL req are invalid */ static void next_conn_req__ep_NULL_req_NULL(void **unused) { /* run test */ int ret = rpma_ep_next_conn_req(NULL, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * next_conn_req__get_cm_event_ERRNO - * rdma_get_cm_event() fails with MOCK_ERRNO */ static void next_conn_req__get_cm_event_ERRNO(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); 
will_return(rdma_get_cm_event, NULL); will_return(rdma_get_cm_event, MOCK_ERRNO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * next_conn_req__get_cm_event_ENODATA - * rdma_get_cm_event() fails with ENODATA */ static void next_conn_req__get_cm_event_ENODATA(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); will_return(rdma_get_cm_event, NULL); will_return(rdma_get_cm_event, ENODATA); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NO_EVENT); assert_null(req); } /* * next_conn_req__event_REJECTED - * RDMA_CM_EVENT_REJECTED is unexpected */ static void next_conn_req__event_REJECTED(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_REJECTED; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * next_conn_req__event_REJECTED_ack_ERRNO - * rdma_ack_cm_event() fails with MOCK_ERRNO after obtaining * an RDMA_CM_EVENT_REJECTED event (!= RDMA_CM_EVENT_CONNECT_REQUEST) */ static void next_conn_req__event_REJECTED_ack_ERRNO(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_REJECTED; will_return(rdma_get_cm_event, &event); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_ERRNO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(req); } /* * next_conn_req__from_cm_event_E_NOMEM - * rpma_conn_req_new_from_cm_event() returns RPMA_E_NOMEM */ static void next_conn_req__from_cm_event_E_NOMEM(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_CONNECT_REQUEST; will_return(rdma_get_cm_event, &event); expect_value(rpma_conn_req_new_from_cm_event, peer, MOCK_PEER); expect_value(rpma_conn_req_new_from_cm_event, event, &event); expect_value(rpma_conn_req_new_from_cm_event, cfg, MOCK_CONN_CFG_DEFAULT); will_return(rpma_conn_req_new_from_cm_event, NULL); will_return(rpma_conn_req_new_from_cm_event, RPMA_E_NOMEM); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * next_conn_req__from_cm_event_E_NOMEM_ack_ERRNO - * rpma_conn_req_new_from_cm_event() returns RPMA_E_NOMEM * and rdma_ack_cm_event() fails with MOCK_ERRNO */ static void next_conn_req__from_cm_event_E_NOMEM_ack_ERRNO(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = 
RDMA_CM_EVENT_CONNECT_REQUEST; will_return(rdma_get_cm_event, &event); expect_value(rpma_conn_req_new_from_cm_event, peer, MOCK_PEER); expect_value(rpma_conn_req_new_from_cm_event, event, &event); expect_value(rpma_conn_req_new_from_cm_event, cfg, MOCK_CONN_CFG_DEFAULT); will_return(rpma_conn_req_new_from_cm_event, NULL); will_return(rpma_conn_req_new_from_cm_event, RPMA_E_NOMEM); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_ERRNO); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, NULL, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(req); } /* * next_conn_req__rdma_ack_cm_event_ERRNO - rdma_ack_cm_event() fails with MOCK_ERRNO */ static void next_conn_req__rdma_ack_cm_event_ERRNO(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_CONNECT_REQUEST; will_return(rdma_get_cm_event, &event); expect_value(rpma_conn_req_new_from_cm_event, peer, MOCK_PEER); expect_value(rpma_conn_req_new_from_cm_event, event, &event); expect_value(rpma_conn_req_new_from_cm_event, cfg, (estate->cfg == NULL ? MOCK_CONN_CFG_DEFAULT : estate->cfg)); will_return(rpma_conn_req_new_from_cm_event, MOCK_CONN_REQ); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_ERRNO); expect_value(rpma_conn_req_delete, *req_ptr, MOCK_CONN_REQ); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, estate->cfg, &req); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(req); } /* * next_conn_req__success - happy day scenario */ static void next_conn_req__success(void **estate_ptr) { struct ep_test_state *estate = *estate_ptr; expect_value(rdma_get_cm_event, channel, &estate->evch); struct rdma_cm_event event; event.event = RDMA_CM_EVENT_CONNECT_REQUEST; will_return(rdma_get_cm_event, &event); expect_value(rpma_conn_req_new_from_cm_event, peer, MOCK_PEER); expect_value(rpma_conn_req_new_from_cm_event, event, &event); expect_value(rpma_conn_req_new_from_cm_event, cfg, (estate->cfg == NULL ? 
MOCK_CONN_CFG_DEFAULT : estate->cfg)); will_return(rpma_conn_req_new_from_cm_event, MOCK_CONN_REQ); expect_value(rdma_ack_cm_event, event, &event); will_return(rdma_ack_cm_event, MOCK_OK); /* run test */ struct rpma_conn_req *req = NULL; int ret = rpma_ep_next_conn_req(estate->ep, estate->cfg, &req); /* verify the results */ assert_ptr_equal(req, MOCK_CONN_REQ); assert_int_equal(ret, 0); } int main(int argc, char *argv[]) { /* prepare prestates */ struct ep_test_state prestate_conn_cfg_default; prestate_init(&prestate_conn_cfg_default, NULL); struct ep_test_state prestate_conn_cfg_custom; prestate_init(&prestate_conn_cfg_custom, MOCK_CONN_CFG_CUSTOM); const struct CMUnitTest tests[] = { /* rpma_ep_next_conn_req() unit tests */ cmocka_unit_test(next_conn_req__ep_NULL), cmocka_unit_test_prestate_setup_teardown( next_conn_req__req_NULL, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test(next_conn_req__ep_NULL_req_NULL), cmocka_unit_test_prestate_setup_teardown( next_conn_req__get_cm_event_ERRNO, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__get_cm_event_ENODATA, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__event_REJECTED, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__event_REJECTED_ack_ERRNO, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__from_cm_event_E_NOMEM, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__from_cm_event_E_NOMEM_ack_ERRNO, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), cmocka_unit_test_prestate_setup_teardown( next_conn_req__rdma_ack_cm_event_ERRNO, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default), {"next_conn_req__success_conn_cfg_default", next_conn_req__success, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_default}, {"next_conn_req__success_conn_cfg_custom", next_conn_req__success, setup__ep_listen, teardown__ep_shutdown, &prestate_conn_cfg_custom}, }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/error/000077500000000000000000000000001443364775400157455ustar00rootroot00000000000000rpma-1.3.0/tests/unit/error/CMakeLists.txt000066400000000000000000000004011443364775400205000ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) build_test_src(UNIT NAME ut-error SRCS error.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c) add_test_generic(NAME ut-error TRACERS none) rpma-1.3.0/tests/unit/error/error.c000066400000000000000000000057111443364775400172460ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * error.c -- unit tests for error-handling rpma-err module * * APIs covered: * - rpma_err_2str() */ #include "cmocka_headers.h" #include "librpma.h" /* * err_2str__SUCCESS - sanity test for rpma_err_2str() */ static void err_2str__SUCCESS(void **unused) { assert_string_equal(rpma_err_2str(0), "Success"); } /* * err_2str__E_NOSUPP - sanity test for rpma_err_2str() */ static void err_2str__E_NOSUPP(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_NOSUPP), "Not supported"); } /* * err_2str__E_PROVIDER - 
sanity test for rpma_err_2str() */ static void err_2str__E_PROVIDER(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_PROVIDER), "Provider error occurred"); } /* * err_2str__E_NOMEM - sanity test for rpma_err_2str() */ static void err_2str__E_NOMEM(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_NOMEM), "Out of memory"); } /* * err_2str__E_INVAL - sanity test for rpma_err_2str() */ static void err_2str__E_INVAL(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_INVAL), "Invalid argument"); } /* * err_2str__E_NO_COMPLETION - sanity test for rpma_err_2str() */ static void err_2str__E_NO_COMPLETION(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_NO_COMPLETION), "No next completion available"); } /* * err_2str__E_NO_NEXT - sanity test for rpma_err_2str() */ static void err_2str__E_NO_NEXT(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_NO_EVENT), "No next event available"); } /* * err_2str__E_AGAIN - sanity test for rpma_err_2str() */ static void err_2str__E_AGAIN(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_AGAIN), "Temporary error, try again"); } /* * err_2str__E_SHARED_CHANNEL - sanity test for rpma_err_2str() */ static void err_2str__E_SHARED_CHANNEL(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_SHARED_CHANNEL), "Completion channel is shared"); } /* * err_2str__E_NOT_SHARED_CHNL - sanity test for rpma_err_2str() */ static void err_2str__E_NOT_SHARED_CHNL(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_NOT_SHARED_CHNL), "Completion channel is not shared"); } /* * err_2str__E_UNKNOWN - sanity test for rpma_err_2str() */ static void err_2str__E_UNKNOWN(void **unused) { assert_string_equal(rpma_err_2str(RPMA_E_UNKNOWN), "Unknown error"); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { cmocka_unit_test(err_2str__SUCCESS), cmocka_unit_test(err_2str__E_NOSUPP), cmocka_unit_test(err_2str__E_PROVIDER), cmocka_unit_test(err_2str__E_NOMEM), cmocka_unit_test(err_2str__E_INVAL), cmocka_unit_test(err_2str__E_NO_COMPLETION), cmocka_unit_test(err_2str__E_NO_NEXT), cmocka_unit_test(err_2str__E_AGAIN), cmocka_unit_test(err_2str__E_SHARED_CHANNEL), cmocka_unit_test(err_2str__E_NOT_SHARED_CHNL), cmocka_unit_test(err_2str__E_UNKNOWN), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/flush/000077500000000000000000000000001443364775400157355ustar00rootroot00000000000000rpma-1.3.0/tests/unit/flush/CMakeLists.txt000066400000000000000000000015741443364775400205030ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright (c) 2023 Fujitsu Limited # include(../../cmake/ctest_helpers.cmake) function(add_test_flush name) set(src_name flush-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c flush-common.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/flush.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-mr.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${TEST_UNIT_COMMON_DIR}/mocks-unistd.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc,--wrap=mmap,--wrap=munmap,--wrap=sysconf") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_flush(execute) add_test_flush(new) rpma-1.3.0/tests/unit/flush/flush-common.c000066400000000000000000000067101443364775400205140ustar00rootroot00000000000000// SPDX-License-Identifier: 
BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * flush-common.c -- common part of unit tests of the flush module */ #include "cmocka_headers.h" #include "flush.h" #include "flush-common.h" #include "mocks-stdlib.h" #include "mocks-unistd.h" #include "test-common.h" /* * setup__apm_flush_new - prepare a valid rpma_flush object */ int setup__apm_flush_new(void **fstate_ptr) { static struct flush_test_state fstate = {0}; /* configure mocks */ will_return_always(__wrap__test_malloc, MOCK_OK); #ifdef NATIVE_FLUSH_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, NULL); #endif will_return(__wrap_sysconf, MOCK_OK); will_return(__wrap_mmap, MOCK_OK); will_return(__wrap_mmap, &fstate.allocated_raw); expect_value(rpma_mr_reg, peer, MOCK_PEER); expect_value(rpma_mr_reg, size, 8); expect_value(rpma_mr_reg, usage, RPMA_MR_USAGE_READ_DST); will_return(rpma_mr_reg, &fstate.allocated_raw.addr); will_return(rpma_mr_reg, MOCK_RPMA_MR_LOCAL); /* run test */ int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &fstate.flush); /* verify the results */ assert_int_equal(ret, 0); assert_non_null(fstate.flush); *fstate_ptr = &fstate; return 0; } /* * teardown__apm_flush_delete - delete the rpma_flush object */ int teardown__apm_flush_delete(void **fstate_ptr) { struct flush_test_state *fstate = *fstate_ptr; /* configure mock */ expect_value(rpma_mr_dereg, *mr_ptr, MOCK_RPMA_MR_LOCAL); will_return(rpma_mr_dereg, MOCK_OK); will_return(__wrap_munmap, &fstate->allocated_raw); will_return(__wrap_munmap, MOCK_OK); /* delete the object */ int ret = rpma_flush_delete(&fstate->flush); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(fstate->flush); return 0; } #ifdef NATIVE_FLUSH_SUPPORTED /* * setup__native_flush_new - prepare a valid rpma_flush object */ int setup__native_flush_new(void **fstate_ptr) { static struct flush_test_state fstate = {0}; /* configure mocks */ will_return_always(__wrap__test_malloc, MOCK_OK); expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, MOCK_QPX); /* run test */ int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &fstate.flush); /* verify the results */ assert_int_equal(ret, 0); assert_non_null(fstate.flush); *fstate_ptr = &fstate; return 0; } /* * teardown__native_flush_delete - delete the rpma_flush object */ int teardown__native_flush_delete(void **fstate_ptr) { struct flush_test_state *fstate = *fstate_ptr; /* delete the object */ int ret = rpma_flush_delete(&fstate->flush); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(fstate->flush); return 0; } #endif /* * group_setup_flush_common -- prepare resources for all tests in the group */ int group_setup_flush_common(void **unused) { #ifdef NATIVE_FLUSH_SUPPORTED /* configure global mocks */ /* * ibv_wr_start(), ibv_wr_flush() and ibv_wr_complete() are defined * as static inline functions in the included header , * so we cannot define them again. They are defined as: * { * return qp->wr_start(qp); * } * { * return qp->wr_flush(qp, rkey, remote_addr, len, type, level); * } * { * return qp->wr_complete(qp); * } * so we can set these three function pointers to our mock functions. 
*/ Ibv_qp_ex.wr_start = ibv_wr_start_mock; Ibv_qp_ex.wr_flush = ibv_wr_flush_mock; Ibv_qp_ex.wr_complete = ibv_wr_complete_mock; #endif return 0; } rpma-1.3.0/tests/unit/flush/flush-common.h000066400000000000000000000021541443364775400205170ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2021, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * flush-common.h -- header of the common part of unit tests * of the flush module */ #ifndef FLUSH_COMMON_H #define FLUSH_COMMON_H 1 #include "mocks-stdlib.h" #include "mocks-ibverbs.h" #define MOCK_RPMA_MR_REMOTE (struct rpma_mr_remote *)0xC412 #define MOCK_RPMA_MR_LOCAL (struct rpma_mr_local *)0xC411 #define MOCK_REMOTE_OFFSET (size_t)0xC414 #define MOCK_LEN (size_t)0xC415 #define MOCK_FLAGS (int)0xC416 #define MOCK_OP_CONTEXT (void *)0xC417 #define MOCK_RAW_LEN 8 /* * All the resources used between setup__{apm, native}_flush_new and * teardown__{apm, native}_flush_delete. */ struct flush_test_state { struct rpma_flush *flush; struct mmap_args allocated_raw; }; int setup__apm_flush_new(void **fstate_ptr); int teardown__apm_flush_delete(void **fstate_ptr); #ifdef NATIVE_FLUSH_SUPPORTED int setup__native_flush_new(void **fstate_ptr); int teardown__native_flush_delete(void **fstate_ptr); #endif int group_setup_flush_common(void **unused); #endif /* FLUSH_COMMON_H */ rpma-1.3.0/tests/unit/flush/flush-execute.c000066400000000000000000000052711443364775400206670ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * flush-execute.c -- unit tests of the flush module * * API covered: * - rpma_flush_apm_execute * - rpma_flush_native_execute */ #include "cmocka_headers.h" #include "flush.h" #include "mocks-ibverbs.h" #include "mocks-unistd.h" #include "test-common.h" #include "flush-common.h" /* * apm_execute__success -- rpma_flush_apm_execute() success */ static void apm_execute__success(void **fstate_ptr) { /* configure mocks */ expect_value(rpma_mr_read, qp, MOCK_QP); expect_value(rpma_mr_read, dst, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_read, dst_offset, 0); expect_value(rpma_mr_read, src, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_read, src_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_mr_read, len, MOCK_RAW_LEN); expect_value(rpma_mr_read, flags, MOCK_FLAGS); expect_value(rpma_mr_read, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_read, MOCK_OK); /* run test */ struct flush_test_state *fstate = *fstate_ptr; int ret = fstate->flush->func(MOCK_QP, fstate->flush, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, MOCK_OP_CONTEXT); assert_int_equal(ret, MOCK_OK); } #ifdef NATIVE_FLUSH_SUPPORTED /* * native_execute__success -- rpma_flush_native_execute() success */ static void native_execute__success(void **fstate_ptr) { /* configure mocks */ expect_value(rpma_mr_flush, qp, MOCK_QP); expect_value(rpma_mr_flush, dst, MOCK_RPMA_MR_REMOTE); expect_value(rpma_mr_flush, dst_offset, MOCK_REMOTE_OFFSET); expect_value(rpma_mr_flush, len, MOCK_LEN); expect_value(rpma_mr_flush, type, RPMA_FLUSH_TYPE_VISIBILITY); expect_value(rpma_mr_flush, flags, MOCK_FLAGS); expect_value(rpma_mr_flush, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_flush, MOCK_OK); /* run test */ struct flush_test_state *fstate = *fstate_ptr; int ret = fstate->flush->func(MOCK_QP, fstate->flush, MOCK_RPMA_MR_REMOTE, MOCK_REMOTE_OFFSET, MOCK_LEN, RPMA_FLUSH_TYPE_VISIBILITY, MOCK_FLAGS, 
MOCK_OP_CONTEXT); assert_int_equal(ret, MOCK_OK); } #endif int main(int argc, char *argv[]) { enable_unistd_mocks(); const struct CMUnitTest tests[] = { /* rpma_flush_apm_execute() unit tests */ cmocka_unit_test_setup_teardown(apm_execute__success, setup__apm_flush_new, teardown__apm_flush_delete), #ifdef NATIVE_FLUSH_SUPPORTED /* rpma_flush_native_execute() unit tests */ cmocka_unit_test_setup_teardown(native_execute__success, setup__native_flush_new, teardown__native_flush_delete), #endif }; int ret = cmocka_run_group_tests(tests, group_setup_flush_common, NULL); disable_unistd_mocks(); return ret; } rpma-1.3.0/tests/unit/flush/flush-new.c000066400000000000000000000150411443364775400200120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * flush-new.c -- unit tests of the flush module * * APIs covered: * - rpma_flush_new * - rpma_flush_delete */ #include "cmocka_headers.h" #include "flush.h" #include "flush-common.h" #include "mocks-stdlib.h" #include "mocks-unistd.h" #include "test-common.h" #include /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_flush *flush = NULL; int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &flush); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(flush); } /* * new__apm_sysconf_ERRNO -- sysconf() fails with MOCK_ERRNO */ static void new__apm_sysconf_ERRNO(void **unused) { /* configure mocks */ will_return_always(__wrap__test_malloc, MOCK_OK); #ifdef NATIVE_FLUSH_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, NULL); #endif will_return(__wrap_sysconf, MOCK_ERRNO); /* run test */ struct rpma_flush *flush = NULL; int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &flush); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(flush); } /* * new__apm_mmap_MAP_FAILED -- mmap() fails with MAP_FAILED */ static void new__apm_mmap_MAP_FAILED(void **unused) { /* configure mocks */ will_return_always(__wrap__test_malloc, MOCK_OK); #ifdef NATIVE_FLUSH_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, NULL); #endif will_return(__wrap_sysconf, MOCK_OK); will_return(__wrap_mmap, MAP_FAILED); /* run test */ struct rpma_flush *flush = NULL; int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &flush); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(flush); } /* * new__apm_mr_reg_E_NOMEM_munmap_ERRNO -- munmap() fails with MOCK_ERRNO * after rpma_mr_reg() failed with RPMA_E_NOMEM */ static void new__apm_mr_reg_E_NOMEM_munmap_ERRNO(void **unused) { /* configure mocks */ will_return_always(__wrap__test_malloc, MOCK_OK); #ifdef NATIVE_FLUSH_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, NULL); #endif will_return(__wrap_sysconf, MOCK_OK); struct mmap_args allocated_raw = {0}; will_return(__wrap_mmap, MOCK_OK); will_return(__wrap_mmap, &allocated_raw); expect_value(rpma_mr_reg, peer, MOCK_PEER); expect_value(rpma_mr_reg, size, 8); expect_value(rpma_mr_reg, usage, RPMA_MR_USAGE_READ_DST); will_return(rpma_mr_reg, &allocated_raw.addr); will_return(rpma_mr_reg, NULL); will_return(rpma_mr_reg, RPMA_E_NOMEM); will_return(__wrap_munmap, &allocated_raw); will_return(__wrap_munmap, MOCK_ERRNO); /* run test */ struct rpma_flush *flush = NULL; int ret = 
rpma_flush_new(MOCK_PEER, MOCK_QP, &flush); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(flush); } /* * new__apm_malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__apm_malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); #ifdef NATIVE_FLUSH_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, NULL); #endif will_return(__wrap_sysconf, MOCK_OK); struct mmap_args allocated_raw = {0}; will_return(__wrap_mmap, MOCK_OK); will_return(__wrap_mmap, &allocated_raw); expect_value(rpma_mr_reg, peer, MOCK_PEER); expect_value(rpma_mr_reg, size, 8); expect_value(rpma_mr_reg, usage, RPMA_MR_USAGE_READ_DST); will_return(rpma_mr_reg, &allocated_raw.addr); will_return(rpma_mr_reg, MOCK_RPMA_MR_LOCAL); will_return(__wrap__test_malloc, MOCK_ERRNO); will_return(rpma_mr_dereg, MOCK_OK); will_return(__wrap_munmap, &allocated_raw); will_return(__wrap_munmap, MOCK_OK); expect_value(rpma_mr_dereg, *mr_ptr, MOCK_RPMA_MR_LOCAL); /* run test */ struct rpma_flush *flush = NULL; int ret = rpma_flush_new(MOCK_PEER, MOCK_QP, &flush); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(flush); } /* * new__apm_success -- happy day scenario */ static void new__apm_success(void **unused) { /* * The thing is done by setup__apm_flush_new() * and teardown__apm_flush_delete(). */ } #ifdef NATIVE_FLUSH_SUPPORTED /* * new__native_success -- happy day scenario */ static void new__native_success(void **unused) { /* * The thing is done by setup__native_flush_new() * and teardown__native_flush_delete(). */ } #endif /* * delete__apm_dereg_ERRNO -- rpma_mr_dereg() fails with MOCK_ERRNO */ static void delete__apm_dereg_ERRNO(void **unused) { struct flush_test_state *fstate; setup__apm_flush_new((void **)&fstate); /* configure mocks */ expect_value(rpma_mr_dereg, *mr_ptr, MOCK_RPMA_MR_LOCAL); will_return(__wrap_munmap, &fstate->allocated_raw); will_return_maybe(__wrap_munmap, MOCK_OK); will_return(rpma_mr_dereg, RPMA_E_PROVIDER); will_return(rpma_mr_dereg, MOCK_ERRNO); /* delete the object */ int ret = rpma_flush_delete(&fstate->flush); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(fstate->flush); } /* * delete__apm_munmap_ERRNO -- munmap() fails with MOCK_ERRNO */ static void delete__apm_munmap_ERRNO(void **unused) { struct flush_test_state *fstate; setup__apm_flush_new((void **)&fstate); /* configure mocks */ expect_value(rpma_mr_dereg, *mr_ptr, MOCK_RPMA_MR_LOCAL); will_return_maybe(rpma_mr_dereg, MOCK_OK); will_return(__wrap_munmap, &fstate->allocated_raw); will_return(__wrap_munmap, MOCK_ERRNO); /* delete the object */ int ret = rpma_flush_delete(&fstate->flush); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(fstate->flush); } int main(int argc, char *argv[]) { enable_unistd_mocks(); const struct CMUnitTest tests[] = { /* rpma_flush_new() unit tests */ cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test(new__apm_sysconf_ERRNO), cmocka_unit_test(new__apm_mmap_MAP_FAILED), cmocka_unit_test(new__apm_mr_reg_E_NOMEM_munmap_ERRNO), cmocka_unit_test(new__apm_malloc_ERRNO), cmocka_unit_test_setup_teardown(new__apm_success, setup__apm_flush_new, teardown__apm_flush_delete), #ifdef NATIVE_FLUSH_SUPPORTED cmocka_unit_test_setup_teardown(new__native_success, setup__native_flush_new, teardown__native_flush_delete), #endif /* rpma_flush_delete() unit tests */ cmocka_unit_test(delete__apm_dereg_ERRNO), cmocka_unit_test(delete__apm_munmap_ERRNO), }; int 
ret = cmocka_run_group_tests(tests, group_setup_flush_common, NULL); disable_unistd_mocks(); return ret; } rpma-1.3.0/tests/unit/info/000077500000000000000000000000001443364775400155475ustar00rootroot00000000000000rpma-1.3.0/tests/unit/info/CMakeLists.txt000066400000000000000000000016541443364775400203150ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_info name) set(src_name info-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c info-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rdma_cm.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${TEST_UNIT_COMMON_DIR}/mocks-string.c ${TEST_UNIT_COMMON_DIR}/mocks-netdb.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/info.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc,--wrap=strerror,--wrap=gai_strerror") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_info(bind_addr) add_test_info(new) add_test_info(resolve_addr) rpma-1.3.0/tests/unit/info/info-bind_addr.c000066400000000000000000000060651443364775400205610ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * info-bind_addr.c -- unit tests of the info module * * API covered: * - rpma_info_bind_addr() */ #include #include #include "cmocka_headers.h" #include "test-common.h" #include "conn_req.h" #include "info.h" #include "librpma.h" #include "info-common.h" #include "mocks-rdma_cm.h" #include "mocks-string.h" #include /* * bind_addr__id_NULL -- NULL id is invalid */ static void bind_addr__id_NULL(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* run test */ int ret = rpma_info_bind_addr(istate->info, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * bind_addr__info_NULL -- NULL info is invalid */ static void bind_addr__info_NULL(void **unused) { /* run test */ struct rdma_cm_id cmid = {0}; int ret = rpma_info_bind_addr(NULL, &cmid); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_int_equal(memcmp(&cmid, &Cmid_zero, sizeof(cmid)), 0); } /* * bind_addr__id_info_NULL -- NULL id and info are invalid */ static void bind_addr__id_info_NULL(void **unused) { /* run test */ int ret = rpma_info_bind_addr(NULL, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * bind_addr__bind_addr_ERRNO -- rpma_info_bind_addr() fails * with MOCK_ERRNO */ static void bind_addr__bind_addr_ERRNO(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* configure mocks */ struct rdma_cm_id cmid = {0}; expect_value(rdma_bind_addr, id, &cmid); expect_value(rdma_bind_addr, addr, MOCK_SRC_ADDR); will_return(rdma_bind_addr, MOCK_ERRNO); expect_value(__wrap_strerror, errnum, MOCK_ERRNO); will_return(__wrap_strerror, MOCK_ERROR); /* run test */ int ret = rpma_info_bind_addr(istate->info, &cmid); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_int_equal(memcmp(&cmid, &Cmid_zero, sizeof(cmid)), 0); } /* * bind_addr__success -- happy day scenario */ static void bind_addr__success(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* configure mocks */ struct rdma_cm_id cmid = {0}; expect_value(rdma_bind_addr, id, &cmid); 
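/* MOCK_SRC_ADDR comes from the rdma_addrinfo prepared by setup__new_passive() */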
expect_value(rdma_bind_addr, addr, MOCK_SRC_ADDR); will_return(rdma_bind_addr, MOCK_OK); /* run test */ int ret = rpma_info_bind_addr(istate->info, &cmid); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_int_equal(memcmp(&cmid, &Cmid_zero, sizeof(cmid)), 0); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_info_bind_addr() unit tests */ cmocka_unit_test_setup_teardown(bind_addr__id_NULL, setup__new_passive, teardown__delete), cmocka_unit_test(bind_addr__info_NULL), cmocka_unit_test(bind_addr__id_info_NULL), cmocka_unit_test_setup_teardown(bind_addr__bind_addr_ERRNO, setup__new_passive, teardown__delete), cmocka_unit_test_setup_teardown(bind_addr__success, setup__new_passive, teardown__delete), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/info/info-common.c000066400000000000000000000050631443364775400201400ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * info-common.c -- common part of unit tests of the info module */ #include #include #include "cmocka_headers.h" #include "conn_req.h" #include "info.h" #include "librpma.h" #include "info-common.h" #include "test-common.h" #include "mocks-rdma_cm.h" #include /* * setup__new_passive -- prepare a valid rpma_info object (passive side) */ int setup__new_passive(void **info_state_ptr) { /* * configure mocks for rpma_info_new(): * NOTE: it is not allowed to call rdma_freeaddrinfo() if * rdma_getaddrinfo() succeeded. */ static struct info_state istate; memset(&istate, 0, sizeof(istate)); istate.rai.ai_src_addr = MOCK_SRC_ADDR; istate.rai.ai_dst_addr = MOCK_DST_ADDR; struct rdma_addrinfo_args args = {MOCK_VALIDATE, &istate.rai}; will_return(rdma_getaddrinfo, &args); expect_value(rdma_getaddrinfo, hints->ai_flags, RAI_PASSIVE); will_return(__wrap__test_malloc, MOCK_OK); int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_PASSIVE, &istate.info); assert_int_equal(ret, 0); assert_non_null(istate.info); *info_state_ptr = &istate; return 0; } /* * setup__new_active -- prepare a valid rpma_info object (active side) */ int setup__new_active(void **info_state_ptr) { /* * configure mocks for rpma_info_new(): * NOTE: it is not allowed to call rdma_freeaddrinfo() if * rdma_getaddrinfo() succeeded. */ static struct info_state istate; memset(&istate, 0, sizeof(istate)); istate.rai.ai_src_addr = MOCK_SRC_ADDR; istate.rai.ai_dst_addr = MOCK_DST_ADDR; struct rdma_addrinfo_args args = {MOCK_VALIDATE, &istate.rai}; will_return(rdma_getaddrinfo, &args); expect_value(rdma_getaddrinfo, hints->ai_flags, 0); will_return(__wrap__test_malloc, MOCK_OK); int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_ACTIVE, &istate.info); assert_int_equal(ret, 0); assert_non_null(istate.info); *info_state_ptr = &istate; return 0; } /* * teardown__delete -- delete the rpma_info object * (either active or passive side) */ int teardown__delete(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* * configure mocks for rdma_freeaddrinfo(): * NOTE: it is not allowed to call rdma_freeaddrinfo() nor malloc() in * rpma_info_delete(). 
*/ struct rdma_addrinfo_args args = {MOCK_VALIDATE, &istate->rai}; will_return(rdma_freeaddrinfo, &args); /* teardown */ int ret = rpma_info_delete(&istate->info); assert_int_equal(ret, MOCK_OK); assert_null(istate->info); return 0; } rpma-1.3.0/tests/unit/info/info-common.h000066400000000000000000000014461443364775400201460ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * info-common.h -- header of the common part of unit tests of the info module */ #ifndef INFO_COMMON_H #define INFO_COMMON_H 1 #include #include "cmocka_headers.h" #include "conn_req.h" #include "info.h" #include "librpma.h" #include #define MOCK_SRC_ADDR (struct sockaddr *)0x0ADD #define MOCK_DST_ADDR (struct sockaddr *)0x0ADE /* * All the resources used between setup__new_* and teardown__delete. */ struct info_state { struct rdma_addrinfo rai; struct rpma_info *info; }; int setup__new_active(void **info_state_ptr); int setup__new_passive(void **info_state_ptr); int teardown__delete(void **info_state_ptr); #endif /* INFO_COMMON_H */ rpma-1.3.0/tests/unit/info/info-new.c000066400000000000000000000156001443364775400174370ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * info-new.c -- unit tests of the info module * * APIs covered: * - rpma_info_new() * - rpma_info_delete() */ #include #include "cmocka_headers.h" #include "test-common.h" #include "conn_req.h" #include "info.h" #include "librpma.h" #include "info-common.h" #include "mocks-rdma_cm.h" #include "mocks-string.h" #include "mocks-netdb.h" #include #include static int rets[] = {EAI_SYSTEM, -1, MOCK_EAI_ERRNO}; static int num_rets = sizeof(rets) / sizeof(rets[0]); /* * new__addr_NULL -- NULL addr is not valid */ static void new__addr_NULL(void **unused) { /* * NOTE: it is not allowed for info to allocate any resource before * validating arguments. */ /* run test */ struct rpma_info *info = NULL; int ret = rpma_info_new(NULL, "", RPMA_INFO_PASSIVE, &info); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(info); } /* * new__info_ptr_NULL -- NULL info_ptr is not valid */ static void new__info_ptr_NULL(void **unused) { /* * NOTE: it is not allowed for info to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_info_new("", "", RPMA_INFO_PASSIVE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__addr_port_info_ptr_NULL -- NULL addr, NULL port * and NULL info_ptr are not valid */ static void new__addr_port_info_ptr_NULL(void **unused) { /* * NOTE: it is not allowed for info to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_info_new(NULL, NULL, RPMA_INFO_PASSIVE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__getaddrinfo_ERRNO_ACTIVE -- rdma_getaddrinfo() fails with * MOCK_ERRNO when side == RPMA_INFO_ACTIVE */ static void new__getaddrinfo_ERRNO_ACTIVE(void **unused) { /* * configure mocks: * - NOTE: it is not allowed to call rdma_freeaddrinfo() if * rdma_getaddrinfo() has failed. 
*/ struct rdma_addrinfo_args get_args = {MOCK_VALIDATE, NULL}; will_return_maybe(__wrap__test_malloc, MOCK_OK); for (int i = 0; i < num_rets; i++) { will_return(rdma_getaddrinfo, &get_args); expect_value(rdma_getaddrinfo, hints->ai_flags, 0); will_return(rdma_getaddrinfo, rets[i]); will_return(rdma_getaddrinfo, MOCK_ERRNO); if (rets[i] == -1 || rets[i] == EAI_SYSTEM) { expect_value(__wrap_strerror, errnum, MOCK_ERRNO); will_return(__wrap_strerror, MOCK_ERROR); } else { expect_value(__wrap_gai_strerror, errcode, MOCK_EAI_ERRNO); will_return(__wrap_gai_strerror, MOCK_EAI_ERROR); } /* run test */ struct rpma_info *info = NULL; int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_ACTIVE, &info); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(info); } } /* * new__getaddrinfo_ERRNO_PASSIVE -- rdma_getaddrinfo() fails with * MOCK_ERRNO when side == RPMA_INFO_PASSIVE */ static void new__getaddrinfo_ERRNO_PASSIVE(void **unused) { /* * configure mocks: * - NOTE: it is not allowed to call rdma_freeaddrinfo() if * rdma_getaddrinfo() has failed. */ struct rdma_addrinfo_args get_args = {MOCK_VALIDATE, NULL}; will_return_maybe(__wrap__test_malloc, MOCK_OK); for (int i = 0; i < num_rets; i++) { will_return(rdma_getaddrinfo, &get_args); expect_value(rdma_getaddrinfo, hints->ai_flags, RAI_PASSIVE); will_return(rdma_getaddrinfo, rets[i]); will_return(rdma_getaddrinfo, MOCK_ERRNO); if (rets[i] == -1 || rets[i] == EAI_SYSTEM) { expect_value(__wrap_strerror, errnum, MOCK_ERRNO); will_return(__wrap_strerror, MOCK_ERROR); } else { expect_value(__wrap_gai_strerror, errcode, MOCK_EAI_ERRNO); will_return(__wrap_gai_strerror, MOCK_EAI_ERROR); } /* run test */ struct rpma_info *info = NULL; int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_PASSIVE, &info); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(info); } } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ struct rdma_addrinfo rai = {0}; struct rdma_addrinfo_args args = {MOCK_PASSTHROUGH, &rai}; will_return_maybe(rdma_getaddrinfo, &args); will_return_maybe(rdma_freeaddrinfo, &args); will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_info *info = NULL; int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_PASSIVE, &info); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(info); } /* * new__lifecycle -- happy day scenario */ static void new__lifecycle(void **unused) { /* * configure mocks for rpma_info_new(): * NOTE: it is not allowed to call rdma_freeaddrinfo() if * rdma_getaddrinfo() succeeded. */ struct rdma_addrinfo rai = {0}; struct rdma_addrinfo_args args = {MOCK_VALIDATE, &rai}; will_return(rdma_getaddrinfo, &args); expect_value(rdma_getaddrinfo, hints->ai_flags, RAI_PASSIVE); will_return(__wrap__test_malloc, MOCK_OK); /* run test - step 1 */ struct rpma_info *info = NULL; int ret = rpma_info_new(MOCK_IP_ADDRESS, MOCK_PORT, RPMA_INFO_PASSIVE, &info); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(info); /* * configure mocks for rpma_info_delete(): * NOTE: it is not allowed to call rdma_getaddrinfo() nor malloc() in * rpma_info_delete(). 
*/ will_return(rdma_freeaddrinfo, &args); /* run test - step 2 */ ret = rpma_info_delete(&info); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(info); } /* * delete__info_ptr_NULL -- NULL info_ptr is not valid */ static void delete__info_ptr_NULL(void **unused) { /* * NOTE: it is not allowed for info to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_info_delete(NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__null_info -- valid NULL info */ static void delete__null_info(void **unused) { /* * NOTE: it is not allowed for info to allocate any resource when * quick-exiting. */ /* run test */ struct rpma_info *info = NULL; int ret = rpma_info_delete(&info); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_null(info); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_info_new() unit tests */ cmocka_unit_test(new__addr_NULL), cmocka_unit_test(new__info_ptr_NULL), cmocka_unit_test(new__addr_port_info_ptr_NULL), cmocka_unit_test(new__getaddrinfo_ERRNO_ACTIVE), cmocka_unit_test(new__getaddrinfo_ERRNO_PASSIVE), cmocka_unit_test(new__malloc_ERRNO), /* rpma_info_delete() unit tests */ cmocka_unit_test(delete__info_ptr_NULL), cmocka_unit_test(delete__null_info), /* rpma_info_new()/_delete() lifecycle */ cmocka_unit_test(new__lifecycle), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/info/info-resolve_addr.c000066400000000000000000000046401443364775400213210ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * info-resolve_addr.c -- unit tests of the info module * * API covered: * - rpma_info_resolve_addr() */ #include #include #include "cmocka_headers.h" #include "test-common.h" #include "conn_req.h" #include "info.h" #include "librpma.h" #include "info-common.h" #include "mocks-rdma_cm.h" #include "mocks-string.h" #include /* * resolve_addr__resolve_addr_ERRNO -- rdma_resolve_addr() fails * with MOCK_ERRNO */ static void resolve_addr__resolve_addr_ERRNO(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* configure mocks */ struct rdma_cm_id cmid = {0}; expect_value(rdma_resolve_addr, id, &cmid); expect_value(rdma_resolve_addr, src_addr, MOCK_SRC_ADDR); expect_value(rdma_resolve_addr, dst_addr, MOCK_DST_ADDR); expect_value(rdma_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rdma_resolve_addr, MOCK_ERRNO); expect_value(__wrap_strerror, errnum, MOCK_ERRNO); will_return(__wrap_strerror, MOCK_ERROR); /* run test */ int ret = rpma_info_resolve_addr(istate->info, &cmid, RPMA_DEFAULT_TIMEOUT_MS); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_int_equal(memcmp(&cmid, &Cmid_zero, sizeof(cmid)), 0); } /* * resolve_addr__success -- happy day scenario */ static void resolve_addr__success(void **info_state_ptr) { struct info_state *istate = *info_state_ptr; /* configure mocks */ struct rdma_cm_id cmid = {0}; expect_value(rdma_resolve_addr, id, &cmid); expect_value(rdma_resolve_addr, src_addr, MOCK_SRC_ADDR); expect_value(rdma_resolve_addr, dst_addr, MOCK_DST_ADDR); expect_value(rdma_resolve_addr, timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); will_return(rdma_resolve_addr, MOCK_OK); /* run test */ int ret = rpma_info_resolve_addr(istate->info, &cmid, RPMA_DEFAULT_TIMEOUT_MS); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_int_equal(memcmp(&cmid, &Cmid_zero, sizeof(cmid)), 0); } int main(int 
argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_info_resolve_addr() unit tests */ cmocka_unit_test_setup_teardown( resolve_addr__resolve_addr_ERRNO, setup__new_active, teardown__delete), cmocka_unit_test_setup_teardown(resolve_addr__success, setup__new_active, teardown__delete), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/librpma_constructor/000077500000000000000000000000001443364775400207075ustar00rootroot00000000000000rpma-1.3.0/tests/unit/librpma_constructor/CMakeLists.txt000066400000000000000000000006411443364775400234500ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) build_test_src(UNIT NAME ut-librpma_constructor SRCS librpma_constructor.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${LIBRPMA_SOURCE_DIR}/librpma.c) target_compile_definitions(ut-librpma_constructor PRIVATE MOCK_CONSTRUCTOR) add_test_generic(NAME ut-librpma_constructor TRACERS none) rpma-1.3.0/tests/unit/librpma_constructor/librpma_constructor.c000066400000000000000000000013311443364775400251440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * librpma_constructor.c -- library constructor test */ #include "cmocka_headers.h" void librpma_init(void); void librpma_fini(void); /* * init__success -- librpma_init() unit test */ static void init__success(void **unused) { expect_function_call(rpma_log_init); librpma_init(); } /* * fini__success -- librpma_fini() unit test */ static void fini__success(void **unused) { expect_function_call(rpma_log_fini); librpma_fini(); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { cmocka_unit_test(init__success), cmocka_unit_test(fini__success), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/log/000077500000000000000000000000001443364775400153755ustar00rootroot00000000000000rpma-1.3.0/tests/unit/log/CMakeLists.txt000066400000000000000000000013171443364775400201370ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) function(add_test_log name) if ("${ARGV1}" STREQUAL "DEBUG") set(test ut-log-${name}-DEBUG) else() set(test ut-log-${name}) endif() build_test_src(UNIT NAME ${test} SRCS ${name}.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log_default.c ${LIBRPMA_SOURCE_DIR}/log.c) target_compile_definitions(${test} PRIVATE RPMA_UNIT_TESTS) if ("${ARGV1}" STREQUAL "DEBUG") target_compile_definitions(${test} PRIVATE DEBUG) endif() add_test_generic(NAME ${test} TRACERS none) endfunction() add_test_log(init-fini) add_test_log(init-fini DEBUG) add_test_log(macros) add_test_log(threshold) rpma-1.3.0/tests/unit/log/init-fini.c000066400000000000000000000026201443364775400174270ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * init-fini.c -- rpma_log_init/_fini() unit tests */ #include "cmocka_headers.h" #include "log_internal.h" #include "log_default.h" /* * Default levels of the logging thresholds */ #ifdef DEBUG #define RPMA_LOG_THRESHOLD_DEFAULT RPMA_LOG_LEVEL_DEBUG #define RPMA_LOG_THRESHOLD_AUX_DEFAULT RPMA_LOG_LEVEL_WARNING #else #define RPMA_LOG_THRESHOLD_DEFAULT RPMA_LOG_LEVEL_WARNING #define RPMA_LOG_THRESHOLD_AUX_DEFAULT RPMA_LOG_DISABLED #endif /* * init_fini__lifecycle -- happy day scenario */ static void init_fini__lifecycle(void **unused) { /* verify the initial 
state of the module */ assert_null(Rpma_log_function); assert_int_equal(Rpma_log_threshold[RPMA_LOG_THRESHOLD], RPMA_LOG_THRESHOLD_DEFAULT); assert_int_equal(Rpma_log_threshold[RPMA_LOG_THRESHOLD_AUX], RPMA_LOG_THRESHOLD_AUX_DEFAULT); /* configure mocks, run test & verify the results */ expect_function_call(rpma_log_default_init); rpma_log_init(); assert_ptr_equal(Rpma_log_function, rpma_log_default_function); /* configure mocks, run test & verify the results */ expect_function_call(rpma_log_default_fini); rpma_log_fini(); assert_ptr_equal(Rpma_log_function, NULL); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { cmocka_unit_test(init_fini__lifecycle), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/log/macros.c000066400000000000000000000075061443364775400170350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * macros.c -- RPMA_LOG_* macros unit tests */ #include "cmocka_headers.h" #include "log_internal.h" #include "log_default.h" #define MOCK_MESSAGE "Message" #define MOCK_OUTPUT 1024 #define MOCK_FILE_NAME __FILE__ /* * mock_log_function -- custom log function */ void mock_log_function(enum rpma_log_level level, const char *file_name, const int line_no, const char *function_name, const char *message_format, ...) { static char output[MOCK_OUTPUT]; check_expected(level); check_expected(file_name); check_expected(line_no); check_expected(function_name); va_list ap; va_start(ap, message_format); assert_true(vsnprintf(output, MOCK_OUTPUT, message_format, ap) > 0); va_end(ap); check_expected_ptr(output); } /* * setup_threshold -- set the primary threshold */ int setup_threshold(void **level_ptr) { enum rpma_log_level level = **(enum rpma_log_level **)level_ptr; while (RPMA_E_AGAIN == rpma_log_set_threshold( RPMA_LOG_THRESHOLD, level)) ; return 0; } /* * MOCK_CONFIGURE_LOG_FUNC -- configure the set of expects needed for * the mock_log_function function * */ #define MOCK_CONFIGURE_LOG_FUNC(l) \ expect_value(mock_log_function, level, (l)); \ expect_string(mock_log_function, file_name, MOCK_FILE_NAME); \ expect_value(mock_log_function, line_no, __LINE__ + 2); \ expect_string(mock_log_function, function_name, "log__all"); \ expect_string(mock_log_function, output, MOCK_MESSAGE "\n") \ /* * log__all -- happy day scenario */ static void log__all(void **level_ptr) { enum rpma_log_level primary = **(enum rpma_log_level **)level_ptr; for (enum rpma_log_level secondary = RPMA_LOG_DISABLED; secondary <= RPMA_LOG_LEVEL_DEBUG; ++secondary) { /* * The secondary threshold should not affect the macros * behaviour. 
*/ while (RPMA_E_AGAIN == rpma_log_set_threshold( RPMA_LOG_THRESHOLD_AUX, secondary)) ; if (RPMA_LOG_LEVEL_NOTICE <= primary) { MOCK_CONFIGURE_LOG_FUNC(RPMA_LOG_LEVEL_NOTICE); } RPMA_LOG_NOTICE("%s", MOCK_MESSAGE); if (RPMA_LOG_LEVEL_WARNING <= primary) { MOCK_CONFIGURE_LOG_FUNC(RPMA_LOG_LEVEL_WARNING); } RPMA_LOG_WARNING("%s", MOCK_MESSAGE); if (RPMA_LOG_LEVEL_ERROR <= primary) { MOCK_CONFIGURE_LOG_FUNC(RPMA_LOG_LEVEL_ERROR); } RPMA_LOG_ERROR("%s", MOCK_MESSAGE); if (RPMA_LOG_LEVEL_FATAL <= primary) { MOCK_CONFIGURE_LOG_FUNC(RPMA_LOG_LEVEL_FATAL); } RPMA_LOG_FATAL("%s", MOCK_MESSAGE); /* RPMA_LOG_ALWAYS() has to always call rpma_log_default_function() */ expect_function_call(rpma_log_default_function); RPMA_LOG_ALWAYS("%s", MOCK_MESSAGE); } } int main(int argc, char *argv[]) { /* set a custom logging function */ while (RPMA_E_AGAIN == rpma_log_set_function( mock_log_function)) ; /* prestates */ enum rpma_log_level Level_disabled = RPMA_LOG_DISABLED; enum rpma_log_level Level_fatal = RPMA_LOG_LEVEL_FATAL; enum rpma_log_level Level_error = RPMA_LOG_LEVEL_ERROR; enum rpma_log_level Level_warning = RPMA_LOG_LEVEL_WARNING; enum rpma_log_level Level_notice = RPMA_LOG_LEVEL_NOTICE; enum rpma_log_level Level_info = RPMA_LOG_LEVEL_INFO; enum rpma_log_level Level_debug = RPMA_LOG_LEVEL_DEBUG; const struct CMUnitTest tests[] = { {"RPMA_LOG_DISABLED", log__all, setup_threshold, NULL, &Level_disabled}, {"RPMA_LOG_LEVEL_FATAL", log__all, setup_threshold, NULL, &Level_fatal}, {"RPMA_LOG_LEVEL_ERROR", log__all, setup_threshold, NULL, &Level_error}, {"RPMA_LOG_LEVEL_WARNING", log__all, setup_threshold, NULL, &Level_warning}, {"RPMA_LOG_LEVEL_NOTICE", log__all, setup_threshold, NULL, &Level_notice}, {"RPMA_LOG_LEVEL_INFO", log__all, setup_threshold, NULL, &Level_info}, {"RPMA_LOG_LEVEL_DEBUG", log__all, setup_threshold, NULL, &Level_debug}, }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/log/threshold.c000066400000000000000000000057741443364775400175520ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * threshold.c -- rpma_log_[get/set]_threshold unit tests */ #include #include "cmocka_headers.h" #include "log_internal.h" #include "log_default.h" #include "librpma.h" #define INVALID_THRESHOLD_MOCK ((enum rpma_log_threshold)(-1)) #define INVALID_LEVEL_MOCK (-2) /* * set_threshold__threshold_invalid -- use an invalid threshold */ void set_threshold__threshold_invalid(void **unused) { int ret = rpma_log_set_threshold(INVALID_THRESHOLD_MOCK, RPMA_LOG_DISABLED); assert_int_equal(ret, RPMA_E_INVAL); } /* * set_threshold__level_invalid -- use an invalid level */ void set_threshold__level_invalid(void **unused) { int ret = rpma_log_set_threshold(RPMA_LOG_THRESHOLD, INVALID_LEVEL_MOCK); assert_int_equal(ret, RPMA_E_INVAL); } /* * set_threshold__threshold_level_invalid -- use an invalid threshold and level */ void set_threshold__threshold_level_invalid(void **unused) { int ret = rpma_log_set_threshold(INVALID_THRESHOLD_MOCK, INVALID_LEVEL_MOCK); assert_int_equal(ret, RPMA_E_INVAL); } /* * get_threshold__threshold_invalid -- use an invalid threshold */ void get_threshold__threshold_invalid(void **unused) { enum rpma_log_level level; int ret = rpma_log_get_threshold(INVALID_THRESHOLD_MOCK, &level); assert_int_equal(ret, RPMA_E_INVAL); } /* * get_threshold__level_invalid -- NULL level is invalid */ void get_threshold__level_invalid(void **unused) { int ret = rpma_log_get_threshold(RPMA_LOG_THRESHOLD, NULL); 
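/* a NULL level output pointer must be rejected with RPMA_E_INVAL */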
assert_int_equal(ret, RPMA_E_INVAL); } /* * get_threshold__threshold_level_invalid -- use an invalid threshold and level */ void get_threshold__threshold_level_invalid(void **unused) { int ret = rpma_log_get_threshold(INVALID_THRESHOLD_MOCK, NULL); assert_int_equal(ret, RPMA_E_INVAL); } /* * threshold_lifecycle -- happy day scenario */ void threshold_lifecycle(void **unused) { enum rpma_log_level level; for (int i = RPMA_LOG_THRESHOLD; i <= RPMA_LOG_THRESHOLD_AUX; i++) { for (int j = RPMA_LOG_DISABLED; j <= RPMA_LOG_LEVEL_DEBUG; j++) { int ret; do { ret = rpma_log_set_threshold( (enum rpma_log_threshold)i, j); } while (ret == RPMA_E_AGAIN); assert_int_equal(ret, 0); ret = rpma_log_get_threshold( (enum rpma_log_threshold)i, &level); assert_int_equal(level, j); assert_int_equal(ret, 0); } } } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_log_set_threshold() unit tests */ cmocka_unit_test(set_threshold__threshold_invalid), cmocka_unit_test(set_threshold__level_invalid), cmocka_unit_test(set_threshold__threshold_level_invalid), /* rpma_log_get_threshold() unit tests */ cmocka_unit_test(get_threshold__threshold_invalid), cmocka_unit_test(get_threshold__level_invalid), cmocka_unit_test(get_threshold__threshold_level_invalid), /* rpma_log_[set/get]_threshold() success test */ cmocka_unit_test(threshold_lifecycle), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/log_default/000077500000000000000000000000001443364775400171015ustar00rootroot00000000000000rpma-1.3.0/tests/unit/log_default/CMakeLists.txt000066400000000000000000000017621443364775400216470ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) # # Undefine _FORTIFY_SOURCE in the Release build. # _FORTIFY_SOURCE replaces original '*printf' functions # and causes cmocka mocks of those functions to not work. # add_flag("-U_FORTIFY_SOURCE" RELEASE) function(build_log_default name) build_test_src(UNIT NAME ut-log_default-${name} SRCS ${name}.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-stdio.c ${TEST_UNIT_COMMON_DIR}/mocks-syslog.c ${TEST_UNIT_COMMON_DIR}/mocks-time.c ${TEST_UNIT_COMMON_DIR}/mocks-glibc.c ${LIBRPMA_SOURCE_DIR}/log_default.c) set_target_properties(ut-log_default-${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=vsnprintf,--wrap=snprintf,--wrap=fprintf,--wrap=clock_gettime,--wrap=localtime_r,--wrap=strftime,--wrap=syscall") add_test_generic(NAME ut-log_default-${name} TRACERS none) endfunction() build_log_default(init-fini) build_log_default(function) rpma-1.3.0/tests/unit/log_default/function.c000066400000000000000000000310421443364775400210720ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * function.c -- rpma_log_default_function() unit tests */ /* * These unit tests assume the following order of calls: * 1. In any order: * - vsnprintf - generating the message * - snprintf - generating the file info (if available) * 2. In any order: * - syslog * - writing to stderr (if level <= the secondary threshold level) * * Where 'writing to stderr' is: * 1. in the following order: * 1. clock_gettime * 2. localtime_r * 3. strftime * 4. snprintf * 2. 
fprintf (no matter if the time-related sequence will succeed) */ #include #include #include #include "cmocka_headers.h" #include "log_default.h" #include "log_internal.h" #include "mocks-stdio.h" #include "mocks-time.h" #include "mocks-glibc.h" #include "test-common.h" #define STR_HELPER(x) #x #define STR(x) STR_HELPER(x) #define MOCK_LOG_LEVEL RPMA_LOG_LEVEL_WARNING #define MOCK_FILE_NAME "foo_bar.c" #define MOCK_FILE_NAME_ABSOLUTE "/path/to/foo_bar.c" #define MOCK_LINE_NUMBER 199 #define MOCK_FUNCTION_NAME "foo_bar()" #define MOCK_MESSAGE "Message" #define MOCK_FILE_ERROR_STR "[file info error]: " /* * These two arrays should be the exact copy of the arrays you may find in * the log_default.c module. There is no another way of validating their * contents other than your cautiousness during editing these. */ static char const rpma_log_level_names[6][9] = { [RPMA_LOG_LEVEL_FATAL] = "*FATAL* ", [RPMA_LOG_LEVEL_ERROR] = "*ERROR* ", [RPMA_LOG_LEVEL_WARNING] = "*WARN* ", [RPMA_LOG_LEVEL_NOTICE] = "*NOTE* ", [RPMA_LOG_LEVEL_INFO] = "*INFO* ", [RPMA_LOG_LEVEL_DEBUG] = "*DEBUG* ", }; static const int rpma_log_level_syslog_severity[] = { [RPMA_LOG_LEVEL_FATAL] = LOG_CRIT, [RPMA_LOG_LEVEL_ERROR] = LOG_ERR, [RPMA_LOG_LEVEL_WARNING] = LOG_WARNING, [RPMA_LOG_LEVEL_NOTICE] = LOG_NOTICE, [RPMA_LOG_LEVEL_INFO] = LOG_INFO, [RPMA_LOG_LEVEL_DEBUG] = LOG_DEBUG, }; typedef struct { int clock_gettime_error; int localtime_r_error; int strftime_error; int snprintf_no_eol; enum rpma_log_level secondary; char *path; } mock_config; /* * setup_thresholds -- setup logging thresholds */ int setup_thresholds(void **config_ptr) { mock_config *cfg = (mock_config *)*config_ptr; /* * The main log threshold should not affect logging function behaviour. */ Rpma_log_threshold[RPMA_LOG_THRESHOLD] = RPMA_LOG_DISABLED; Rpma_log_threshold[RPMA_LOG_THRESHOLD_AUX] = cfg->secondary; return 0; } /* * function__RPMA_LOG_DISABLED -- call rpma_log_default_function() with RPMA_LOG_DISABLED */ void function__RPMA_LOG_DISABLED(void **unused) { /* run test */ rpma_log_default_function(RPMA_LOG_DISABLED, MOCK_FILE_NAME, MOCK_LINE_NUMBER, MOCK_FUNCTION_NAME, MOCK_MESSAGE); } /* * function__vsnprintf_fail -- vsnprintf() fails */ void function__vsnprintf_fail(void **unused) { /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_STDIO_ERROR); will_return_maybe(__wrap_snprintf, MOCK_OK); /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, MOCK_FILE_NAME, MOCK_LINE_NUMBER, MOCK_FUNCTION_NAME, MOCK_MESSAGE); } /* * function__snprintf_fail -- snprintf() fails */ void function__snprintf_fail(void **unused) { /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); will_return(__wrap_snprintf, MOCK_STDIO_ERROR); will_return(syslog, MOCK_VALIDATE); expect_value(syslog, priority, rpma_log_level_syslog_severity[MOCK_LOG_LEVEL]); /* construct the resulting syslog message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, rpma_log_level_names[MOCK_LOG_LEVEL]); strcat(msg, MOCK_FILE_ERROR_STR MOCK_MESSAGE); expect_string(syslog, syslog_output, msg); /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, MOCK_FILE_NAME, MOCK_LINE_NUMBER, MOCK_FUNCTION_NAME, MOCK_MESSAGE); } /* * function__syslog_no_path -- syslog() without a path */ void function__syslog_no_path(void **unused) { /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); will_return(syslog, MOCK_VALIDATE); expect_value(syslog, priority, rpma_log_level_syslog_severity[MOCK_LOG_LEVEL]); /* construct the resulting syslog message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, 
rpma_log_level_names[MOCK_LOG_LEVEL]); strcat(msg, MOCK_MESSAGE); expect_string(syslog, syslog_output, msg); /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, NULL, 0, NULL, MOCK_MESSAGE); } /* * function__syslog -- syslog() using either an absolute or relative path */ void function__syslog(void **config_ptr) { mock_config *config = (mock_config *)*config_ptr; /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); will_return(__wrap_snprintf, MOCK_OK); will_return(syslog, MOCK_VALIDATE); expect_value(syslog, priority, rpma_log_level_syslog_severity[MOCK_LOG_LEVEL]); /* construct the resulting syslog message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, rpma_log_level_names[MOCK_LOG_LEVEL]); strcat(msg, MOCK_FILE_NAME ": " STR(MOCK_LINE_NUMBER) ": " MOCK_FUNCTION_NAME ": " MOCK_MESSAGE); expect_string(syslog, syslog_output, msg); /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, config->path, MOCK_LINE_NUMBER, MOCK_FUNCTION_NAME, MOCK_MESSAGE); } #define MOCK_TIME_OF_DAY {00, 00, 00, 1, 0, 70, 0, 365, 0} #define MOCK_TIME_OF_DAY_STR "Jan 01 00:00:00" #define MOCK_TIME_STR MOCK_TIME_OF_DAY_STR ".000000 " #define MOCK_TIME_ERROR_STR "[time error] " #define MOCK_PID 123456 #define MOCK_PID_AS_STR "["STR(MOCK_PID)"] " static struct timespec Timespec = {0}; static struct tm Tm = MOCK_TIME_OF_DAY; /* * configure time.h mocks */ #define MOCK_GET_TIMESTAMP_CONFIGURE(x) \ if ((x)->clock_gettime_error) { \ will_return(__wrap_clock_gettime, NULL); \ } else if ((x)->localtime_r_error) { \ will_return(__wrap_clock_gettime, &Timespec); \ will_return(__wrap_localtime_r, &Timespec); \ will_return(__wrap_localtime_r, NULL); \ } else if ((x)->strftime_error) { \ will_return(__wrap_clock_gettime, &Timespec); \ will_return(__wrap_localtime_r, &Timespec); \ will_return(__wrap_localtime_r, &Tm); \ will_return(__wrap_strftime, MOCK_STRFTIME_ERROR); \ } else if ((x)->snprintf_no_eol) { \ will_return(__wrap_clock_gettime, &Timespec); \ will_return(__wrap_localtime_r, &Timespec); \ will_return(__wrap_localtime_r, &Tm); \ will_return(__wrap_strftime, MOCK_STRFTIME_SUCCESS); \ will_return(__wrap_snprintf, MOCK_SNPRINTF_NO_EOL); \ } else { \ will_return(__wrap_clock_gettime, &Timespec); \ will_return(__wrap_localtime_r, &Timespec); \ will_return(__wrap_localtime_r, &Tm); \ will_return(__wrap_strftime, MOCK_STRFTIME_SUCCESS); \ will_return(__wrap_snprintf, MOCK_OK); \ } #define MOCK_TIME_STR_EXPECTED(x) \ (((x)->clock_gettime_error || (x)->localtime_r_error || \ (x)->strftime_error || (x)->snprintf_no_eol) ? 
\ MOCK_TIME_ERROR_STR : MOCK_TIME_STR) /* * function__stderr_path -- fprintf(stderr) with a provided path */ static void function__stderr_path(void **config_ptr) { mock_config *config = (mock_config *)*config_ptr; /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); will_return(__wrap_snprintf, MOCK_OK); will_return(syslog, MOCK_PASSTHROUGH); MOCK_GET_TIMESTAMP_CONFIGURE(config); will_return(__wrap_syscall, MOCK_PID); /* construct the resulting fprintf message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, MOCK_TIME_STR_EXPECTED(config)); strcat(msg, MOCK_PID_AS_STR); strcat(msg, rpma_log_level_names[MOCK_LOG_LEVEL]); strcat(msg, MOCK_FILE_NAME ": " STR(MOCK_LINE_NUMBER) ": " MOCK_FUNCTION_NAME ": " MOCK_MESSAGE); will_return(__wrap_fprintf, MOCK_VALIDATE); expect_string(__wrap_fprintf, fprintf_output, msg); /* enable syscall()'s mock only for test execution */ enabled__wrap_syscall = true; /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, config->path, MOCK_LINE_NUMBER, MOCK_FUNCTION_NAME, "%s", MOCK_MESSAGE); /* disable syscall()'s mock after test execution */ enabled__wrap_syscall = false; } /* * function__stderr_no_path -- fprintf(stderr) without a provided path */ static void function__stderr_no_path(void **config_ptr) { mock_config *config = (mock_config *)*config_ptr; for (enum rpma_log_level level = RPMA_LOG_LEVEL_FATAL; level <= RPMA_LOG_LEVEL_DEBUG; level++) { /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); will_return(syslog, MOCK_PASSTHROUGH); MOCK_GET_TIMESTAMP_CONFIGURE(config); will_return(__wrap_syscall, MOCK_PID); /* construct the resulting fprintf message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, MOCK_TIME_STR_EXPECTED(config)); strcat(msg, MOCK_PID_AS_STR); strcat(msg, rpma_log_level_names[MOCK_LOG_LEVEL]); strcat(msg, MOCK_MESSAGE); will_return(__wrap_fprintf, MOCK_VALIDATE); expect_string(__wrap_fprintf, fprintf_output, msg); /* enable syscall()'s mock only for test execution */ enabled__wrap_syscall = true; /* run test */ rpma_log_default_function(MOCK_LOG_LEVEL, NULL, 0, NULL, "%s", MOCK_MESSAGE); /* disable syscall()'s mock after test execution */ enabled__wrap_syscall = false; } } /* * function__stderr_no_path_ALWAYS -- fprintf(stderr) without a provided path * for RPMA_LOG_LEVEL_ALWAYS */ static void function__stderr_no_path_ALWAYS(void **config_ptr) { mock_config *config = (mock_config *)*config_ptr; for (enum rpma_log_level level = RPMA_LOG_LEVEL_FATAL; level <= RPMA_LOG_LEVEL_DEBUG; level++) { /* configure mocks */ will_return(__wrap_vsnprintf, MOCK_OK); MOCK_GET_TIMESTAMP_CONFIGURE(config); will_return(__wrap_syscall, MOCK_PID); /* construct the resulting fprintf message */ char msg[MOCK_BUFF_LEN] = ""; strcat(msg, MOCK_TIME_STR_EXPECTED(config)); strcat(msg, MOCK_PID_AS_STR); strcat(msg, rpma_log_level_names[RPMA_LOG_LEVEL_DEBUG]); strcat(msg, MOCK_MESSAGE); will_return(__wrap_fprintf, MOCK_VALIDATE); expect_string(__wrap_fprintf, fprintf_output, msg); /* enable syscall()'s mock only for test execution */ enabled__wrap_syscall = true; /* run test */ rpma_log_default_function(RPMA_LOG_LEVEL_ALWAYS, NULL, 0, NULL, "%s", MOCK_MESSAGE); /* disable syscall()'s mock after test execution */ enabled__wrap_syscall = false; } } /* * test configurations */ static mock_config config_no_stderr = { 0, 0, 0, 0, RPMA_LOG_DISABLED, MOCK_FILE_NAME }; static mock_config config_no_stderr_path_absolute = { 0, 0, 0, 0, RPMA_LOG_DISABLED, MOCK_FILE_NAME_ABSOLUTE }; static mock_config config_no_error = { 0, 0, 0, 0, RPMA_LOG_LEVEL_DEBUG, MOCK_FILE_NAME 
}; static mock_config config_gettime_error = { 1, 0, 0, 0, RPMA_LOG_LEVEL_DEBUG, MOCK_FILE_NAME }; static mock_config config_localtime_r_error = { 0, 1, 0, 0, RPMA_LOG_LEVEL_DEBUG, MOCK_FILE_NAME }; static mock_config config_strftime_error = { 0, 0, 1, 0, RPMA_LOG_LEVEL_DEBUG, MOCK_FILE_NAME }; static mock_config config_snprintf_no_eol = { 0, 0, 0, 1, RPMA_LOG_LEVEL_DEBUG, MOCK_FILE_NAME }; int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* syslog & stderr common tests */ cmocka_unit_test_prestate_setup_teardown( function__RPMA_LOG_DISABLED, setup_thresholds, NULL, &config_no_stderr), cmocka_unit_test_prestate_setup_teardown( function__vsnprintf_fail, setup_thresholds, NULL, &config_no_stderr), cmocka_unit_test_prestate_setup_teardown( function__snprintf_fail, setup_thresholds, NULL, &config_no_stderr), /* syslog tests */ cmocka_unit_test_prestate_setup_teardown( function__syslog_no_path, setup_thresholds, NULL, &config_no_stderr), {"function__syslog_path_relative", function__syslog, setup_thresholds, NULL, &config_no_stderr}, {"function__syslog_path_absolute", function__syslog, setup_thresholds, NULL, &config_no_stderr_path_absolute}, /* stderr tests - time-related fails */ {"function__stderr_path_gettime_error", function__stderr_path, setup_thresholds, NULL, &config_gettime_error}, {"function__stderr_path_localtime_r_error", function__stderr_path, setup_thresholds, NULL, &config_localtime_r_error}, {"function__stderr_path_strftime_error", function__stderr_path, setup_thresholds, NULL, &config_strftime_error}, {"function__stderr_path_snprintf_no_eol", function__stderr_path, setup_thresholds, NULL, &config_snprintf_no_eol}, /* stderr tests - positive */ cmocka_unit_test_prestate_setup_teardown( function__stderr_path, setup_thresholds, NULL, &config_no_error), cmocka_unit_test_prestate_setup_teardown( function__stderr_no_path, setup_thresholds, NULL, &config_no_error), cmocka_unit_test_prestate_setup_teardown( function__stderr_no_path_ALWAYS, setup_thresholds, NULL, &config_no_error), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/log_default/init-fini.c000066400000000000000000000015671443364775400211440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * init_fini.c -- rpma_log_default_init/_fini() unit tests */ #include #include "cmocka_headers.h" #include "log_default.h" /* * init__normal -- happy day scenario */ void init__normal(void **unused) { /* configure mocks */ expect_string(openlog, __ident, "rpma"); expect_value(openlog, __option, LOG_PID); expect_value(openlog, __facility, LOG_LOCAL7); /* run test */ rpma_log_default_init(); } /* * fini__normal -- happy day scenario */ void fini__normal(void **unused) { /* configure mocks */ expect_function_call(closelog); /* run test */ rpma_log_default_fini(); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { cmocka_unit_test(init__normal), cmocka_unit_test(fini__normal) }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/mr/000077500000000000000000000000001443364775400152325ustar00rootroot00000000000000rpma-1.3.0/tests/unit/mr/CMakeLists.txt000066400000000000000000000020201443364775400177640ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright (c) 2023 Fujitsu Limited # include(../../cmake/ctest_helpers.cmake) function(add_test_mr name) set(src_name mr-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME 
${name} SRCS ${src_name}.c mr-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-peer.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/mr.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_mr(advise) add_test_mr(atomic_write) add_test_mr(descriptor) if(NATIVE_FLUSH_SUPPORTED) add_test_mr(flush) endif() add_test_mr(get_flush_type) add_test_mr(local) add_test_mr(read) add_test_mr(recv) add_test_mr(reg) add_test_mr(send) add_test_mr(srq_recv) add_test_mr(write) rpma-1.3.0/tests/unit/mr/mr-advise.c000066400000000000000000000151751443364775400172760ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2021, Intel Corporation */ /* * mr-advise.c -- rpma_mr_advise() unit tests */ #include #include #include "cmocka_headers.h" #include "librpma.h" #include "mr-common.h" #include "mocks-ibverbs.h" #include "test-common.h" #ifdef IBV_ADVISE_MR_SUPPORTED /* * advise__failed_E_NOSUPP - rpma_mr_advise failed * with RPMA_E_NOSUPP */ static void advise__failed_E_NOSUPP(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; int error_no[] = { EOPNOTSUPP, ENOTSUP }; int n_values = sizeof(error_no) / sizeof(error_no[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ expect_value(ibv_advise_mr_mock, pd, MOCK_IBV_PD); expect_value(ibv_advise_mr_mock, advice, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE); expect_value(ibv_advise_mr_mock, flags, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); expect_value(ibv_advise_mr_mock, sg_list->lkey, MOCK_LKEY); expect_value(ibv_advise_mr_mock, sg_list->addr, MOCK_LADDR + MOCK_SRC_OFFSET); expect_value(ibv_advise_mr_mock, sg_list->length, MOCK_LEN); expect_value(ibv_advise_mr_mock, num_sge, 1); will_return(ibv_advise_mr_mock, error_no[i]); /* run test */ int ret = rpma_mr_advise(mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } } /* * advise__failed_E_INVAL - rpma_mr_advise failed * with RPMA_E_INVAL */ static void advise__failed_E_INVAL(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; int error_no[] = { EFAULT, EINVAL }; int n_values = sizeof(error_no) / sizeof(error_no[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ expect_value(ibv_advise_mr_mock, pd, MOCK_IBV_PD); expect_value(ibv_advise_mr_mock, advice, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE); expect_value(ibv_advise_mr_mock, flags, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); expect_value(ibv_advise_mr_mock, sg_list->lkey, MOCK_LKEY); expect_value(ibv_advise_mr_mock, sg_list->addr, MOCK_LADDR + MOCK_SRC_OFFSET); expect_value(ibv_advise_mr_mock, sg_list->length, MOCK_LEN); expect_value(ibv_advise_mr_mock, num_sge, 1); will_return(ibv_advise_mr_mock, error_no[i]); /* run test */ int ret = rpma_mr_advise(mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } } /* * advise__failed_E_PROVIDER - rpma_mr_advise failed * with RPMA_E_PROVIDER */ static void advise__failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ expect_value(ibv_advise_mr_mock, pd, MOCK_IBV_PD); expect_value(ibv_advise_mr_mock, advice, 
IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE); expect_value(ibv_advise_mr_mock, flags, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); expect_value(ibv_advise_mr_mock, sg_list->lkey, MOCK_LKEY); expect_value(ibv_advise_mr_mock, sg_list->addr, MOCK_LADDR + MOCK_SRC_OFFSET); expect_value(ibv_advise_mr_mock, sg_list->length, MOCK_LEN); expect_value(ibv_advise_mr_mock, num_sge, 1); will_return(ibv_advise_mr_mock, RPMA_E_PROVIDER); /* run test */ int ret = rpma_mr_advise(mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * read__success - happy day scenario */ static void advise__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ expect_value(ibv_advise_mr_mock, pd, MOCK_IBV_PD); expect_value(ibv_advise_mr_mock, advice, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE); expect_value(ibv_advise_mr_mock, flags, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); expect_value(ibv_advise_mr_mock, sg_list->lkey, MOCK_LKEY); expect_value(ibv_advise_mr_mock, sg_list->addr, MOCK_LADDR + MOCK_SRC_OFFSET); expect_value(ibv_advise_mr_mock, sg_list->length, MOCK_LEN); expect_value(ibv_advise_mr_mock, num_sge, 1); will_return(ibv_advise_mr_mock, MOCK_OK); /* run test */ int ret = rpma_mr_advise(mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, IB_UVERBS_ADVISE_MR_FLAG_FLUSH); /* verify the results */ assert_int_equal(ret, MOCK_OK); } #else /* * advise__failed_E_NOSUPP - rpma_mr_advise failed * with RPMA_E_NOSUPP when the operation is not supported by the system */ static void advise__failed_E_NOSUPP(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* run test */ int ret = rpma_mr_advise(mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, MOCK_ADVICE, MOCK_MR_FLAG); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } #endif /* * group_setup_mr_advise -- prepare resources for all tests in the group */ static int group_setup_mr_advise(void **unused) { #ifdef IBV_ADVISE_MR_SUPPORTED /* configure global mocks */ /* * ibv_advise_mr() is defined as a static inline function * in the included header , * so we cannot define it again. It is defined as: * { * return (struct verbs_context *)(((uint8_t *)ctx) * - offsetof(struct verbs_context, context)) * ->advise_mr(pd, advice, flags, sg_list, num_sge); * } * so we can set the advise_mr function pointer to our mock function. 
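 *
 * The assignments below make that pointer arithmetic round-trip: Ibv_pd.context
 * is pointed at the context member embedded inside Verbs_context, so when the
 * inline wrapper subtracts offsetof(struct verbs_context, context) it lands
 * back on &Verbs_context and picks up the mock. A rough sketch of what the
 * wrapper effectively computes in this setup (illustration only):
 *
 *	ctx  = (uint8_t *)&Verbs_context + offsetof(struct verbs_context, context)
 *	vctx = (struct verbs_context *)((uint8_t *)ctx
 *			- offsetof(struct verbs_context, context))
 *	vctx->advise_mr == ibv_advise_mr_mock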
*/ Verbs_context.advise_mr = ibv_advise_mr_mock; Verbs_context.sz = sizeof(Verbs_context); Ibv_pd.context = (struct ibv_context *)((uint8_t *)&Verbs_context + offsetof(struct verbs_context, context)); Ibv_pd.context->abi_compat = __VERBS_ABI_IS_EXTENDED; Ibv_mr.lkey = MOCK_LKEY; Ibv_mr.pd = MOCK_IBV_PD; #endif return 0; } #ifdef IBV_ADVISE_MR_SUPPORTED static const struct CMUnitTest test_mr_advise[] = { /* rpma_mr_adivse() unit tests */ cmocka_unit_test_setup_teardown(advise__failed_E_NOSUPP, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(advise__failed_E_INVAL, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(advise__failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(advise__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; #else static const struct CMUnitTest test_mr_advise[] = { /* rpma_mr_adivse() unit tests */ cmocka_unit_test_setup_teardown(advise__failed_E_NOSUPP, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; #endif int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_mr_advise, group_setup_mr_advise, NULL); } rpma-1.3.0/tests/unit/mr/mr-atomic_write.c000066400000000000000000000150511443364775400205020ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * mr-atomic_write.c -- rpma_mr_atomic_write() unit tests */ #include #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" static const char Mock_src[8]; #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED static struct ibv_wr_atomic_write_mock_args atomic_write_args; static struct ibv_qp_ex *qpxs[] = {NULL, MOCK_QPX}; #else static struct ibv_qp_ex *qpxs[] = {NULL}; #endif static int num_qpxs = sizeof(qpxs) / sizeof(qpxs[0]); static struct ibv_post_send_mock_args args; /* * common_configure_mr_atomic_write -- common part of mocks for rpma_mr_atomic_write() */ static void common_configure_mr_atomic_write(int flags, int ret) { /* configure mocks */ args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_WRITE; args.send_flags = IBV_SEND_INLINE | IBV_SEND_FENCE; if (flags == RPMA_F_COMPLETION_ALWAYS) args.send_flags |= IBV_SEND_SIGNALED; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; args.rkey = MOCK_RKEY; args.ret = ret; will_return(ibv_post_send_mock, &args); } /* * configure_mr_atomic_write -- configure mocks for rpma_mr_atomic_write() */ static void configure_mr_atomic_write(struct ibv_qp_ex *qpx, int flags, int ret) { /* configure mocks */ #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, qpx); if (qpx && qpx->wr_atomic_write) { expect_value(ibv_wr_start_mock, qp, MOCK_QPX); atomic_write_args.qp = MOCK_QPX; atomic_write_args.wr_id = (uint64_t)MOCK_OP_CONTEXT; atomic_write_args.wr_flags = (flags == RPMA_F_COMPLETION_ALWAYS) ? 
IBV_SEND_SIGNALED : 0; atomic_write_args.rkey = MOCK_RKEY; atomic_write_args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; atomic_write_args.atomic_wr = Mock_src; will_return(ibv_wr_atomic_write_mock, &atomic_write_args); expect_value(ibv_wr_complete_mock, qp, MOCK_QPX); will_return(ibv_wr_complete_mock, ret); return; } #endif common_configure_mr_atomic_write(flags, ret); } /* * atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER - * rpma_mr_atomic_write failed with RPMA_E_PROVIDER * when RPMA_F_COMPLETION_ON_ERROR is specified */ static void atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_qpxs; i++) { /* configure mocks */ configure_mr_atomic_write(qpxs[i], RPMA_F_COMPLETION_ON_ERROR, MOCK_ERRNO); /* run test */ int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, Mock_src, RPMA_F_COMPLETION_ON_ERROR, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } } /* * atomic_write__COMPL_ON_SUCCESS_failed_E_PROVIDER - * rpma_mr_atomic_write failed with RPMA_E_PROVIDER * when RPMA_F_COMPLETION_ALWAYS is specified */ static void atomic_write__COMPL_ON_SUCCESS_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_qpxs; i++) { /* configure mocks */ configure_mr_atomic_write(qpxs[i], RPMA_F_COMPLETION_ALWAYS, MOCK_ERRNO); /* run test */ int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, Mock_src, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } } /* * atomic_write__COMPLETION_ALWAYS_success - happy day scenario */ static void atomic_write__COMPLETION_ALWAYS_success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_qpxs; i++) { /* configure mocks */ configure_mr_atomic_write(qpxs[i], RPMA_F_COMPLETION_ALWAYS, MOCK_OK); /* run test */ int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, Mock_src, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } /* * atomic_write__COMPLETION_ON_ERROR_success */ static void atomic_write__COMPLETION_ON_ERROR_success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_qpxs; i++) { /* configure mocks */ configure_mr_atomic_write(qpxs[i], RPMA_F_COMPLETION_ON_ERROR, MOCK_OK); /* run test */ int ret = rpma_mr_atomic_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, Mock_src, RPMA_F_COMPLETION_ON_ERROR, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } /* * group_setup_mr_atomic_write -- prepare resources for all tests in the group */ static int group_setup_mr_atomic_write(void **unused) { /* configure global mocks */ #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED /* * ibv_wr_start(), ibv_wr_atomic_write() and ibv_wr_complete() are defined * as static inline functions in the included header , * so we cannot define them again. They are defined as: * { * return qp->wr_start(qp); * } * { * return qp->wr_atomic_write(qp, rkey, remote_addr, atomic_wr); * } * { * return qp->wr_complete(qp); * } * so we can set these three function pointers to our mock functions. */ Ibv_qp_ex.wr_start = ibv_wr_start_mock; Ibv_qp_ex.wr_atomic_write = ibv_wr_atomic_write_mock; Ibv_qp_ex.wr_complete = ibv_wr_complete_mock; #endif /* * ibv_post_send() is defined as a static inline function * in the included header , * so we cannot define it again. 
It is defined as: * { * return qp->context->ops.post_send(qp, wr, bad_wr); * } * so we can set the 'qp->context->ops.post_send' function pointer * to our mock function. */ MOCK_VERBS->ops.post_send = ibv_post_send_mock; Ibv_qp.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr__atomic_write[] = { /* rpma_mr_atomic_write() unit tests */ cmocka_unit_test_setup_teardown( atomic_write__COMPL_ON_ERROR_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( atomic_write__COMPL_ON_SUCCESS_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( atomic_write__COMPLETION_ALWAYS_success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( atomic_write__COMPLETION_ON_ERROR_success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr__atomic_write, group_setup_mr_atomic_write, NULL); } rpma-1.3.0/tests/unit/mr/mr-common.c000066400000000000000000000071131443364775400173040ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mr-common.c -- the memory region unit tests's common functions */ #include "mocks-ibverbs.h" #include "mocks-rpma-peer.h" #include "mr-common.h" #include "test-common.h" const char Desc_exp_pmem[] = DESC_EXP_PMEM; const char Desc_exp_dram[] = DESC_EXP_DRAM; /* common setups & teardowns */ /* * setup__mr_local_and_remote -- create a local and a remote * memory region structures */ int setup__mr_local_and_remote(void **mrs_ptr) { static struct mrs mrs = {0}; int ret; struct prestate prestate = {MOCK_USAGE, MOCK_ACCESS, NULL}; struct prestate *pprestate = &prestate; /* create a local memory region structure */ ret = setup__reg_success((void **)&pprestate); mrs.local = prestate.mr; /* verify the result */ assert_int_equal(ret, MOCK_OK); /* create a remote memory region structure */ ret = setup__mr_remote((void **)&mrs.remote); /* verify the result */ assert_int_equal(ret, MOCK_OK); *mrs_ptr = &mrs; return 0; } /* * teardown__mr_local_and_remote -- delete a local and a remote * memory region structures */ int teardown__mr_local_and_remote(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; int ret; struct prestate prestate = {0}; struct prestate *pprestate = &prestate; prestate.mr = mrs->local; /* create a local memory region structure */ ret = teardown__dereg_success((void **)&pprestate); /* verify the result */ assert_int_equal(ret, MOCK_OK); /* create a remote memory region structure */ ret = teardown__mr_remote((void **)&mrs->remote); /* verify the result */ assert_int_equal(ret, MOCK_OK); return 0; } /* * setup__reg_success -- create a local memory registration object */ int setup__reg_success(void **pprestate) { struct prestate *prestate = *pprestate; /* configure mocks */ struct rpma_peer_setup_mr_reg_args mr_reg_args; mr_reg_args.usage = prestate->usage; mr_reg_args.access = prestate->access; mr_reg_args.mr = MOCK_MR; will_return(rpma_peer_setup_mr_reg, &mr_reg_args); will_return(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, mr_reg_args.usage, &mr); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_non_null(mr); /* pass mr to the test */ prestate->mr = mr; return 0; } /* * teardown__dereg_success -- delete the local memory 
registration object */ int teardown__dereg_success(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* configure mocks */ will_return(ibv_dereg_mr, MOCK_OK); int ret = rpma_mr_dereg(&mr); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_null(mr); return 0; } /* * setup__mr_remote -- create a remote memory region structure based * on a pre-prepared memory region's descriptor */ int setup__mr_remote(void **mr_ptr) { /* configure mock */ will_return(__wrap__test_malloc, MOCK_OK); /* * create a remote memory structure based on a pre-prepared descriptor */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_from_descriptor(Desc_exp_pmem, MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(mr); *mr_ptr = mr; return 0; } /* * teardown__mr_remote -- delete the remote memory region's structure */ int teardown__mr_remote(void **mr_ptr) { struct rpma_mr_remote *mr = *mr_ptr; /* delete the remote memory region's structure */ int ret = rpma_mr_remote_delete(&mr); assert_int_equal(ret, MOCK_OK); assert_null(mr); *mr_ptr = NULL; return 0; } rpma-1.3.0/tests/unit/mr/mr-common.h000066400000000000000000000044441443364775400173150ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2021, Intel Corporation */ /* * mr-common.h -- the memory region unit tests's common definitions */ #ifndef MR_COMMON_H #define MR_COMMON_H #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #define MOCK_USAGE \ ((int)(RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_READ_DST |\ RPMA_MR_USAGE_WRITE_SRC | RPMA_MR_USAGE_WRITE_DST |\ RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT)) #define MOCK_ACCESS \ ((int)(IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE |\ IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE)) #define DESC_EXP_PMEM {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, \ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, \ 0x13, 0x12, 0x11, 0x10, \ 0x21} #define DESC_EXP_DRAM {0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, \ 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, \ 0x13, 0x12, 0x11, 0x10, \ 0x11} #define MOCK_FLUSH_TYPE RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT #define MR_DESC_SIZE 21 /* sizeof(DESC_EXP_PMEM) */ #define INVALID_MR_DESC_SIZE 1 #define MOCK_DST_OFFSET (size_t)0xC413 #define MOCK_SRC_OFFSET (size_t)0xC414 #define MOCK_LEN (size_t)0xC415 #define MOCK_UNKNOWN_OP ((enum ibv_wr_opcode)(-1)) #define MOCK_OP_CONTEXT (void *)0xC417 #define MOCK_DESC (void *)0xC418 /* these values are derived from DESC_EXP_PMEM/DRAM above */ #define MOCK_RADDR (uint64_t)0x0001020304050607 #define MOCK_RKEY (uint32_t)0x10111213 #define MOCK_LADDR (uint64_t)0x0001020304050607 #define MOCK_LKEY (uint32_t)0x20212223 #define MOCK_ADVICE 1 #define MOCK_MR_FLAG (1 << 1) /* a state used for rpma_mr_read/_write tests */ struct mrs { struct rpma_mr_local *local; struct rpma_mr_remote *remote; }; /* prestate structure passed to unit test functions */ struct prestate { int usage; int access; /* mr passed from setup to test and to teardown */ struct rpma_mr_local *mr; }; extern const char Desc_exp_pmem[]; extern const char Desc_exp_dram[]; /* setups & teardowns */ int setup__reg_success(void **pprestate); int teardown__dereg_success(void **pprestate); int setup__mr_local_and_remote(void **mrs_ptr); int teardown__mr_local_and_remote(void **mrs_ptr); int setup__mr_remote(void **mr_ptr); int teardown__mr_remote(void **mr_ptr); #endif /* MR_COMMON_H */ 
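/*
 * Illustration only (not part of the test suite): the constants above imply
 * a 21-byte descriptor layout of 8 bytes of remote address, 8 bytes of size
 * and 4 bytes of rkey, all little-endian, followed by 1 byte of usage flags
 * (the trailing 0x21 vs 0x11 is what differentiates the PMEM and DRAM
 * descriptors by flush type). The standalone decoder below is a sketch under
 * that assumption -- the helper name is made up here and it is not how mr.c
 * necessarily implements rpma_mr_remote_from_descriptor().
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

static inline void
example_decode_mr_desc(const char *desc, uint64_t *raddr, uint64_t *size,
		uint32_t *rkey, uint8_t *usage)
{
	uint64_t u64;
	uint32_t u32;

	memcpy(&u64, desc, sizeof(u64));
	*raddr = le64toh(u64);	/* DESC_EXP_PMEM -> 0x0001020304050607 (MOCK_RADDR) */
	memcpy(&u64, desc + 8, sizeof(u64));
	*size = le64toh(u64);	/* bytes 8..15 carry the region size */
	memcpy(&u32, desc + 16, sizeof(u32));
	*rkey = le32toh(u32);	/* DESC_EXP_PMEM -> 0x10111213 (MOCK_RKEY) */
	*usage = (uint8_t)desc[20];	/* usage bit mask, e.g. 0x21 for the PMEM case */
}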
rpma-1.3.0/tests/unit/mr/mr-descriptor.c000066400000000000000000000276151443364775400202030ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * mr-descriptor.c -- the memory region serialization unit tests * * APIs covered: * - rpma_mr_get_descriptor_size() * - rpma_mr_get_descriptor() * - rpma_mr_remote_from_descriptor() * - rpma_mr_remote_delete() * - rpma_mr_remote_get_size() */ #include #include #include "mocks-rpma-peer.h" #include "mr-common.h" #include "test-common.h" /* rpma_mr_get_descriptor_size() unit test */ /* * get_descriptor_size__mr_NULL - NULL mr is invalid */ static void get_descriptor_size__mr_NULL(void **unused) { size_t desc_size; /* run test */ int ret = rpma_mr_get_descriptor_size(NULL, &desc_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_descriptor_size__desc_size_NULL - NULL desc_size is invalid */ static void get_descriptor_size__desc_size_NULL(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* run test */ int ret = rpma_mr_get_descriptor_size(mr, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_descriptor_size__mr_desc_size_NULL - NULL mr and NULL desc_size * are invalid */ static void get_descriptor_size__mr_desc_size_NULL(void **unused) { /* run test */ int ret = rpma_mr_get_descriptor_size(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_descriptor_size__success - happy day scenario */ static void get_descriptor_size__success(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; size_t desc_size; /* run test */ int ret = rpma_mr_get_descriptor_size(mr, &desc_size); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(desc_size, MR_DESC_SIZE); } /* rpma_mr_get_descriptor() unit test */ /* * get_descriptor__mr_NULL - NULL mr is invalid */ static void get_descriptor__mr_NULL(void **unused) { /* run test */ int ret = rpma_mr_get_descriptor(NULL, MOCK_DESC); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_descriptor__desc_NULL - NULL desc is invalid */ static void get_descriptor__desc_NULL(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* run test */ int ret = rpma_mr_get_descriptor(mr, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_descriptor__mr_NULL_desc_NULL - NULL mr and NULL desc are invalid */ static void get_descriptor__mr_NULL_desc_NULL(void **unused) { /* run test */ int ret = rpma_mr_get_descriptor(NULL, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* rpma_mr_remote_from_descriptor() unit test */ /* * remote_from_descriptor__desc_NULL - NULL desc is invalid */ static void remote_from_descriptor__desc_NULL(void **unused) { /* run test */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_from_descriptor(NULL, MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * remote_from_descriptor__mr_ptr_NULL - NULL mr_ptr is invalid */ static void remote_from_descriptor__mr_ptr_NULL(void **unused) { /* run test */ int ret = rpma_mr_remote_from_descriptor(&Desc_exp_pmem, MR_DESC_SIZE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_from_descriptor__mr_ptr_NULL_desc_NULL - NULL mr_ptr and NULL * desc are invalid */ static void 
remote_from_descriptor__mr_ptr_NULL_desc_NULL(void **unused) { /* run test */ int ret = rpma_mr_remote_from_descriptor(NULL, MR_DESC_SIZE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_from_descriptor__invalid_desc_size - invalid desc_size */ static void remote_from_descriptor__invalid_desc_size(void **unused) { /* run test */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_from_descriptor(&Desc_exp_pmem, INVALID_MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_from_descriptor__malloc_ERRNO - malloc() fails with MOCK_ERRNO */ static void remote_from_descriptor__malloc_ERRNO(void **unused) { /* configure mock */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_from_descriptor(&Desc_exp_pmem, MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); } /* * remote_from_descriptor__buff_usage_equal_zero - buff with invalid contents * should be detected as long as it breaks placement value */ static void remote_from_descriptor__buff_usage_equal_zero(void **unused) { char desc_invalid[MR_DESC_SIZE]; memset(desc_invalid, 0xff, MR_DESC_SIZE - 1); /* set usage to 0 */ desc_invalid[MR_DESC_SIZE - 1] = 0; /* configure mock */ will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_from_descriptor(&desc_invalid, MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* rpma_mr_remote_delete() unit test */ /* * remote_delete__mr_ptr_NULL - NULL mr_ptr is invalid */ static void remote_delete__mr_ptr_NULL(void **unused) { /* run test */ int ret = rpma_mr_remote_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_delete__mr_NULL - NULL mr should exit quickly */ static void remote_delete__mr_NULL(void **unused) { /* run test */ struct rpma_mr_remote *mr = NULL; int ret = rpma_mr_remote_delete(&mr); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(mr); } /* rpma_mr_remote_get_size() unit test */ /* * remote_get_size__mr_ptr_NULL - NULL mr_ptr is invalid */ static void remote_get_size__mr_ptr_NULL(void **unused) { /* run test */ size_t size = 0; int ret = rpma_mr_remote_get_size(NULL, &size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_int_equal(size, 0); } /* * remote_get_size__size_NULL - NULL size pointer is invalid */ static void remote_get_size__size_NULL(void **mr_ptr) { struct rpma_mr_remote *mr = *mr_ptr; /* run test */ int ret = rpma_mr_remote_get_size(mr, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_get_size__mr_ptr_NULL_size_NULL - NULL mr_ptr and NULL size * pointer are invalid */ static void remote_get_size__mr_ptr_NULL_size_NULL(void **unused) { /* run test */ int ret = rpma_mr_remote_get_size(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_get_size__success - rpma_mr_remote_get_size() success */ static void remote_get_size__success(void **mr_ptr) { struct rpma_mr_remote *mr = *mr_ptr; /* verify the remote memory region correctness */ size_t size = 0; int ret = rpma_mr_remote_get_size(mr, &size); assert_int_equal(ret, MOCK_OK); assert_int_equal(size, MOCK_SIZE); } /* rpma_mr_serialiaze()/_remote_from_descriptor() buffer alignment */ /* * get_descriptor__desc_alignment - try rpma_mr_get_descriptor() with * a miscellaneous input descriptor 
alignment just to be sure the implementation * does not prefer certain alignments. */ static void get_descriptor__desc_alignment(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; char buff_base[MR_DESC_SIZE * 2]; char pattern[MR_DESC_SIZE * 2]; memset(pattern, 0xff, MR_DESC_SIZE * 2); void *desc = NULL; int ret = 0; /* * Generate a miscellaneous output descriptor alignment just to be sure * the implementation does not prefer certain alignments. */ for (uintptr_t i = 0; i < MR_DESC_SIZE; ++i) { memset(buff_base, 0xff, MR_DESC_SIZE * 2); /* run test */ desc = buff_base + i; ret = rpma_mr_get_descriptor(mr, desc); /* verify the results */ assert_int_equal(ret, 0); assert_memory_equal(desc, &Desc_exp_pmem, MR_DESC_SIZE); assert_memory_equal(buff_base, pattern, i); assert_memory_equal( buff_base + i + MR_DESC_SIZE, pattern + i + MR_DESC_SIZE, MR_DESC_SIZE - i); } } /* * remote_from_descriptor__desc_alignment -- try * rpma_mr_remote_from_descriptor() with a miscellaneous output descriptor * alignment just to be sure the implementation does not prefer certain * alignments. */ static void remote_from_descriptor__desc_alignment(void **unused) { char buff_base[MR_DESC_SIZE * 2]; char pattern[MR_DESC_SIZE * 2]; memset(pattern, 0xff, MR_DESC_SIZE * 2); void *desc = NULL; struct rpma_mr_remote *mr = NULL; size_t size = 0; int usage = 0; int ret = 0; /* configure mock */ will_return_always(__wrap__test_malloc, MOCK_OK); /* * Generate a miscellaneous input descriptor alignment just to be sure * the implementation does not prefer certain alignments. */ for (uintptr_t i = 0; i < MR_DESC_SIZE; ++i) { memset(buff_base, 0xff, MR_DESC_SIZE * 2); /* prepare a buffer contents */ desc = buff_base + i; const void *desc_src = (i % 2) ? &Desc_exp_pmem : &Desc_exp_dram; memcpy(desc, desc_src, MR_DESC_SIZE); /* specify the flush_type */ int flush_type = (i % 2) ? RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT : RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY; /* run test */ ret = rpma_mr_remote_from_descriptor(desc, MR_DESC_SIZE, &mr); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(mr); ret = rpma_mr_remote_get_size(mr, &size); assert_int_equal(ret, MOCK_OK); assert_int_equal(size, MOCK_SIZE); ret = rpma_mr_remote_get_flush_type(mr, &usage); assert_int_equal(ret, MOCK_OK); assert_int_equal(usage, flush_type); /* * It is not easy to verify whether the values * addr and rkey are correct. 
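	 *
	 * (One indirect way to exercise them, not done in this file, would be
	 * to reuse the ibv_post_send_mock wiring from mr-read.c's group setup
	 * and let the mock compare what the reconstructed mr produces, roughly:
	 *
	 *	args.remote_addr = MOCK_RADDR + MOCK_SRC_OFFSET;
	 *	args.rkey = MOCK_RKEY;
	 *	will_return(ibv_post_send_mock, &args);
	 *
	 * followed by an rpma_mr_read() that uses mr as its read source.)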
*/ /* cleanup */ ret = rpma_mr_remote_delete(&mr); assert_int_equal(ret, MOCK_OK); assert_null(mr); } } static struct prestate prestate = {RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT, IBV_ACCESS_REMOTE_READ, NULL}; static const struct CMUnitTest tests_descriptor[] = { /* rpma_mr_get_descriptor_size() unit test */ cmocka_unit_test(get_descriptor_size__mr_NULL), cmocka_unit_test_prestate_setup_teardown( get_descriptor_size__desc_size_NULL, setup__reg_success, teardown__dereg_success, &prestate), cmocka_unit_test(get_descriptor_size__mr_desc_size_NULL), cmocka_unit_test_prestate_setup_teardown( get_descriptor_size__success, setup__reg_success, teardown__dereg_success, &prestate), /* rpma_mr_get_descriptor() unit test */ cmocka_unit_test(get_descriptor__mr_NULL), cmocka_unit_test_prestate_setup_teardown( get_descriptor__desc_NULL, setup__reg_success, teardown__dereg_success, &prestate), cmocka_unit_test(get_descriptor__mr_NULL_desc_NULL), /* rpma_mr_remote_from_descriptor() unit test */ cmocka_unit_test(remote_from_descriptor__desc_NULL), cmocka_unit_test(remote_from_descriptor__mr_ptr_NULL), cmocka_unit_test( remote_from_descriptor__mr_ptr_NULL_desc_NULL), cmocka_unit_test(remote_from_descriptor__invalid_desc_size), cmocka_unit_test(remote_from_descriptor__malloc_ERRNO), cmocka_unit_test(remote_from_descriptor__buff_usage_equal_zero), /* rpma_mr_remote_delete() unit test */ cmocka_unit_test(remote_delete__mr_ptr_NULL), cmocka_unit_test(remote_delete__mr_NULL), /* rpma_mr_remote_get_size() unit test */ cmocka_unit_test(remote_get_size__mr_ptr_NULL), cmocka_unit_test_setup_teardown(remote_get_size__size_NULL, setup__mr_remote, teardown__mr_remote), cmocka_unit_test(remote_get_size__mr_ptr_NULL_size_NULL), cmocka_unit_test_setup_teardown(remote_get_size__success, setup__mr_remote, teardown__mr_remote), /* * rpma_mr_get_descriptor()/rpma_mr_remote_from_descriptor() * buffer alignment */ cmocka_unit_test_prestate_setup_teardown( get_descriptor__desc_alignment, setup__reg_success, teardown__dereg_success, &prestate), cmocka_unit_test(remote_from_descriptor__desc_alignment), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_descriptor, NULL, NULL); } rpma-1.3.0/tests/unit/mr/mr-flush.c000066400000000000000000000075001443364775400171350ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2023 Fujitsu Limited */ /* * mr-flush.c -- rpma_mr_flush() unit tests */ #include #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" static struct ibv_wr_flush_mock_args flush_args; static enum rpma_flush_type types[] = {RPMA_FLUSH_TYPE_VISIBILITY, RPMA_FLUSH_TYPE_PERSISTENT}; static int flags[] = {RPMA_F_COMPLETION_ON_ERROR, RPMA_F_COMPLETION_ALWAYS}; static int num_types = sizeof(types) / sizeof(types[0]); static int num_flags = sizeof(flags) / sizeof(flags[0]); /* * configure_flush -- configure mocks for rpma_mr_flush() */ static void configure_mr_flush(enum rpma_flush_type type, int flags, int ret) { /* configure mocks */ expect_value(ibv_qp_to_qp_ex, qp, MOCK_QP); will_return(ibv_qp_to_qp_ex, MOCK_QPX); expect_value(ibv_wr_start_mock, qp, MOCK_QPX); flush_args.qp = MOCK_QPX; flush_args.wr_id = (uint64_t)MOCK_OP_CONTEXT; flush_args.wr_flags = (flags == RPMA_F_COMPLETION_ALWAYS) ? 
IBV_SEND_SIGNALED : 0; flush_args.rkey = MOCK_RKEY; flush_args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; flush_args.len = MOCK_LEN; switch (type) { case RPMA_FLUSH_TYPE_VISIBILITY: flush_args.type = IBV_FLUSH_GLOBAL; break; case RPMA_FLUSH_TYPE_PERSISTENT: flush_args.type = IBV_FLUSH_PERSISTENT; } flush_args.level = IBV_FLUSH_RANGE; will_return(ibv_wr_flush_mock, &flush_args); expect_value(ibv_wr_complete_mock, qp, MOCK_QPX); will_return(ibv_wr_complete_mock, ret); } /* * flush__failed_E_PROVIDER - rpma_mr_flush failed with RPMA_E_PROVIDER */ static void flush__failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_types; i++) { for (int j = 0; j < num_flags; j++) { /* configure mocks */ configure_mr_flush(types[i], flags[j], MOCK_ERRNO); /* run test */ int ret = rpma_mr_flush(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, MOCK_LEN, types[i], flags[j], MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } } } /* * flush__success - rpma_mr_flush succeeded */ static void flush__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; for (int i = 0; i < num_types; i++) { for (int j = 0; j < num_flags; j++) { /* configure mocks */ configure_mr_flush(types[i], flags[j], MOCK_OK); /* run test */ int ret = rpma_mr_flush(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, MOCK_LEN, types[i], flags[j], MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } } /* * group_setup_mr_flush -- prepare resources for all tests in the group */ static int group_setup_mr_flush(void **unused) { /* configure global mocks */ /* * ibv_wr_start(), ibv_wr_flush() and ibv_wr_complete() are defined * as static inline functions in the included header , * so we cannot define them again. They are defined as: * { * return qp->wr_start(qp); * } * { * return qp->wr_flush(qp, rkey, remote_addr, len, type, level); * } * { * return qp->wr_complete(qp); * } * so we can set these three function pointers to our mock functions. 
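	 *
	 * With these assignments in place the flush path exercised below boils
	 * down to the following mock interaction (a rough sketch, not a
	 * verbatim copy of mr.c):
	 *
	 *	ibv_wr_start(MOCK_QPX);                      -> ibv_wr_start_mock()
	 *	ibv_wr_flush(MOCK_QPX, MOCK_RKEY,
	 *			MOCK_RADDR + MOCK_DST_OFFSET, MOCK_LEN,
	 *			type, IBV_FLUSH_RANGE);      -> ibv_wr_flush_mock()
	 *	ibv_wr_complete(MOCK_QPX);                   -> ibv_wr_complete_mock()
	 *
	 * where type is IBV_FLUSH_GLOBAL or IBV_FLUSH_PERSISTENT depending on
	 * the rpma_flush_type under test, as configure_mr_flush() expects.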
*/ Ibv_qp_ex.wr_start = ibv_wr_start_mock; Ibv_qp_ex.wr_flush = ibv_wr_flush_mock; Ibv_qp_ex.wr_complete = ibv_wr_complete_mock; return 0; } static const struct CMUnitTest tests_mr__flush[] = { /* rpma_mr_flush() unit tests */ cmocka_unit_test_setup_teardown( flush__failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( flush__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr__flush, group_setup_mr_flush, NULL); } rpma-1.3.0/tests/unit/mr/mr-get_flush_type.c000066400000000000000000000044211443364775400210340ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mr-get_flush_type.c -- the memory region get_flush_type unit tests * * APIs covered: * - rpma_mr_remote_get_flush_type() */ #include #include #include "mocks-rpma-peer.h" #include "mr-common.h" #include "test-common.h" /* rpma_mr_remote_get_flush_type() unit test */ /* * remote_get_flush_type__mr_NULL - NULL mr is invalid */ static void remote_get_flush_type__mr_NULL(void **unused) { /* run test */ int flush_type = 0; int ret = rpma_mr_remote_get_flush_type(NULL, &flush_type); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_int_equal(flush_type, 0); } /* * remote_get_flush_type__flush_type_NULL - NULL flush_type pointer is invalid */ static void remote_get_flush_type__flush_type_NULL(void **mr_ptr) { struct rpma_mr_remote *mr = *mr_ptr; /* run test */ int ret = rpma_mr_remote_get_flush_type(mr, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_get_flush_type__mr_NULL_flush_type_NULL - NULL mr and NULL flush_type * pointer are invalid */ static void remote_get_flush_type__mr_NULL_flush_type_NULL(void **unused) { /* run test */ int ret = rpma_mr_remote_get_flush_type(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * remote_get_flush_type__success - rpma_mr_remote_get_flush_type() success */ static void remote_get_flush_type__success(void **mr_ptr) { struct rpma_mr_remote *mr = *mr_ptr; /* verify the remote memory region correctness */ int flush_type = 0; int ret = rpma_mr_remote_get_flush_type(mr, &flush_type); assert_int_equal(ret, MOCK_OK); assert_int_equal(flush_type, MOCK_FLUSH_TYPE); } static const struct CMUnitTest tests_get_flush_type[] = { /* rpma_mr_remote_get_flush_type() unit test */ cmocka_unit_test(remote_get_flush_type__mr_NULL), cmocka_unit_test_setup_teardown(remote_get_flush_type__flush_type_NULL, setup__mr_remote, teardown__mr_remote), cmocka_unit_test(remote_get_flush_type__mr_NULL_flush_type_NULL), cmocka_unit_test_setup_teardown(remote_get_flush_type__success, setup__mr_remote, teardown__mr_remote), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_flush_type, NULL, NULL); } rpma-1.3.0/tests/unit/mr/mr-local.c000066400000000000000000000072341443364775400171120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2021 Fujitsu */ /* * mr-local.c -- local memory registration object getters unit tests * * APIs covered: * - rpma_mr_get_size() * - rpma_mr_get_ptr() */ #include #include #include "mocks-rpma-peer.h" #include "mr-common.h" #include "test-common.h" /* rpma_mr_get_size() unit tests */ /* * get_size__mr_NULL - NULL mr is invalid */ static void get_size__mr_NULL(void **unused) { size_t size; /* run test */ int ret = 
rpma_mr_get_size(NULL, &size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_size__size_NULL - NULL size is invalid */ static void get_size__size_NULL(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* run test */ int ret = rpma_mr_get_size(mr, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_size__mr_size_NULL - NULL mr and NULL size are invalid */ static void get_size__mr_size_NULL(void **unused) { /* run test */ int ret = rpma_mr_get_size(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_size__success - happy day scenario */ static void get_size__success(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; size_t size; /* run test */ int ret = rpma_mr_get_size(mr, &size); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(size, MOCK_SIZE); } /* rpma_mr_get_ptr() unit tests */ /* * get_ptr__mr_NULL - NULL mr is invalid */ static void get_ptr__mr_NULL(void **unused) { void *ptr = NULL; /* run test */ int ret = rpma_mr_get_ptr(NULL, &ptr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ptr__ptr_NULL - NULL ptr is invalid */ static void get_ptr__ptr_NULL(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* run test */ int ret = rpma_mr_get_ptr(mr, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ptr__mr_ptr_NULL - NULL mr and NULL ptr are invalid */ static void get_ptr__mr_ptr_NULL(void **unused) { /* run test */ int ret = rpma_mr_get_ptr(NULL, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ptr__success - happy day scenario */ static void get_ptr__success(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; void *ptr = NULL; /* run test */ int ret = rpma_mr_get_ptr(mr, &ptr); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(ptr, MOCK_PTR); } static struct prestate prestate = {RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_READ_DST, IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE, NULL}; static const struct CMUnitTest tests_mr_local[] = { /* rpma_mr_get_size() unit tests */ cmocka_unit_test(get_size__mr_NULL), cmocka_unit_test_prestate_setup_teardown( get_size__size_NULL, setup__reg_success, teardown__dereg_success, &prestate), cmocka_unit_test(get_size__mr_size_NULL), cmocka_unit_test_prestate_setup_teardown( get_size__success, setup__reg_success, teardown__dereg_success, &prestate), /* rpma_mr_get_ptr() unit tests */ cmocka_unit_test(get_ptr__mr_NULL), cmocka_unit_test_prestate_setup_teardown( get_ptr__ptr_NULL, setup__reg_success, teardown__dereg_success, &prestate), cmocka_unit_test(get_ptr__mr_ptr_NULL), cmocka_unit_test_prestate_setup_teardown( get_ptr__success, setup__reg_success, teardown__dereg_success, &prestate), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_local, NULL, NULL); } rpma-1.3.0/tests/unit/mr/mr-read.c000066400000000000000000000112101443364775400167200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * mr-read.c -- rpma_mr_read() unit tests */ #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mr-common.h" #include "mocks-ibverbs.h" #include "test-common.h" /* * read__COMPL_ALWAYS_failed_E_PROVIDER - rpma_mr_read 
failed * with RPMA_E_PROVIDER when send_flags == RPMA_F_COMPLETION_ON_SUCCESS */ static void read__COMPL_ALWAYS_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_READ; args.send_flags = IBV_SEND_SIGNALED; /* for RPMA_F_COMPLETION_ALWAYS */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_SRC_OFFSET; args.rkey = MOCK_RKEY; args.ret = MOCK_ERRNO; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_read(MOCK_QP, mrs->local, MOCK_DST_OFFSET, mrs->remote, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * read__COMPL_ON_ERROR_failed_E_PROVIDER - rpma_mr_read failed * with RPMA_E_PROVIDER when send_flags == 0 for RPMA_F_COMPLETION_ON_ERROR */ static void read__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_READ; args.send_flags = 0; /* for RPMA_F_COMPLETION_ON_ERROR */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_SRC_OFFSET; args.rkey = MOCK_RKEY; args.ret = MOCK_ERRNO; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_read(MOCK_QP, mrs->local, MOCK_DST_OFFSET, mrs->remote, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ON_ERROR, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * read__success - happy day scenario */ static void read__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_READ; args.send_flags = IBV_SEND_SIGNALED; /* for RPMA_F_COMPLETION_ALWAYS */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_SRC_OFFSET; args.rkey = MOCK_RKEY; args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_read(MOCK_QP, mrs->local, MOCK_DST_OFFSET, mrs->remote, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * read_0B_message__success - happy day scenario */ static void read_0B_message__success(void **mrs_ptr) { /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_READ; args.send_flags = IBV_SEND_SIGNALED; /* for RPMA_F_COMPLETION_ALWAYS */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = 0; args.rkey = 0; args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_read(MOCK_QP, NULL, 0, NULL, 0, 0, RPMA_F_COMPLETION_ALWAYS, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_mr_read -- prepare resources for all tests in the group */ static int group_setup_mr_read(void **unused) { /* configure global mocks */ /* * ibv_post_send() is defined as a static inline function * in the included header , * so we cannot define it again. It is defined as: * { * return qp->context->ops.post_send(qp, wr, bad_wr); * } * so we can set the 'qp->context->ops.post_send' function pointer * to our mock function. 
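	 *
	 * Under this setup a successful rpma_mr_read() is expected to reach
	 * the mock as roughly the following work request (a sketch; the exact
	 * wr is built inside mr.c):
	 *
	 *	wr.opcode              = IBV_WR_RDMA_READ
	 *	wr.wr_id               = (uint64_t)MOCK_OP_CONTEXT
	 *	wr.wr.rdma.remote_addr = MOCK_RADDR + MOCK_SRC_OFFSET
	 *	wr.wr.rdma.rkey        = MOCK_RKEY
	 *	wr.send_flags          = IBV_SEND_SIGNALED (RPMA_F_COMPLETION_ALWAYS)
	 *
	 * which is exactly what ibv_post_send_mock is configured to expect in
	 * the tests above.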
*/ MOCK_VERBS->ops.post_send = ibv_post_send_mock; Ibv_qp.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr_read[] = { /* rpma_mr_read() unit tests */ cmocka_unit_test_setup_teardown( read__COMPL_ALWAYS_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( read__COMPL_ON_ERROR_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(read__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(read_0B_message__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_read, group_setup_mr_read, NULL); } rpma-1.3.0/tests/unit/mr/mr-recv.c000066400000000000000000000056411443364775400167570ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * mr-recv.c -- rpma_mr_recv() unit tests */ #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" /* * recv__failed_E_PROVIDER - rpma_mr_recv failed with RPMA_E_PROVIDER */ static void recv__failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_recv_mock_args args; args.qp = MOCK_QP; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_ERRNO; will_return(ibv_post_recv_mock, &args); /* run test */ int ret = rpma_mr_recv(MOCK_QP, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * recv__success - happy day scenario */ static void recv__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_recv_mock_args args; args.qp = MOCK_QP; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_OK; will_return(ibv_post_recv_mock, &args); /* run test */ int ret = rpma_mr_recv(MOCK_QP, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * recv_0B_message__success - happy day scenario */ static void recv_0B_message__success(void **mrs_ptr) { /* configure mocks */ struct ibv_post_recv_mock_args args; args.qp = MOCK_QP; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_OK; will_return(ibv_post_recv_mock, &args); /* run test */ int ret = rpma_mr_recv(MOCK_QP, NULL, 0, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_mr_recv -- prepare resources for all tests in the group */ static int group_setup_mr_recv(void **unused) { /* configure global mocks */ /* * ibv_post_recv() is defined as a static inline function * in the included header , * so we cannot define it again. It is defined as: * { * return qp->context->ops.post_recv(qp, wr, bad_wr); * } * so we can set the 'qp->context->ops.post_recv' function pointer * to our mock function. 
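	 *
	 * The receive path is simpler than the send path: ibv_post_recv_mock
	 * only checks the queue pair and the wr_id carried by the posted
	 * request, roughly (a sketch, not a verbatim copy of mr.c):
	 *
	 *	wr.wr_id = (uint64_t)MOCK_OP_CONTEXT
	 *	ibv_post_recv(MOCK_QP, &wr, &bad_wr)   -> ibv_post_recv_mock()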
*/ MOCK_VERBS->ops.post_recv = ibv_post_recv_mock; Ibv_qp.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr_recv[] = { /* rpma_mr_recv() unit tests */ cmocka_unit_test_setup_teardown(recv__failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(recv__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(recv_0B_message__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_recv, group_setup_mr_recv, NULL); } rpma-1.3.0/tests/unit/mr/mr-reg.c000066400000000000000000000143121443364775400165700ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mr-reg.c -- the memory region registration/deregistration unit tests * * APIs covered: * - rpma_mr_reg() * - rpma_mr_dereg() */ #include #include #include "mocks-ibverbs.h" #include "mocks-rpma-peer.h" #include "mr-common.h" #include "test-common.h" #define USAGE_WRONG (~((int)0)) /* not allowed value of usage */ /* array of prestate structures */ static struct prestate prestates[] = { /* values used in reg_dereg__success called with (prestates + 0) */ {RPMA_MR_USAGE_READ_SRC, IBV_ACCESS_REMOTE_READ, NULL}, /* values used in reg_dereg__success called with (prestates + 1) */ {(RPMA_MR_USAGE_READ_SRC | RPMA_MR_USAGE_READ_DST), (IBV_ACCESS_REMOTE_READ | IBV_ACCESS_LOCAL_WRITE), NULL}, }; /* * reg__NULL_peer -- NULL peer is invalid */ static void reg__NULL_peer(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(NULL, MOCK_PTR, MOCK_SIZE, MOCK_USAGE, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * reg__NULL_ptr -- NULL ptr is invalid */ static void reg__NULL_ptr(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, NULL, MOCK_SIZE, MOCK_USAGE, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * reg__NULL_mr_ptr -- NULL mr_ptr is invalid */ static void reg__NULL_mr_ptr(void **unused) { /* run test */ int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, MOCK_USAGE, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * reg__NULL_peer_ptr_mr_ptr -- NULL peer, ptr and mr_ptr are invalid */ static void reg__NULL_peer_ptr_mr_ptr(void **unused) { /* run test */ int ret = rpma_mr_reg(NULL, NULL, MOCK_SIZE, MOCK_USAGE, NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * reg__0_size -- size == 0 is invalid */ static void reg__0_size(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, 0, MOCK_USAGE, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * reg__0_usage -- usage == 0 is invalid */ static void reg__0_usage(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, 0, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * reg__wrong_usage -- not allowed value of usage */ static void reg__wrong_usage(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, USAGE_WRONG, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(mr); } /* * reg__malloc_ERRNO -- malloc() fails with 
MOCK_ERRNO */ static void reg__malloc_ERRNO(void **unused) { /* configure mocks */ struct rpma_peer_setup_mr_reg_args mr_reg_args; mr_reg_args.usage = RPMA_MR_USAGE_READ_SRC; mr_reg_args.access = IBV_ACCESS_REMOTE_READ; mr_reg_args.mr = MOCK_MR; will_return(__wrap__test_malloc, MOCK_ERRNO); will_return_maybe(rpma_peer_setup_mr_reg, &mr_reg_args); will_return_maybe(ibv_dereg_mr, MOCK_OK); /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, mr_reg_args.usage, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(mr); } /* * reg__peer_mr_reg_ERRNO -- rpma_peer_setup_mr_reg() fails with MOCK_ERRNO */ static void reg__peer_mr_reg_ERRNO(void **unused) { /* configure mocks */ struct rpma_peer_setup_mr_reg_args mr_reg_args; mr_reg_args.usage = RPMA_MR_USAGE_READ_DST; mr_reg_args.access = IBV_ACCESS_LOCAL_WRITE; mr_reg_args.mr = NULL; mr_reg_args.verrno = MOCK_ERRNO; will_return(rpma_peer_setup_mr_reg, &mr_reg_args); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_reg(MOCK_PEER, MOCK_PTR, MOCK_SIZE, mr_reg_args.usage, &mr); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); } /* * reg_dereg__success -- happy day scenario */ static void reg_dereg__success(void **unused) { /* * The whole thing is done by setup__reg_success() * and teardown__dereg_success(). */ } /* rpma_mr_dereg() unit tests */ /* * dereg__NULL_mr_ptr -- NULL mr_ptr is invalid */ static void dereg__NULL_mr_ptr(void **unused) { /* run test */ int ret = rpma_mr_dereg(NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * dereg__NULL_mr -- NULL mr is OK */ static void dereg__NULL_mr(void **unused) { /* run test */ struct rpma_mr_local *mr = NULL; int ret = rpma_mr_dereg(&mr); /* verify the result */ assert_int_equal(ret, MOCK_OK); } /* * dereg__dereg_mr_ERRNO -- ibv_dereg_mr() fails with MOCK_ERRNO */ static void dereg__dereg_mr_ERRNO(void **pprestate) { /* * Create a local memory registration object. 
* * It is a workaround for the following issue: * https://gitlab.com/cmocka/cmocka/-/issues/47 */ int ret = setup__reg_success(pprestate); assert_int_equal(ret, MOCK_OK); struct prestate *prestate = *pprestate; struct rpma_mr_local *mr = prestate->mr; /* configure mocks */ will_return(ibv_dereg_mr, MOCK_ERRNO); /* run test */ ret = rpma_mr_dereg(&mr); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); } static const struct CMUnitTest tests_reg[] = { /* rpma_mr_reg() unit tests */ cmocka_unit_test(reg__NULL_peer), cmocka_unit_test(reg__NULL_ptr), cmocka_unit_test(reg__NULL_mr_ptr), cmocka_unit_test(reg__NULL_peer_ptr_mr_ptr), cmocka_unit_test(reg__0_size), cmocka_unit_test(reg__0_usage), cmocka_unit_test(reg__wrong_usage), cmocka_unit_test(reg__malloc_ERRNO), cmocka_unit_test(reg__peer_mr_reg_ERRNO), cmocka_unit_test_prestate_setup_teardown(reg_dereg__success, setup__reg_success, teardown__dereg_success, prestates), /* rpma_mr_dereg() unit tests */ cmocka_unit_test(dereg__NULL_mr_ptr), cmocka_unit_test(dereg__NULL_mr), cmocka_unit_test_prestate(dereg__dereg_mr_ERRNO, prestates + 1), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_reg, NULL, NULL); } rpma-1.3.0/tests/unit/mr/mr-send.c000066400000000000000000000107561443364775400167540ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2021, Intel Corporation */ /* * mr-send.c -- rpma_mr_send() unit tests */ #include #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" /* * send__failed_E_NOSUPP - rpma_mr_send failed with RPMA_E_NOSUPP */ static void send__failed_E_NOSUPP(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* run test */ int ret = rpma_mr_send(MOCK_QP, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ON_ERROR, MOCK_UNKNOWN_OP, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * send__failed_E_PROVIDER - rpma_mr_send failed with RPMA_E_PROVIDER */ static void send__failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_SEND; args.send_flags = 0; /* for RPMA_F_COMPLETION_ON_ERROR */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_ERRNO; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_send(MOCK_QP, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ON_ERROR, IBV_WR_SEND, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * send__success - happy day scenario */ static void send__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; struct ibv_post_send_mock_args args; enum ibv_wr_opcode opcodes[] = { IBV_WR_SEND, IBV_WR_SEND_WITH_IMM }; uint32_t imms[] = { 0, MOCK_IMM_DATA }; int n_values = sizeof(opcodes) / sizeof(opcodes[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ args.qp = MOCK_QP; args.opcode = opcodes[i]; /* for RPMA_F_COMPLETION_ALWAYS */ args.send_flags = IBV_SEND_SIGNALED; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; if (opcodes[i] == IBV_WR_SEND_WITH_IMM) args.imm_data = htonl(MOCK_IMM_DATA); args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_send(MOCK_QP, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, opcodes[i], imms[i], MOCK_OP_CONTEXT); /* verify the results */ 
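/*
 * Note: args.imm_data is set above with htonl() because immediate
 * data in an ibv_send_wr travels in network byte order, so the mock
 * is expected to see the big-endian form of MOCK_IMM_DATA.
 */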
assert_int_equal(ret, MOCK_OK); } } /* * send_0B_message__success - happy day scenario */ static void send_0B_message__success(void **mrs_ptr) { struct ibv_post_send_mock_args args; enum ibv_wr_opcode opcodes[] = { IBV_WR_SEND, IBV_WR_SEND_WITH_IMM }; uint32_t imms[] = { 0, MOCK_IMM_DATA }; int n_values = sizeof(opcodes) / sizeof(opcodes[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ args.qp = MOCK_QP; args.opcode = opcodes[i]; /* for RPMA_F_COMPLETION_ALWAYS */ args.send_flags = IBV_SEND_SIGNALED; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; if (opcodes[i] == IBV_WR_SEND_WITH_IMM) args.imm_data = htonl(MOCK_IMM_DATA); args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_send(MOCK_QP, NULL, 0, 0, RPMA_F_COMPLETION_ALWAYS, opcodes[i], imms[i], MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } /* * group_setup_mr_send -- prepare resources for all tests in the group */ static int group_setup_mr_send(void **unused) { /* configure global mocks */ /* * ibv_post_send() is defined as a static inline function * in the included header , * so we cannot define it again. It is defined as: * { * return qp->context->ops.post_send(qp, wr, bad_wr); * } * so we can set the 'qp->context->ops.post_send' function pointer * to our mock function. */ MOCK_VERBS->ops.post_send = ibv_post_send_mock; Ibv_qp.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr_send[] = { /* rpma_mr_send() unit tests */ cmocka_unit_test_setup_teardown(send__failed_E_NOSUPP, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(send__failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(send__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(send_0B_message__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_send, group_setup_mr_send, NULL); } rpma-1.3.0/tests/unit/mr/mr-srq_recv.c000066400000000000000000000052361443364775400176440ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * mr-srq_recv.c -- rpma_mr_srq_recv() unit tests */ #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" /* * recv__failed_E_PROVIDER - rpma_mr_srq_recv failed with RPMA_E_PROVIDER */ static void recv__failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_srq_recv_mock_args args; args.srq = MOCK_IBV_SRQ; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_ERRNO; will_return(ibv_post_srq_recv_mock, &args); /* run test */ int ret = rpma_mr_srq_recv(MOCK_IBV_SRQ, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * recv__success - happy day scenario */ static void recv__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_srq_recv_mock_args args; args.srq = MOCK_IBV_SRQ; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_OK; will_return(ibv_post_srq_recv_mock, &args); /* run test */ int ret = rpma_mr_srq_recv(MOCK_IBV_SRQ, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * 
recv_0B_message__success - happy day scenario */ static void recv_0B_message__success(void **mrs_ptr) { /* configure mocks */ struct ibv_post_srq_recv_mock_args args; args.srq = MOCK_IBV_SRQ; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.ret = MOCK_OK; will_return(ibv_post_srq_recv_mock, &args); /* run test */ int ret = rpma_mr_srq_recv(MOCK_IBV_SRQ, NULL, 0, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } /* * group_setup_mr_recv -- prepare resources for all tests in the group */ static int group_setup_mr_srq_recv(void **unused) { /* configure global mocks */ MOCK_VERBS->ops.post_srq_recv = ibv_post_srq_recv_mock; Ibv_srq.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr_recv[] = { /* rpma_mr_srq_recv() unit tests */ cmocka_unit_test_setup_teardown(recv__failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(recv__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(recv_0B_message__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_recv, group_setup_mr_srq_recv, NULL); } rpma-1.3.0/tests/unit/mr/mr-write.c000066400000000000000000000137221443364775400171510ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * mr-write.c -- rpma_mr_write() unit tests */ #include #include #include #include "cmocka_headers.h" #include "mr.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "mr-common.h" #include "test-common.h" /* * write__failed_E_NOSUPP - rpma_mr_write failed with RPMA_E_NOSUPP */ static void write__failed_E_NOSUPP(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* run test */ int ret = rpma_mr_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, MOCK_UNKNOWN_OP, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_NOSUPP); } /* * write__COMPL_ALWAYS_failed_E_PROVIDER - rpma_mr_write failed * with RPMA_E_PROVIDER when send_flags == RPMA_F_COMPLETION_ON_SUCCESS */ static void write__COMPL_ALWAYS_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_WRITE; args.send_flags = IBV_SEND_SIGNALED; /* for RPMA_F_COMPLETION_ALWAYS */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; args.rkey = MOCK_RKEY; args.ret = MOCK_ERRNO; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, IBV_WR_RDMA_WRITE, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * write__COMPL_ON_ERROR_failed_E_PROVIDER - rpma_mr_write failed * with RPMA_E_PROVIDER when send_flags == 0 for RPMA_F_COMPLETION_ON_ERROR */ static void write__COMPL_ON_ERROR_failed_E_PROVIDER(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = IBV_WR_RDMA_WRITE; args.send_flags = 0; /* for RPMA_F_COMPLETION_ON_ERROR */ args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; args.rkey = MOCK_RKEY; args.ret = MOCK_ERRNO; will_return(ibv_post_send_mock, 
&args); /* run test */ int ret = rpma_mr_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ON_ERROR, IBV_WR_RDMA_WRITE, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * write__success - happy day scenario */ static void write__success(void **mrs_ptr) { struct mrs *mrs = (struct mrs *)*mrs_ptr; enum ibv_wr_opcode opcodes[] = { IBV_WR_RDMA_WRITE, IBV_WR_RDMA_WRITE_WITH_IMM }; uint32_t imms[] = { 0, MOCK_IMM_DATA }; int n_values = sizeof(opcodes) / sizeof(opcodes[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = opcodes[i]; /* for RPMA_F_COMPLETION_ALWAYS */ args.send_flags = IBV_SEND_SIGNALED; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = MOCK_RADDR + MOCK_DST_OFFSET; args.rkey = MOCK_RKEY; if (opcodes[i] == IBV_WR_RDMA_WRITE_WITH_IMM) args.imm_data = htonl(MOCK_IMM_DATA); args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_write(MOCK_QP, mrs->remote, MOCK_DST_OFFSET, mrs->local, MOCK_SRC_OFFSET, MOCK_LEN, RPMA_F_COMPLETION_ALWAYS, opcodes[i], imms[i], MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } /* * write_0B_message__success - happy day scenario */ static void write_0B_message__success(void **mrs_ptr) { enum ibv_wr_opcode opcodes[] = { IBV_WR_RDMA_WRITE, IBV_WR_RDMA_WRITE_WITH_IMM }; uint32_t imms[] = { 0, MOCK_IMM_DATA }; int n_values = sizeof(opcodes) / sizeof(opcodes[0]); for (int i = 0; i < n_values; i++) { /* configure mocks */ struct ibv_post_send_mock_args args; args.qp = MOCK_QP; args.opcode = opcodes[i]; /* for RPMA_F_COMPLETION_ALWAYS */ args.send_flags = IBV_SEND_SIGNALED; args.wr_id = (uint64_t)MOCK_OP_CONTEXT; args.remote_addr = 0; args.rkey = 0; if (opcodes[i] == IBV_WR_RDMA_WRITE_WITH_IMM) args.imm_data = htonl(MOCK_IMM_DATA); args.ret = MOCK_OK; will_return(ibv_post_send_mock, &args); /* run test */ int ret = rpma_mr_write(MOCK_QP, NULL, 0, NULL, 0, 0, RPMA_F_COMPLETION_ALWAYS, opcodes[i], imms[i], MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } /* * group_setup_mr_write -- prepare resources for all tests in the group */ static int group_setup_mr_write(void **unused) { /* configure global mocks */ /* * ibv_post_send() is defined as a static inline function * in the included header , * so we cannot define it again. It is defined as: * { * return qp->context->ops.post_send(qp, wr, bad_wr); * } * so we can set the 'qp->context->ops.post_send' function pointer * to our mock function. 
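 *
 * MOCK_QP used by these tests presumably points at the global Ibv_qp
 * from mocks-ibverbs.c, so setting Ibv_qp.context to MOCK_VERBS below
 * makes the inline wrapper dereference our ops table: every
 * ibv_post_send() issued by rpma_mr_write() ends up in
 * ibv_post_send_mock instead of a real provider.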
*/ MOCK_VERBS->ops.post_send = ibv_post_send_mock; Ibv_qp.context = MOCK_VERBS; return 0; } static const struct CMUnitTest tests_mr_write[] = { /* rpma_mr_write() unit tests */ cmocka_unit_test_setup_teardown(write__failed_E_NOSUPP, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( write__COMPL_ALWAYS_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown( write__COMPL_ON_ERROR_failed_E_PROVIDER, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(write__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test_setup_teardown(write_0B_message__success, setup__mr_local_and_remote, teardown__mr_local_and_remote), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_mr_write, group_setup_mr_write, NULL); } rpma-1.3.0/tests/unit/peer/000077500000000000000000000000001443364775400155475ustar00rootroot00000000000000rpma-1.3.0/tests/unit/peer/CMakeLists.txt000066400000000000000000000020611443364775400203060ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright 2021-2022, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_peer name) set(src_name peer-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c peer-common.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rdma_cm.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-conn_cfg.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-cq.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-utils.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-srq.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-srq_cfg.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/peer.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_peer(create_qp) add_test_peer(create_srq) add_test_peer(mr_reg) add_test_peer(new) rpma-1.3.0/tests/unit/peer/peer-common.c000066400000000000000000000042301443364775400201330ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * peer-common.c -- the common part of the peer unit test */ #include #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rpma-utils.h" #include "peer.h" #include "peer-common.h" #include "test-common.h" struct prestate prestate_Capable = {IBV_TRANSPORT_IB, 0, 0, MOCK_ODP_CAPABLE, MOCK_ATOMIC_WRITE_CAPABLE, MOCK_FLUSH_CAPABLE, NULL}; struct prestate prestate_Incapable = {IBV_TRANSPORT_IB, 0, 0, MOCK_ODP_INCAPABLE, MOCK_ATOMIC_WRITE_INCAPABLE, MOCK_FLUSH_INCAPABLE, NULL}; /* * setup__peer -- prepare a valid rpma_peer object * (encapsulating the MOCK_IBV_PD) */ int setup__peer(void **pprestate) { struct prestate *prestate = *pprestate; /* * configure mocks for rpma_peer_new(): * NOTE: it is not allowed to call ibv_dealloc_pd() if ibv_alloc_pd() * succeeded. 
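 * Each plain will_return() below queues exactly one value, so this
 * setup also implicitly verifies that rpma_peer_new() consumes every
 * mocked call exactly once: cmocka fails the test if a queued value
 * is left unconsumed or if a mock is called without one.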
*/ will_return(rpma_utils_ibv_context_is_atomic_write_capable, prestate->is_atomic_write_capable); will_return(rpma_utils_ibv_context_is_flush_capable, prestate->is_flush_capable); will_return(rpma_utils_ibv_context_is_odp_capable, prestate->is_odp_capable); struct ibv_alloc_pd_mock_args alloc_args = {MOCK_VALIDATE, MOCK_IBV_PD}; will_return(ibv_alloc_pd, &alloc_args); expect_value(ibv_alloc_pd, ibv_ctx, MOCK_VERBS); will_return(__wrap__test_malloc, MOCK_OK); /* setup */ int ret = rpma_peer_new(MOCK_VERBS, &prestate->peer); assert_int_equal(ret, 0); assert_non_null(prestate->peer); return 0; } /* * teardown__peer -- delete the rpma_peer object */ int teardown__peer(void **pprestate) { struct prestate *prestate = *pprestate; /* * configure mocks for rpma_peer_delete(): * NOTE: it is not allowed to call ibv_alloc_pd() nor malloc() in * rpma_peer_delete(). */ struct ibv_dealloc_pd_mock_args dealloc_args = {MOCK_VALIDATE, MOCK_OK}; will_return(ibv_dealloc_pd, &dealloc_args); expect_value(ibv_dealloc_pd, pd, MOCK_IBV_PD); /* teardown */ int ret = rpma_peer_delete(&prestate->peer); assert_int_equal(ret, MOCK_OK); assert_null(prestate->peer); return 0; } rpma-1.3.0/tests/unit/peer/peer-common.h000066400000000000000000000026541443364775400201500ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2022-2023, Fujitsu Limited */ /* * peer-common.h -- the header of the common part of the peer unit test */ #ifndef PEER_COMMON_H #define PEER_COMMON_H 1 #include #define MOCK_CM_ID (struct rdma_cm_id *)0xC41D #define MOCK_ADDR (void *)0x2B6A /* * The basic access value should be a combination of * IBV_ACCESS_(LOCAL|REMOTE)_(READ|WRITE) because IBV_ACCESS_ON_DEMAND * is added dynamically during the fall-back to using On-Demand Paging * registration type. */ #define MOCK_ACCESS (unsigned)(\ IBV_ACCESS_LOCAL_WRITE |\ IBV_ACCESS_REMOTE_WRITE |\ IBV_ACCESS_REMOTE_READ) /* * The test usage value is a combination of all possible * RPMA_MR_USAGE_* values. */ #define MOCK_USAGE (unsigned)(\ RPMA_MR_USAGE_READ_SRC |\ RPMA_MR_USAGE_READ_DST |\ RPMA_MR_USAGE_WRITE_SRC |\ RPMA_MR_USAGE_WRITE_DST |\ RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY |\ RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT |\ RPMA_MR_USAGE_SEND |\ RPMA_MR_USAGE_RECV) struct prestate { enum ibv_transport_type transport_type; int usage; unsigned access; int is_odp_capable; int is_atomic_write_capable; int is_flush_capable; struct rpma_peer *peer; }; extern struct prestate prestate_Capable; extern struct prestate prestate_Incapable; int setup__peer(void **pprestate); int teardown__peer(void **pprestate); #endif /* PEER_COMMON_H */ rpma-1.3.0/tests/unit/peer/peer-create_qp.c000066400000000000000000000133341443364775400206130ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * peer-create_qp.c -- a peer unit test * * API covered: * - rpma_peer_setup_qp() */ #include #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rpma-conn_cfg.h" #include "mocks-rpma-cq.h" #include "mocks-rpma-srq.h" #include "peer.h" #include "peer-common.h" #define MOCK_GET_IBV_RCQ(rcq) rcq == MOCK_RPMA_RCQ ? 
MOCK_IBV_RCQ : MOCK_IBV_SRQ_RCQ static struct conn_cfg_get_mock_args Get_args = { .cfg = MOCK_CONN_CFG_CUSTOM, .sq_size = MOCK_SQ_SIZE_CUSTOM, .rq_size = MOCK_RQ_SIZE_CUSTOM, }; static struct rpma_cq *rcqs[] = { NULL, MOCK_RPMA_RCQ, MOCK_RPMA_SRQ_RCQ, }; static int num_rcqs = sizeof(rcqs) / sizeof(rcqs[0]); /* * configure_create_qp_ex -- configure common mock for rdma_create_qp_ex() */ static void configure_create_qp_ex(struct prestate *prestate, struct rpma_cq *rcq) { uint32_t comp_mask = IBV_QP_INIT_ATTR_PD; will_return(rpma_conn_cfg_get_sq_size, &Get_args); will_return(rpma_conn_cfg_get_rq_size, &Get_args); will_return(rpma_conn_cfg_get_srq, &Get_args); if (Get_args.srq) { expect_value(rpma_srq_get_ibv_srq, srq, MOCK_RPMA_SRQ); will_return(rpma_srq_get_ibv_srq, MOCK_IBV_SRQ); } expect_value(rpma_cq_get_ibv_cq, cq, MOCK_RPMA_CQ); will_return(rpma_cq_get_ibv_cq, MOCK_IBV_CQ); if (rcq) { expect_value(rpma_cq_get_ibv_cq, cq, rcq); will_return(rpma_cq_get_ibv_cq, MOCK_GET_IBV_RCQ(rcq)); } expect_value(rdma_create_qp_ex, id, MOCK_CM_ID); expect_value(rdma_create_qp_ex, qp_init_attr->qp_context, NULL); expect_value(rdma_create_qp_ex, qp_init_attr->send_cq, MOCK_IBV_CQ); expect_value(rdma_create_qp_ex, qp_init_attr->recv_cq, rcq ? MOCK_GET_IBV_RCQ(rcq) : MOCK_IBV_CQ); expect_value(rdma_create_qp_ex, qp_init_attr->srq, Get_args.srq ? MOCK_IBV_SRQ : NULL); expect_value(rdma_create_qp_ex, qp_init_attr->cap.max_send_wr, MOCK_SQ_SIZE_CUSTOM); expect_value(rdma_create_qp_ex, qp_init_attr->cap.max_recv_wr, MOCK_RQ_SIZE_CUSTOM); expect_value(rdma_create_qp_ex, qp_init_attr->cap.max_send_sge, RPMA_MAX_SGE); expect_value(rdma_create_qp_ex, qp_init_attr->cap.max_recv_sge, RPMA_MAX_SGE); expect_value(rdma_create_qp_ex, qp_init_attr->cap.max_inline_data, RPMA_MAX_INLINE_DATA); expect_value(rdma_create_qp_ex, qp_init_attr->pd, MOCK_IBV_PD); #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) comp_mask |= IBV_QP_INIT_ATTR_SEND_OPS_FLAGS; uint64_t send_ops_flags = 0; #endif #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED if (prestate->is_atomic_write_capable) send_ops_flags |= IBV_QP_EX_WITH_ATOMIC_WRITE; #endif #ifdef NATIVE_FLUSH_SUPPORTED if (prestate->is_flush_capable) send_ops_flags |= IBV_QP_EX_WITH_FLUSH; #endif expect_value(rdma_create_qp_ex, qp_init_attr->comp_mask, comp_mask); #if defined(NATIVE_ATOMIC_WRITE_SUPPORTED) || defined(NATIVE_FLUSH_SUPPORTED) expect_value(rdma_create_qp_ex, qp_init_attr->send_ops_flags, send_ops_flags); #endif } /* * create_qp__peer_NULL -- NULL peer is invalid */ static void create_qp__peer_NULL(void **unused) { /* run test */ int ret = rpma_peer_setup_qp(NULL, MOCK_CM_ID, MOCK_RPMA_CQ, NULL, MOCK_CONN_CFG_DEFAULT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * create_qp__id_NULL -- NULL id is invalid */ static void create_qp__id_NULL(void **pprestate) { struct prestate *prestate = *pprestate; /* run test */ int ret = rpma_peer_setup_qp(prestate->peer, NULL, MOCK_RPMA_CQ, NULL, MOCK_CONN_CFG_DEFAULT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * create_qp__cq_NULL -- NULL cq is invalid */ static void create_qp__cq_NULL(void **pprestate) { struct prestate *prestate = *pprestate; /* run test */ int ret = rpma_peer_setup_qp(prestate->peer, MOCK_CM_ID, NULL, NULL, MOCK_CONN_CFG_DEFAULT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * create_qp__rdma_create_qp_ex_ERRNO -- rdma_create_qp_ex() fails with MOCK_ERRNO */ static void create_qp__rdma_create_qp_ex_ERRNO(void **pprestate) { struct 
prestate *prestate = *pprestate; for (int i = 0; i < num_rcqs; i++) { /* configure mock */ Get_args.srq = (i == 2) ? MOCK_RPMA_SRQ : NULL; configure_create_qp_ex(prestate, rcqs[i]); will_return(rdma_create_qp_ex, MOCK_ERRNO); /* run test */ int ret = rpma_peer_setup_qp(prestate->peer, MOCK_CM_ID, MOCK_RPMA_CQ, rcqs[i], MOCK_CONN_CFG_CUSTOM); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } } /* * create_qp__success -- happy day scenario */ static void create_qp__success(void **pprestate) { struct prestate *prestate = *pprestate; for (int i = 0; i < num_rcqs; i++) { /* configure mock */ Get_args.srq = (i == 2) ? MOCK_RPMA_SRQ : NULL; configure_create_qp_ex(prestate, rcqs[i]); will_return(rdma_create_qp_ex, MOCK_OK); /* run test */ int ret = rpma_peer_setup_qp(prestate->peer, MOCK_CM_ID, MOCK_RPMA_CQ, rcqs[i], MOCK_CONN_CFG_CUSTOM); /* verify the results */ assert_int_equal(ret, MOCK_OK); } } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_peer_setup_qp() unit tests */ cmocka_unit_test(create_qp__peer_NULL), cmocka_unit_test_prestate_setup_teardown(create_qp__id_NULL, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown(create_qp__cq_NULL, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown( create_qp__rdma_create_qp_ex_ERRNO, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown(create_qp__success, setup__peer, teardown__peer, &prestate_Capable), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/peer/peer-create_srq.c000066400000000000000000000127031443364775400207770ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2022, Fujitsu Limited */ /* * peer-create_srq.c -- the rpma_peer_create_srq() unit tests * * API covered: * - rpma_peer_create_srq() */ #include "librpma.h" #include "peer.h" #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "mocks-rpma-srq.h" #include "mocks-rpma-srq_cfg.h" #include "peer-common.h" #include "test-common.h" static struct srq_cfg_get_mock_args Create_srq_cfg_default = { .cfg = MOCK_SRQ_CFG_DEFAULT, .rq_size = MOCK_SRQ_SIZE_DEFAULT, .rcq_size = MOCK_SRQ_RCQ_SIZE_DEFAULT, }; static struct srq_cfg_get_mock_args Create_srq_cfg_custom = { .cfg = MOCK_SRQ_CFG_CUSTOM, .rq_size = MOCK_SRQ_SIZE_CUSTOM, .rcq_size = MOCK_SRQ_RCQ_SIZE_CUSTOM, }; static struct srq_cfg_get_mock_args *cfgs[] = { &Create_srq_cfg_default, &Create_srq_cfg_custom, }; static int num_cfgs = sizeof(cfgs) / sizeof(cfgs[0]); /* * create_srq__ibv_create_srq_ERRNO -- ibv_create_srq() fails with MOCK_ERRNO */ static void create_srq__ibv_create_srq_ERRNO(void **pprestate) { struct prestate *prestate = *pprestate; /* configure mocks */ will_return(rpma_srq_cfg_get_rq_size, &Create_srq_cfg_default); expect_value(ibv_create_srq, srq_init_attr->attr.max_wr, Create_srq_cfg_default.rq_size); will_return(ibv_create_srq, NULL); will_return(ibv_create_srq, MOCK_ERRNO); /* run test */ struct ibv_srq *ibv_srq = NULL; struct rpma_cq *rcq = NULL; int ret = rpma_peer_create_srq(prestate->peer, Create_srq_cfg_default.cfg, &ibv_srq, &rcq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_srq); assert_null(rcq); } /* * create_srq__rpma_cq_new_ERRNO -- rpma_cq_new() fails with MOCK_ERRNO */ static void create_srq__rpma_cq_new_ERRNO(void **pprestate) { struct prestate *prestate = *pprestate; /* configure mocks */ will_return(rpma_srq_cfg_get_rq_size, 
&Create_srq_cfg_default); expect_value(ibv_create_srq, srq_init_attr->attr.max_wr, Create_srq_cfg_default.rq_size); will_return(ibv_create_srq, MOCK_IBV_SRQ); will_return(rpma_srq_cfg_get_rcqe, &Create_srq_cfg_default); expect_value(rpma_cq_new, cqe, Create_srq_cfg_default.rcq_size); expect_value(rpma_cq_new, shared_channel, NULL); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_OK); /* run test */ struct ibv_srq *ibv_srq = NULL; struct rpma_cq *rcq = NULL; int ret = rpma_peer_create_srq(prestate->peer, Create_srq_cfg_default.cfg, &ibv_srq, &rcq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_srq); assert_null(rcq); } /* * create_srq__rpma_cq_new_ERRNO_subsequent_ERRNO2 -- ibv_destroy_srq() * fails with MOCK_ERRNO2 after rpma_cq_new() failed with MOCK_ERRNO */ static void create_srq__rpma_cq_new_ERRNO_subsequent_ERRNO2(void **pprestate) { struct prestate *prestate = *pprestate; /* configure mocks */ will_return(rpma_srq_cfg_get_rq_size, &Create_srq_cfg_default); expect_value(ibv_create_srq, srq_init_attr->attr.max_wr, Create_srq_cfg_default.rq_size); will_return(ibv_create_srq, MOCK_IBV_SRQ); will_return(rpma_srq_cfg_get_rcqe, &Create_srq_cfg_default); expect_value(rpma_cq_new, cqe, Create_srq_cfg_default.rcq_size); expect_value(rpma_cq_new, shared_channel, NULL); will_return(rpma_cq_new, NULL); will_return(rpma_cq_new, RPMA_E_PROVIDER); will_return(rpma_cq_new, MOCK_ERRNO); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_ERRNO2); /* run test */ struct ibv_srq *ibv_srq = NULL; struct rpma_cq *rcq = NULL; int ret = rpma_peer_create_srq(prestate->peer, Create_srq_cfg_default.cfg, &ibv_srq, &rcq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_srq); assert_null(rcq); } /* * create_srq__success -- happy day scenario */ static void create_srq__success(void **pprestate) { struct prestate *prestate = *pprestate; for (int i = 0; i < num_cfgs; i++) { /* configure mocks */ will_return(rpma_srq_cfg_get_rq_size, cfgs[i]); expect_value(ibv_create_srq, srq_init_attr->attr.max_wr, cfgs[i]->rq_size); will_return(ibv_create_srq, MOCK_IBV_SRQ); will_return(rpma_srq_cfg_get_rcqe, cfgs[i]); if (cfgs[i]->rcq_size) { expect_value(rpma_cq_new, cqe, cfgs[i]->rcq_size); expect_value(rpma_cq_new, shared_channel, NULL); will_return(rpma_cq_new, MOCK_RPMA_SRQ_RCQ); } /* run test */ struct ibv_srq *ibv_srq = NULL; struct rpma_cq *rcq = NULL; int ret = rpma_peer_create_srq(prestate->peer, cfgs[i]->cfg, &ibv_srq, &rcq); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(ibv_srq, MOCK_IBV_SRQ); assert_ptr_equal(rcq, cfgs[i]->rcq_size ? 
MOCK_RPMA_SRQ_RCQ : NULL); } } static const struct CMUnitTest tests_create_srq[] = { /* rpma_peer_create_srq() unit tests */ cmocka_unit_test_prestate_setup_teardown( create_srq__ibv_create_srq_ERRNO, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown( create_srq__rpma_cq_new_ERRNO, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown( create_srq__rpma_cq_new_ERRNO_subsequent_ERRNO2, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test_prestate_setup_teardown( create_srq__success, setup__peer, teardown__peer, &prestate_Capable), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_create_srq, NULL, NULL); } rpma-1.3.0/tests/unit/peer/peer-mr_reg.c000066400000000000000000000244241443364775400201250ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2023, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * peer-mr_reg.c -- a peer unit test * * API covered: * - rpma_peer_setup_mr_reg() */ #include #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rpma-utils.h" #include "peer.h" #include "peer-common.h" #include "test-common.h" static struct prestate prestates[] = { /* prestates #0-1 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_READ_SRC, IBV_ACCESS_REMOTE_READ, MOCK_ODP_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_READ_SRC, IBV_ACCESS_REMOTE_READ, MOCK_ODP_CAPABLE}, /* prestates #2-3 differ for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_READ_DST, IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_READ_DST, IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE, MOCK_ODP_CAPABLE}, /* prestates #4-5 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_WRITE_SRC, IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_WRITE_SRC, IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, /* prestates #6-7 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_WRITE_DST, IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_WRITE_DST, IBV_ACCESS_REMOTE_WRITE | IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, /* prestates #8-9 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_RECV, IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_RECV, IBV_ACCESS_LOCAL_WRITE, MOCK_ODP_CAPABLE}, /* prestates #10-11 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, (RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY | RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT), IBV_ACCESS_REMOTE_READ, MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_INCAPABLE}, {IBV_TRANSPORT_IWARP, (RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY | RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT), IBV_ACCESS_REMOTE_READ, MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_INCAPABLE}, #ifdef NATIVE_FLUSH_SUPPORTED /* prestates #12-13 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY, IBV_ACCESS_FLUSH_GLOBAL, MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_FLUSH_TYPE_VISIBILITY, IBV_ACCESS_FLUSH_GLOBAL, MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_CAPABLE}, /* prestates #14-15 are the same for non-iWARP and iWARP */ {IBV_TRANSPORT_IB, RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT, IBV_ACCESS_FLUSH_PERSISTENT, MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_CAPABLE}, {IBV_TRANSPORT_IWARP, RPMA_MR_USAGE_FLUSH_TYPE_PERSISTENT, IBV_ACCESS_FLUSH_PERSISTENT, 
MOCK_ODP_CAPABLE, 0, MOCK_FLUSH_CAPABLE}, #endif }; /* * mr_reg__reg_mr_ERRNO -- ibv_reg_mr() fails with MOCK_ERRNO */ static void mr_reg__reg_mr_ERRNO(void **pprestate) { struct prestate *prestate = *pprestate; prestate->access = MOCK_ACCESS; #ifdef NATIVE_FLUSH_SUPPORTED if (prestate->is_flush_capable) prestate->access |= (IBV_ACCESS_FLUSH_GLOBAL | IBV_ACCESS_FLUSH_PERSISTENT); #endif /* configure mocks */ expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, NULL); will_return(ibv_reg_mr, MOCK_ERRNO); /* run test */ struct ibv_mr *mr = NULL; int ret = rpma_peer_setup_mr_reg(prestate->peer, &mr, MOCK_ADDR, MOCK_LEN, MOCK_USAGE); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); } /* * mr_reg__reg_mr_EOPNOTSUPP_no_odp -- ibv_reg_mr() fails with EOPNOTSUPP */ static void mr_reg__reg_mr_EOPNOTSUPP_no_odp(void **pprestate) { struct prestate *prestate = *pprestate; prestate->access = MOCK_ACCESS; #ifdef NATIVE_FLUSH_SUPPORTED if (prestate->is_flush_capable) prestate->access |= (IBV_ACCESS_FLUSH_GLOBAL | IBV_ACCESS_FLUSH_PERSISTENT); #endif /* configure mocks */ expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, NULL); will_return(ibv_reg_mr, EOPNOTSUPP); /* run test */ struct ibv_mr *mr = NULL; int ret = rpma_peer_setup_mr_reg(prestate->peer, &mr, MOCK_ADDR, MOCK_LEN, MOCK_USAGE); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); } /* * mr_reg__reg_mr_EOPNOTSUPP_ERRNO -- the first ibv_reg_mr() fails with * EOPNOTSUPP whereas the second one fails with MOCK_ERRNO */ static void mr_reg__reg_mr_EOPNOTSUPP_ERRNO(void **pprestate) { struct prestate *prestate = *pprestate; prestate->access = MOCK_ACCESS; #ifdef NATIVE_FLUSH_SUPPORTED if (prestate->is_flush_capable) prestate->access |= (IBV_ACCESS_FLUSH_GLOBAL | IBV_ACCESS_FLUSH_PERSISTENT); #endif /* configure mocks */ expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, NULL); will_return(ibv_reg_mr, EOPNOTSUPP); #ifdef ON_DEMAND_PAGING_SUPPORTED prestate->access |= IBV_ACCESS_ON_DEMAND; expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, NULL); will_return(ibv_reg_mr, MOCK_ERRNO); #endif /* run test */ struct ibv_mr *mr = NULL; int ret = rpma_peer_setup_mr_reg(prestate->peer, &mr, MOCK_ADDR, MOCK_LEN, MOCK_USAGE); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); } /* * mr_reg__success -- happy day scenario */ static void mr_reg__success(void **pprestate) { struct prestate *prestate = *pprestate; struct rpma_peer *peer = prestate->peer; struct ibv_pd *mock_ibv_pd = MOCK_IBV_PD; /* configure mocks */ mock_ibv_pd->context->device->transport_type = prestate->transport_type; expect_value(ibv_reg_mr, pd, mock_ibv_pd); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, MOCK_MR); /* run test */ struct ibv_mr *mr; int ret = 
rpma_peer_setup_mr_reg(peer, &mr, MOCK_ADDR, MOCK_LEN, prestate->usage); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(mr, MOCK_MR); } /* * mr_reg__success_odp -- happy day scenario ODP style */ static void mr_reg__success_odp(void **pprestate) { struct prestate *prestate = *pprestate; prestate->access = MOCK_ACCESS; #ifdef NATIVE_FLUSH_SUPPORTED if (prestate->is_flush_capable) prestate->access |= (IBV_ACCESS_FLUSH_GLOBAL | IBV_ACCESS_FLUSH_PERSISTENT); #endif /* configure mocks */ expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, NULL); will_return(ibv_reg_mr, EOPNOTSUPP); #ifdef ON_DEMAND_PAGING_SUPPORTED prestate->access |= IBV_ACCESS_ON_DEMAND; expect_value(ibv_reg_mr, pd, MOCK_IBV_PD); expect_value(ibv_reg_mr, addr, MOCK_ADDR); expect_value(ibv_reg_mr, length, MOCK_LEN); expect_value(ibv_reg_mr, access, prestate->access); will_return(ibv_reg_mr, MOCK_MR); #endif /* run test */ struct ibv_mr *mr; int ret = rpma_peer_setup_mr_reg(prestate->peer, &mr, MOCK_ADDR, MOCK_LEN, MOCK_USAGE); /* verify the results */ #ifdef ON_DEMAND_PAGING_SUPPORTED assert_int_equal(ret, MOCK_OK); assert_ptr_equal(mr, MOCK_MR); #else assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(mr); #endif } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_peer_setup_mr_reg() unit tests */ { "mr_reg__reg_mr_ERRNO_no_odp", mr_reg__reg_mr_ERRNO, setup__peer, teardown__peer, &prestate_Incapable}, { "mr_reg__reg_mr_ERRNO_odp", mr_reg__reg_mr_ERRNO, setup__peer, teardown__peer, &prestate_Capable}, cmocka_unit_test_prestate_setup_teardown( mr_reg__reg_mr_EOPNOTSUPP_no_odp, setup__peer, teardown__peer, &prestate_Incapable), cmocka_unit_test_prestate_setup_teardown( mr_reg__reg_mr_EOPNOTSUPP_ERRNO, setup__peer, teardown__peer, &prestate_Capable), { "mr_reg__USAGE_READ_SRC_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 0}, { "mr_reg__USAGE_READ_SRC_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 1}, { "mr_reg__USAGE_READ_DST_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 2}, { "mr_reg__USAGE_READ_DST_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 3}, { "mr_reg__USAGE_WRITE_SRC_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 4}, { "mr_reg__USAGE_WRITE_SRC_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 5}, { "mr_reg__USAGE_WRITE_DST_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 6}, { "mr_reg__USAGE_WRITE_DST_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 7}, { "mr_reg__USAGE_RECV_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 8}, { "mr_reg__USAGE_RECV_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 9}, { "mr_reg__USAGE_FLUSH_TYPE_VISIBILITY_OR_PERSISTENT_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 10}, { "mr_reg__USAGE_FLUSH_TYPE_VISIBILITY_OR_PERSISTENT_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 11}, #ifdef NATIVE_FLUSH_SUPPORTED { "mr_reg__USAGE_NATIVE_FLUSH_TYPE_VISIBILITY_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 12}, { "mr_reg__USAGE_NATIVE_FLUSH_TYPE_VISIBILITY_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 13}, { "mr_reg__USAGE_NATIVE_FLUSH_TYPE_PERSISTENT_IB", mr_reg__success, setup__peer, teardown__peer, prestates + 14}, { 
"mr_reg__USAGE_NATIVE_FLUSH_TYPE_PERSISTENT_iWARP", mr_reg__success, setup__peer, teardown__peer, prestates + 15}, #endif cmocka_unit_test_prestate_setup_teardown( mr_reg__success_odp, setup__peer, teardown__peer, &prestate_Capable), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/peer/peer-new.c000066400000000000000000000251121443364775400174360ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright (c) 2021-2023, Fujitsu Limited */ /* * peer-new.c -- a peer unit test * * APIs covered: * - rpma_peer_new() * - rpma_peer_delete() */ #include #include "cmocka_headers.h" #include "conn_req.h" #include "mocks-ibverbs.h" #include "mocks-rpma-utils.h" #include "peer.h" #include "peer-common.h" #include "test-common.h" /* * new__ibv_ctx_eq_NULL -- NULL ibv_ctx is not valid */ static void new__ibv_ctx_eq_NULL(void **unused) { /* * NOTE: it is not allowed for peer to allocate any resource before * validating arguments. */ /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(NULL, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(peer); } /* * new__peer_ptr_eq_NULL -- NULL **peer is not valid */ static void new__peer_ptr_eq_NULL(void **unused) { /* * NOTE: it is not allowed for peer to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_peer_new(MOCK_VERBS, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__ibv_ctx_and_peer_ptr_eq_NULL -- NULL ibv_ctx and NULL **peer * are not valid */ static void new__ibv_ctx_and_peer_ptr_eq_NULL(void **unused) { /* * NOTE: it is not allowed for peer to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_peer_new(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__alloc_pd_ENOMEM -- ibv_alloc_pd() fails with ENOMEM */ static void new__alloc_pd_ENOMEM(void **unused) { /* * configure mocks: * - NOTE: it is not allowed to call ibv_dealloc_pd() if ibv_alloc_pd() * has failed. */ struct ibv_alloc_pd_mock_args alloc_args = {MOCK_VALIDATE, NULL}; will_return(ibv_alloc_pd, &alloc_args); expect_value(ibv_alloc_pd, ibv_ctx, MOCK_VERBS); will_return(ibv_alloc_pd, ENOMEM); will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_odp_capable, 1); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(peer); } /* * new__alloc_pd_ERRNO -- ibv_alloc_pd() fails with MOCK_ERRNO */ static void new__alloc_pd_ERRNO(void **unused) { /* * configure mocks: * - NOTE: it is not allowed to call ibv_dealloc_pd() if ibv_alloc_pd() * has failed. 
*/ struct ibv_alloc_pd_mock_args alloc_args = {MOCK_VALIDATE, NULL}; will_return(ibv_alloc_pd, &alloc_args); expect_value(ibv_alloc_pd, ibv_ctx, MOCK_VERBS); will_return(ibv_alloc_pd, MOCK_ERRNO); will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_odp_capable, 1); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(peer); } /* * new__alloc_pd_no_error -- ibv_alloc_pd() fails without error */ static void new__alloc_pd_no_error(void **unused) { /* * configure mocks: * - NOTE: it is not allowed to call ibv_dealloc_pd() if ibv_alloc_pd() * has failed. */ struct ibv_alloc_pd_mock_args alloc_args = {MOCK_VALIDATE, NULL}; will_return(ibv_alloc_pd, &alloc_args); expect_value(ibv_alloc_pd, ibv_ctx, MOCK_VERBS); will_return(ibv_alloc_pd, MOCK_OK); will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_odp_capable, 1); will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_UNKNOWN); assert_null(peer); } /* * new__atomic_write_ERRNO -- rpma_utils_ibv_context_is_atomic_write_capable() * fails with MOCK_ERRNO */ static void new__atomic_write_ERRNO(void **unused) { /* configure mocks */ will_return(rpma_utils_ibv_context_is_atomic_write_capable, MOCK_ERR_PENDING); will_return(rpma_utils_ibv_context_is_atomic_write_capable, RPMA_E_PROVIDER); will_return(rpma_utils_ibv_context_is_atomic_write_capable, MOCK_ERRNO); will_return_maybe(__wrap__test_malloc, MOCK_OK); will_return_maybe(ibv_alloc_pd, MOCK_IBV_PD); will_return_maybe(ibv_dealloc_pd, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(peer); } /* * new__flush_ERRNO -- rpma_utils_ibv_context_is_flush_capable() fails with MOCK_ERRNO */ static void new__flush_ERRNO(void **unused) { /* configure mocks */ will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return(rpma_utils_ibv_context_is_flush_capable, MOCK_ERR_PENDING); will_return(rpma_utils_ibv_context_is_flush_capable, RPMA_E_PROVIDER); will_return(rpma_utils_ibv_context_is_flush_capable, MOCK_ERRNO); will_return_maybe(__wrap__test_malloc, MOCK_OK); will_return_maybe(ibv_alloc_pd, MOCK_IBV_PD); will_return_maybe(ibv_dealloc_pd, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(peer); } /* * new__odp_ERRNO -- rpma_utils_ibv_context_is_odp_capable() * fails with MOCK_ERRNO */ static void new__odp_ERRNO(void **unused) { /* configure mocks */ will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 1); will_return(rpma_utils_ibv_context_is_odp_capable, MOCK_ERR_PENDING); will_return(rpma_utils_ibv_context_is_odp_capable, RPMA_E_PROVIDER); will_return(rpma_utils_ibv_context_is_odp_capable, MOCK_ERRNO); will_return_maybe(__wrap__test_malloc, MOCK_OK); will_return_maybe(ibv_alloc_pd, MOCK_IBV_PD); 
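/*
 * will_return_maybe() queues a value that may be consumed any number
 * of times, including zero, so the surrounding calls only cover the
 * paths rpma_peer_new() may or may not reach before it hits the
 * injected ODP-capability error.
 */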
will_return_maybe(ibv_dealloc_pd, MOCK_OK); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(peer); } /* * new__malloc_ERRNO-- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); struct ibv_alloc_pd_mock_args alloc_args = {MOCK_PASSTHROUGH, MOCK_IBV_PD}; will_return_maybe(ibv_alloc_pd, &alloc_args); struct ibv_dealloc_pd_mock_args dealloc_args = {MOCK_PASSTHROUGH, MOCK_OK}; will_return_maybe(ibv_dealloc_pd, &dealloc_args); will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 1); will_return_maybe(rpma_utils_ibv_context_is_odp_capable, 1); /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(peer); } /* * new__success -- happy day scenario */ static void new__success(void **unused) { /* * configure mocks for rpma_peer_new(): * NOTE: it is not allowed to call ibv_dealloc_pd() if ibv_alloc_pd() * succeeded. */ struct ibv_alloc_pd_mock_args alloc_args = {MOCK_VALIDATE, MOCK_IBV_PD}; will_return(ibv_alloc_pd, &alloc_args); expect_value(ibv_alloc_pd, ibv_ctx, MOCK_VERBS); will_return_maybe(rpma_utils_ibv_context_is_atomic_write_capable, 0); will_return_maybe(rpma_utils_ibv_context_is_flush_capable, 0); will_return(rpma_utils_ibv_context_is_odp_capable, 1); will_return(__wrap__test_malloc, MOCK_OK); /* run test - step 1 */ struct rpma_peer *peer = NULL; int ret = rpma_peer_new(MOCK_VERBS, &peer); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(peer); /* * configure mocks for rpma_peer_delete(): * NOTE: it is not allowed to call ibv_alloc_pd() nor malloc() in * rpma_peer_delete(). */ struct ibv_dealloc_pd_mock_args dealloc_args = {MOCK_VALIDATE, MOCK_OK}; will_return(ibv_dealloc_pd, &dealloc_args); expect_value(ibv_dealloc_pd, pd, MOCK_IBV_PD); /* run test - step 2 */ ret = rpma_peer_delete(&peer); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(peer); } /* * delete__invalid_peer_ptr -- NULL **peer is not valid */ static void delete__invalid_peer_ptr(void **unused) { /* * NOTE: it is not allowed for peer to allocate any resource before * validating arguments. */ /* run test */ int ret = rpma_peer_delete(NULL); /* verify the result */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__null_peer -- valid NULL *peer */ static void delete__null_peer(void **unused) { /* * NOTE: it is not allowed for peer to allocate any resource when * quick-exiting. */ /* run test */ struct rpma_peer *peer = NULL; int ret = rpma_peer_delete(&peer); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_null(peer); } /* * delete__dealloc_pd_ERRNO -- ibv_dealloc_pd() fails with MOCK_ERRNO */ static void delete__dealloc_pd_ERRNO(void **unused) { struct prestate *prestate = &prestate_Capable; assert_int_equal(setup__peer((void **)&prestate), 0); assert_non_null(prestate->peer); /* * configure mocks for rpma_peer_delete(): * NOTE: it is not allowed to call ibv_alloc_pd() nor malloc() in * rpma_peer_delete(). 
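 * Even though ibv_dealloc_pd() is set up to fail with MOCK_ERRNO,
 * the test still expects rpma_peer_delete() to set *peer to NULL --
 * see the asserts at the end of this function.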
*/ struct ibv_dealloc_pd_mock_args dealloc_args = {MOCK_VALIDATE, MOCK_ERRNO}; will_return(ibv_dealloc_pd, &dealloc_args); expect_value(ibv_dealloc_pd, pd, MOCK_IBV_PD); /* run test */ int ret = rpma_peer_delete(&prestate->peer); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(prestate->peer); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_peer_new() unit tests */ cmocka_unit_test(new__ibv_ctx_eq_NULL), cmocka_unit_test(new__peer_ptr_eq_NULL), cmocka_unit_test(new__ibv_ctx_and_peer_ptr_eq_NULL), cmocka_unit_test(new__alloc_pd_ENOMEM), cmocka_unit_test(new__alloc_pd_ERRNO), cmocka_unit_test(new__alloc_pd_no_error), cmocka_unit_test(new__odp_ERRNO), cmocka_unit_test(new__atomic_write_ERRNO), cmocka_unit_test(new__flush_ERRNO), cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test(new__success), /* rpma_peer_delete() unit tests */ cmocka_unit_test(delete__invalid_peer_ptr), cmocka_unit_test(delete__null_peer), cmocka_unit_test(delete__dealloc_pd_ERRNO) }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/peer_cfg/000077500000000000000000000000001443364775400163665ustar00rootroot00000000000000rpma-1.3.0/tests/unit/peer_cfg/CMakeLists.txt000066400000000000000000000013451443364775400211310ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) function(add_test_peer_cfg name) set(src_name peer_cfg-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c peer_cfg-common.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/peer_cfg.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_peer_cfg(delete) add_test_peer_cfg(descriptor) add_test_peer_cfg(direct_write_to_pmem) add_test_peer_cfg(new) rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-common.c000066400000000000000000000016631443364775400216000ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * peer_cfg-common.c -- the peer_cfg unit tests common functions */ #include "peer_cfg-common.h" /* * setup__peer_cfg -- prepare a new rpma_peer_cfg */ int setup__peer_cfg(void **cstate_ptr) { static struct peer_cfg_test_state cstate = {0}; /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); /* prepare an object */ int ret = rpma_peer_cfg_new(&cstate.cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate.cfg); *cstate_ptr = &cstate; return 0; } /* * teardown__peer_cfg -- delete the rpma_peer_cfg */ int teardown__peer_cfg(void **cstate_ptr) { struct peer_cfg_test_state *cstate = *cstate_ptr; /* prepare an object */ int ret = rpma_peer_cfg_delete(&cstate->cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->cfg); *cstate_ptr = NULL; return 0; } rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-common.h000066400000000000000000000013601443364775400215770ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* * peer_cfg-common.h -- the peer_cfg unit tests common definitions */ #ifndef PEER_CFG_COMMON #define PEER_CFG_COMMON #include "cmocka_headers.h" #include "test-common.h" #include "conn_req.h" #define MOCK_PEER_PCFG_PTR ((struct rpma_peer_cfg **)0xA1D1) #define 
MOCK_DESC ((void *)0xA1D3) #define MOCK_DESC_SIZE ((size_t)1) #define MOCK_WRONG_DESC_SIZE ((size_t)0) #define MOCK_SUPPORTED false /* * All the resources used between setup__peer_cfg_new and teardown__peer_cfg_new */ struct peer_cfg_test_state { struct rpma_peer_cfg *cfg; }; int setup__peer_cfg(void **cstate_ptr); int teardown__peer_cfg(void **cstate_ptr); #endif /* PEER_CFG_COMMON */ rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-delete.c000066400000000000000000000017421443364775400215500ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * peer_cfg-delete.c -- the rpma_peer_cfg_delete() unit tests * * APIs covered: * - rpma_peer_cfg_delete() */ #include "peer_cfg-common.h" /* * delete__pcfg_NULL -- NULL pcfg is invalid */ static void delete__pcfg_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__cfg_NULL -- NULL cfg is valid - quick exit */ static void delete__cfg_NULL(void **unused) { /* run test */ struct rpma_peer_cfg *cfg = NULL; int ret = rpma_peer_cfg_delete(&cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cfg); } static const struct CMUnitTest test_delete[] = { /* rpma_peer_cfg_delete() unit tests */ cmocka_unit_test(delete__pcfg_NULL), cmocka_unit_test(delete__cfg_NULL), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_delete, NULL, NULL); } rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-descriptor.c000066400000000000000000000155271443364775400224720ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * peer_cfg-direct_write_to_pmem.c -- the peer_cfg descriptor unit tests * * APIs covered: * - rpma_peer_cfg_get_descriptor_size() * - rpma_peer_cfg_get_descriptor() * - rpma_peer_cfg_from_descriptor() */ #include "peer_cfg-common.h" #include "test-common.h" /* * get_desc_size__pcfg_NULL -- NULL pcfg is invalid */ static void get_desc_size__pcfg_NULL(void **unused) { /* run test */ size_t desc_size; int ret = rpma_peer_cfg_get_descriptor_size(NULL, &desc_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_desc_size__desc_size_NULL -- NULL desc_size is invalid */ static void get_desc_size__desc_size_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_descriptor_size(MOCK_PEER_PCFG, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_desc_size__pcfg_desc_size_NULL -- NULL pcfg and desc_size are invalid */ static void get_desc_size__pcfg_desc_size_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_descriptor_size(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_desc_size__success -- happy day scenario */ static void get_desc_size__success(void **unused) { /* run test */ size_t desc_size; int ret = rpma_peer_cfg_get_descriptor_size(MOCK_PEER_PCFG, &desc_size); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(desc_size, MOCK_DESC_SIZE); } /* * get_desc__pcfg_NULL -- NULL pcfg is invalid */ static void get_desc__pcfg_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_descriptor(NULL, MOCK_DESC); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_desc__desc_NULL -- NULL desc is invalid */ static void get_desc__desc_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_descriptor(MOCK_PEER_PCFG, NULL); /* verify the 
results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_desc__pcfg_desc_NULL -- NULL pcfg and desc are invalid */ static void get_desc__pcfg_desc_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_descriptor(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * from_desc__desc_NULL -- NULL desc is invalid */ static void from_desc__desc_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_from_descriptor(NULL, MOCK_DESC_SIZE, MOCK_PEER_PCFG_PTR); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * from_desc__pcfg_ptr_NULL -- NULL pcfg_ptr is invalid */ static void from_desc__pcfg_ptr_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_from_descriptor(MOCK_DESC, MOCK_DESC_SIZE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * from_desc__pcfg_ptr_desc_NULL -- NULL pcfg_ptr and desc are invalid */ static void from_desc__pcfg_ptr_desc_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_from_descriptor(NULL, MOCK_DESC_SIZE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * from_desc__incorrect_desc_size -- incorrect size of the descriptor */ static void from_desc__incorrect_desc_size(void **unused) { /* configure mocks */ will_return_maybe(__wrap__test_malloc, MOCK_ERRNO); /* run test of rpma_peer_cfg_from_descriptor() */ struct rpma_peer_cfg *pcfg = NULL; int ret = rpma_peer_cfg_from_descriptor(MOCK_DESC, MOCK_WRONG_DESC_SIZE, &pcfg); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(pcfg); } /* * from_desc__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void from_desc__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test of rpma_peer_cfg_from_descriptor() */ struct rpma_peer_cfg *pcfg = NULL; int ret = rpma_peer_cfg_from_descriptor(MOCK_DESC, MOCK_DESC_SIZE, &pcfg); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(pcfg); } /* * from_desc__success -- happy day scenario */ static void from_desc__success(void **unused) { /* verify test conditions */ size_t desc_size; (void) rpma_peer_cfg_get_descriptor_size(MOCK_PEER_PCFG, &desc_size); assert_int_equal(desc_size, MOCK_DESC_SIZE); for (uint8_t supp = 0; supp < 2; supp++) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); /* run test of rpma_peer_cfg_from_descriptor() */ uint8_t desc[MOCK_DESC_SIZE]; desc[0] = supp; struct rpma_peer_cfg *pcfg; int ret = rpma_peer_cfg_from_descriptor(desc, MOCK_DESC_SIZE, &pcfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); bool supported; ret = rpma_peer_cfg_get_direct_write_to_pmem(pcfg, &supported); assert_int_equal(ret, MOCK_OK); assert_int_equal(supported, supp); ret = rpma_peer_cfg_delete(&pcfg); assert_int_equal(ret, MOCK_OK); } } /* * get_desc__lifecycle -- happy day scenario */ static void get_desc__lifecycle(void **cstate_ptr) { struct peer_cfg_test_state *cstate = *cstate_ptr; /* verify test conditions */ size_t desc_size; (void) rpma_peer_cfg_get_descriptor_size(MOCK_PEER_PCFG, &desc_size); assert_int_equal(desc_size, MOCK_DESC_SIZE); /* run test of rpma_peer_cfg_get_descriptor() */ uint8_t desc[MOCK_DESC_SIZE]; int ret = rpma_peer_cfg_get_descriptor(cstate->cfg, desc); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(desc[0], (uint8_t)false); /* run test of rpma_peer_cfg_set_direct_write_to_pmem() */ ret = rpma_peer_cfg_set_direct_write_to_pmem(cstate->cfg, true); /* verify the results */ 
assert_int_equal(ret, MOCK_OK); /* run test of rpma_peer_cfg_get_descriptor() */ ret = rpma_peer_cfg_get_descriptor(cstate->cfg, &desc); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(desc[0], (uint8_t)true); } static const struct CMUnitTest test_direct_write_to_pmem[] = { /* rpma_peer_cfg_get_descriptor_size() unit tests */ cmocka_unit_test(get_desc_size__pcfg_NULL), cmocka_unit_test(get_desc_size__desc_size_NULL), cmocka_unit_test(get_desc_size__pcfg_desc_size_NULL), cmocka_unit_test(get_desc_size__success), /* rpma_peer_cfg_get_descriptor() unit tests */ cmocka_unit_test(get_desc__pcfg_NULL), cmocka_unit_test(get_desc__desc_NULL), cmocka_unit_test(get_desc__pcfg_desc_NULL), /* rpma_peer_cfg_from_descriptor() unit tests */ cmocka_unit_test(from_desc__desc_NULL), cmocka_unit_test(from_desc__pcfg_ptr_NULL), cmocka_unit_test(from_desc__pcfg_ptr_desc_NULL), cmocka_unit_test(from_desc__incorrect_desc_size), cmocka_unit_test(from_desc__malloc_ERRNO), cmocka_unit_test(from_desc__success), /* rpma_peer_cfg_get_descriptor() lifecycle */ cmocka_unit_test_setup_teardown(get_desc__lifecycle, setup__peer_cfg, teardown__peer_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_direct_write_to_pmem, NULL, NULL); } rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-direct_write_to_pmem.c000066400000000000000000000060161443364775400245110ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * peer_cfg-direct_write_to_pmem.c -- * the rpma_peer_cfg_set/get_direct_write_to_pmem() unit tests * * APIs covered: * - rpma_peer_cfg_set_direct_write_to_pmem() * - rpma_peer_cfg_get_direct_write_to_pmem() */ #include "peer_cfg-common.h" #include "test-common.h" /* * set_dw2p__pcfg_NULL -- NULL pcfg is invalid */ static void set_dw2p__pcfg_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_set_direct_write_to_pmem(NULL, MOCK_SUPPORTED); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_dw2p__pcfg_NULL -- NULL pcfg is invalid */ static void get_dw2p__pcfg_NULL(void **unused) { /* run test */ bool supported; int ret = rpma_peer_cfg_get_direct_write_to_pmem(NULL, &supported); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_dw2p__supported_NULL -- NULL supported is invalid */ static void get_dw2p__supported_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_direct_write_to_pmem(MOCK_PEER_PCFG, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_dw2p__pcfg_supported_NULL -- NULL pcfg and supported are invalid */ static void get_dw2p__pcfg_supported_NULL(void **unused) { /* run test */ int ret = rpma_peer_cfg_get_direct_write_to_pmem(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * direct_write_to_pmem__lifecycle -- happy day scenario */ static void direct_write_to_pmem__lifecycle(void **cstate_ptr) { struct peer_cfg_test_state *cstate = *cstate_ptr; /* run test of rpma_peer_cfg_get_direct_write_to_pmem() */ bool supported; int ret = rpma_peer_cfg_get_direct_write_to_pmem(cstate->cfg, &supported); /* verify the results */ assert_int_equal(ret, MOCK_OK); /* 'false' is the default value */ assert_int_equal(supported, false); /* first 'true', then 'false' */ for (int supp = 1; supp >= 0; supp--) { /* run test of rpma_peer_cfg_set_direct_write_to_pmem() */ ret = rpma_peer_cfg_set_direct_write_to_pmem(cstate->cfg, (bool)supp); /* verify the results */ assert_int_equal(ret, MOCK_OK); /* run test of 
rpma_peer_cfg_get_direct_write_to_pmem() */ ret = rpma_peer_cfg_get_direct_write_to_pmem(cstate->cfg, &supported); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(supported, (bool)supp); } } static const struct CMUnitTest test_direct_write_to_pmem[] = { /* rpma_peer_cfg_set_direct_write_to_pmem() unit tests */ cmocka_unit_test(set_dw2p__pcfg_NULL), /* rpma_peer_cfg_get_direct_write_to_pmem() unit tests */ cmocka_unit_test(get_dw2p__pcfg_NULL), cmocka_unit_test(get_dw2p__supported_NULL), cmocka_unit_test(get_dw2p__pcfg_supported_NULL), /* rpma_peer_cfg_set/get_direct_write_to_pmem() lifecycle */ cmocka_unit_test_setup_teardown(direct_write_to_pmem__lifecycle, setup__peer_cfg, teardown__peer_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_direct_write_to_pmem, NULL, NULL); } rpma-1.3.0/tests/unit/peer_cfg/peer_cfg-new.c000066400000000000000000000025511443364775400210760ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * peer_cfg-new.c -- the rpma_peer_cfg_new() unit tests * * API covered: * - rpma_peer_cfg_new() */ #include "peer_cfg-common.h" /* * new__pcfg_NULL -- NULL pcfg is invalid */ static void new__pcfg_NULL(void **unused) { /* configure mocks */ will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ int ret = rpma_peer_cfg_new(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_peer_cfg *cfg = NULL; int ret = rpma_peer_cfg_new(&cfg); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(cfg); } /* * new__success -- all is OK */ static void new__success(void **cstate_ptr) { /* all things are done in setup__peer_cfg and teardown__peer_cfg */ } static const struct CMUnitTest test_new[] = { /* rpma_peer_cfg_new() unit tests */ cmocka_unit_test(new__pcfg_NULL), cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test_setup_teardown(new__success, setup__peer_cfg, teardown__peer_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_new, NULL, NULL); } rpma-1.3.0/tests/unit/private_data/000077500000000000000000000000001443364775400172575ustar00rootroot00000000000000rpma-1.3.0/tests/unit/private_data/CMakeLists.txt000066400000000000000000000011611443364775400220160ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # include(../../cmake/ctest_helpers.cmake) function (add_test_private_data name) set(src_name private_data-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c private_data-common.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/private_data.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_private_data(store) rpma-1.3.0/tests/unit/private_data/private_data-common.c000066400000000000000000000031541443364775400233570ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * private_data-common.c -- the private_data unit tests common functions */ #include #include #include "cmocka_headers.h" #include "private_data.h" 
#include "private_data-common.h" #include "librpma.h" /* * setup__cm_event -- setup test environment with a valid cm_event */ int setup__cm_event(void **edata_ptr) { static struct rdma_cm_event edata = {0}; static char buff[] = DEFAULT_VALUE; edata.event = CM_EVENT_VALID; edata.param.conn.private_data = buff; edata.param.conn.private_data_len = DEFAULT_LEN; *edata_ptr = &edata; return 0; } /* * setup__private_data -- prepare a valid private data object */ int setup__private_data(void **pdata_ptr) { struct rdma_cm_event edata = {0}; char buff[] = DEFAULT_VALUE; edata.event = CM_EVENT_VALID; edata.param.conn.private_data = buff; edata.param.conn.private_data_len = DEFAULT_LEN; /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); static struct rpma_conn_private_data pdata = {0}; int ret = rpma_private_data_store(&edata, &pdata); assert_int_equal(ret, MOCK_OK); assert_non_null(pdata.ptr); assert_string_equal(pdata.ptr, DEFAULT_VALUE); assert_int_equal(pdata.len, DEFAULT_LEN); *pdata_ptr = &pdata; return 0; } /* * teardown__private_data -- delete the private data object */ int teardown__private_data(void **pdata_ptr) { struct rpma_conn_private_data *pdata = *pdata_ptr; rpma_private_data_delete(pdata); assert_null(pdata->ptr); assert_int_equal(pdata->len, 0); return 0; } rpma-1.3.0/tests/unit/private_data/private_data-common.h000066400000000000000000000015551443364775400233670ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * private_data-common.h -- the private_data unit tests common functions */ #ifndef PRIVATE_DATA_COMMON #define PRIVATE_DATA_COMMON #include #include #include #include "cmocka_headers.h" #include "private_data.h" #include "librpma.h" #include "test-common.h" /* * Both RDMA_CM_EVENT_CONNECT_REQUEST and RDMA_CM_EVENT_ESTABLISHED are valid. * After the event type check, the behavior should not differ. 
*/ #define CM_EVENT_VALID RDMA_CM_EVENT_CONNECT_REQUEST #define DEFAULT_VALUE "The default one" #define DEFAULT_LEN (strlen(DEFAULT_VALUE) + 1) int setup__cm_event(void **edata_ptr); int setup__private_data(void **pdata_ptr); int teardown__private_data(void **pdata_ptr); #endif /* PRIVATE_DATA_COMMON */ rpma-1.3.0/tests/unit/private_data/private_data-store.c000066400000000000000000000061601443364775400232230ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright 2021, Fujitsu */ /* * private_data-store.c -- the rpma_private_data_store() unit tests * * API covered: * -rpma_private_data_store() */ #include #include #include "cmocka_headers.h" #include "private_data.h" #include "private_data-common.h" #include "librpma.h" /* * store__data_NULL -- data == NULL should prevent storing a private data */ static void store__data_NULL(void **edata_ptr) { struct rdma_cm_event *edata = *edata_ptr; edata->param.conn.private_data = NULL; /* run test */ struct rpma_conn_private_data pdata = {0}; int ret = rpma_private_data_store(*edata_ptr, &pdata); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(pdata.ptr, NULL); assert_int_equal(pdata.len, 0); } /* * store__data_len_0 -- data_len == 0 should prevent storing a private data * object */ static void store__data_len_0(void **edata_ptr) { struct rdma_cm_event *edata = *edata_ptr; edata->param.conn.private_data_len = 0; /* run test */ struct rpma_conn_private_data pdata = {0}; int ret = rpma_private_data_store(*edata_ptr, &pdata); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(pdata.ptr, NULL); assert_int_equal(pdata.len, 0); } /* * store__data_NULL_data_len_0 -- data == NULL && data_len == 0 should * prevent storing a private data */ static void store__data_NULL_data_len_0(void **edata_ptr) { struct rdma_cm_event *edata = *edata_ptr; edata->param.conn.private_data = NULL; edata->param.conn.private_data_len = 0; /* run test */ struct rpma_conn_private_data pdata = {0}; int ret = rpma_private_data_store(*edata_ptr, &pdata); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_ptr_equal(pdata.ptr, NULL); assert_int_equal(pdata.len, 0); } /* * store__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void store__malloc_ERRNO(void **edata_ptr) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_conn_private_data pdata = {0}; int ret = rpma_private_data_store(*edata_ptr, &pdata); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); assert_ptr_equal(pdata.ptr, NULL); assert_int_equal(pdata.len, 0); } /* * test_lifecycle - happy day scenario */ static void test_lifecycle(void **unused) { /* * the thing is done by setup__private_data() and * teardown__private_data() */ } static const struct CMUnitTest test_store[] = { /* rpma_private_data_store() unit tests */ cmocka_unit_test_setup_teardown(store__data_NULL, setup__cm_event, NULL), cmocka_unit_test_setup_teardown(store__data_len_0, setup__cm_event, NULL), cmocka_unit_test_setup_teardown(store__data_NULL_data_len_0, setup__cm_event, NULL), cmocka_unit_test_setup_teardown(store__malloc_ERRNO, setup__cm_event, NULL), /* rpma_private_data_store()/_discard() lifecycle */ cmocka_unit_test_setup_teardown(test_lifecycle, setup__private_data, teardown__private_data), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_store, NULL, NULL); } 
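/*
 * Editor's note (illustrative sketch, not part of the upstream sources):
 * the tests above simulate malloc() failures through cmocka's linker-wrap
 * mechanism -- see the "-Wl,--wrap=_test_malloc" link flag set in the
 * CMakeLists.txt files and the will_return(__wrap__test_malloc, ...) calls
 * in the test bodies. The actual wrapper lives in mocks-stdlib.c, which is
 * not reproduced here; the minimal sketch below only shows how such a
 * wrapper is typically written. The __real__/__wrap__ prefixes follow the
 * GNU ld wrapping convention; treat the body as an assumption about the
 * real mock, not a copy of it.
 */
#if 0 /* sketch only -- intentionally excluded from the build */
void *__real__test_malloc(size_t size, const char *file, int line);

void *
__wrap__test_malloc(size_t size, const char *file, int line)
{
	/* consume the value queued by will_return(__wrap__test_malloc, ...) */
	int err = mock_type(int);
	if (err) {
		/* non-zero (e.g. MOCK_ERRNO) -- simulate an allocation failure */
		errno = err;
		return NULL;
	}

	/* MOCK_OK -- fall through to cmocka's real tracking allocator */
	return __real__test_malloc(size, file, line);
}
#endif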
rpma-1.3.0/tests/unit/srq/000077500000000000000000000000001443364775400154215ustar00rootroot00000000000000rpma-1.3.0/tests/unit/srq/CMakeLists.txt000066400000000000000000000016001443364775400201560ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_srq name) set(src_name srq-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c srq-common.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-cq.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-mr.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-peer.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-srq_cfg.c ${LIBRPMA_SOURCE_DIR}/srq.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_srq(get_ibv_srq) add_test_srq(get_rcq) add_test_srq(new_delete) add_test_srq(recv) rpma-1.3.0/tests/unit/srq/srq-common.c000066400000000000000000000030311443364775400176550ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq-common.c -- the rpma_srq unit tests common functions */ #include "srq-common.h" struct srq_test_state Srq_new_srq_cfg_default = { .get_args.cfg = MOCK_SRQ_CFG_DEFAULT, .get_args.rq_size = MOCK_SRQ_SIZE_DEFAULT, .get_args.rcq_size = MOCK_SRQ_RCQ_SIZE_DEFAULT, }; struct srq_test_state Srq_new_srq_cfg_custom = { .get_args.cfg = MOCK_SRQ_CFG_CUSTOM, .get_args.rq_size = MOCK_SRQ_SIZE_CUSTOM, .get_args.rcq_size = MOCK_SRQ_RCQ_SIZE_CUSTOM, }; /* * setup__srq_new -- prepare a valid srq object */ int setup__srq_new(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ will_return(rpma_peer_create_srq, &cstate->get_args); will_return(rpma_peer_create_srq, MOCK_OK); will_return(__wrap__test_malloc, MOCK_OK); /* run test */ int ret = rpma_srq_new(MOCK_PEER, MOCK_GET_SRQ_CFG(cstate), &cstate->srq); /* verify the result */ assert_int_equal(ret, MOCK_OK); *cstate_ptr = cstate; return 0; } /* * teardown__srq_delete -- destroy the srq object */ int teardown__srq_delete(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_SRQ_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_OK); /* run test */ int ret = rpma_srq_delete(&cstate->srq); /* verify the result */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->srq); return 0; } rpma-1.3.0/tests/unit/srq/srq-common.h000066400000000000000000000032051443364775400176650ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * srq-common.h -- the rpma_srq unit tests common definitions */ #ifndef SRQ_COMMON_H #define SRQ_COMMON_H #include #include #include "cmocka_headers.h" #include "test-common.h" #include "mocks-ibverbs.h" #include "mocks-rpma-srq_cfg.h" #include "mocks-rpma-srq.h" #include "mocks-rpma-cq.h" #include "srq.h" #define MOCK_GET_SRQ_CFG(cstate) \ ((cstate)->get_args.cfg == MOCK_SRQ_CFG_DEFAULT ? NULL : (cstate)->get_args.cfg) #define MOCK_GET_SRQ_RCQ(cstate) ((cstate)->get_args.rcq_size ? 
MOCK_RPMA_SRQ_RCQ : NULL) #define SRQ_NEW_TEST_SETUP_TEARDOWN_WITHOUT_CFG(test_func, setup_func, teardown_func) \ {#test_func "__without_cfg", (test_func), (setup_func), (teardown_func), \ &Srq_new_srq_cfg_default} #define SRQ_NEW_TEST_WITHOUT_CFG(test_func) \ SRQ_NEW_TEST_SETUP_TEARDOWN_WITHOUT_CFG(test_func, NULL, NULL) #define SRQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_CFG(test_func, setup_func, teardown_func) \ {#test_func "__without_cfg", (test_func), (setup_func), (teardown_func), \ &Srq_new_srq_cfg_default}, \ {#test_func "__with_cfg", (test_func), (setup_func), (teardown_func), \ &Srq_new_srq_cfg_custom} #define SRQ_NEW_TEST_WITH_AND_WITHOUT_CFG(test_func) \ SRQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_CFG(test_func, NULL, NULL) struct srq_test_state { struct srq_cfg_get_mock_args get_args; struct rpma_srq *srq; }; extern struct srq_test_state Srq_new_srq_cfg_default; extern struct srq_test_state Srq_new_srq_cfg_custom; int setup__srq_new(void **srq_ptr); int teardown__srq_delete(void **srq_ptr); #endif /* SRQ_COMMON_H */ rpma-1.3.0/tests/unit/srq/srq-get_ibv_srq.c000066400000000000000000000015551443364775400207020ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq-get_ibv_srq.c -- the rpma_srq_get_ibv_srq() unit tests * * APIs covered: * - rpma_srq_get_ibv_srq() */ #include "srq-common.h" /* * get_ibv_srq__success -- happy day scenario */ static void get_ibv_srq__success(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* run test */ struct ibv_srq *ibv_srq = rpma_srq_get_ibv_srq(cstate->srq); /* verify the results */ assert_ptr_equal(ibv_srq, MOCK_IBV_SRQ); } static const struct CMUnitTest tests_get_ibv_srq[] = { /* rpma_srq_get_ibv_srq() unit tests */ cmocka_unit_test_prestate_setup_teardown( get_ibv_srq__success, setup__srq_new, teardown__srq_delete, &Srq_new_srq_cfg_default), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_ibv_srq, NULL, NULL); } rpma-1.3.0/tests/unit/srq/srq-get_rcq.c000066400000000000000000000030561443364775400200200ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq-get_rcq.c -- the rpma_srq_get_rcq() unit tests * * APIs covered: * - rpma_srq_get_rcq() */ #include "srq-common.h" /* * get_rcq__srq_NULL -- srq NULL is invalid */ static void get_rcq__srq_NULL(void **unused) { struct rpma_cq *rcq = NULL; /* run test */ int ret = rpma_srq_get_rcq(NULL, &rcq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_rcq__rcq_ptr_NULL -- rcq_ptr NULL is invalid */ static void get_rcq__rcq_ptr_NULL(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_get_rcq(cstate->srq, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_rcq__success -- happy day scenario */ static void get_rcq__success(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; struct rpma_cq *rcq = NULL; /* run test */ int ret = rpma_srq_get_rcq(cstate->srq, &rcq); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(rcq, MOCK_GET_SRQ_RCQ(cstate)); } static const struct CMUnitTest tests_get_rcq[] = { /* rpma_srq_get_rcq() unit tests */ cmocka_unit_test(get_rcq__srq_NULL), cmocka_unit_test_prestate_setup_teardown( get_rcq__rcq_ptr_NULL, setup__srq_new, teardown__srq_delete, &Srq_new_srq_cfg_default), SRQ_NEW_TEST_SETUP_TEARDOWN_WITHOUT_CFG( get_rcq__success, setup__srq_new, teardown__srq_delete), 
cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_get_rcq, NULL, NULL); } rpma-1.3.0/tests/unit/srq/srq-new_delete.c000066400000000000000000000146301443364775400205070ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq-new_delete.c -- the rpma_srq_new/delete() unit tests * * APIs covered: * - rpma_srq_new() * - rpma_srq_delete() */ #include "srq-common.h" /* * new__peer_NULL -- NULL peer is invalid */ static void new__peer_NULL(void **unused) { /* run test */ struct rpma_srq *srq = NULL; int ret = rpma_srq_new(NULL, NULL, &srq); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(srq); } /* * new__srq_ptr_NULL -- NULL srq_ptr is invalid */ static void new__srq_ptr_NULL(void **unused) { /* run test */ int ret = rpma_srq_new(MOCK_PEER, NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__rpma_peer_create_srq_ERRNO -- rpma_peer_create_srq() fails with MOCK_ERRNO */ static void new__rpma_peer_create_srq_ERRNO(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ will_return(rpma_peer_create_srq, &cstate->get_args); will_return(rpma_peer_create_srq, RPMA_E_PROVIDER); will_return(rpma_peer_create_srq, MOCK_ERRNO); /* run test */ int ret = rpma_srq_new(MOCK_PEER, MOCK_GET_SRQ_CFG(cstate), &cstate->srq); /* verify the result */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ will_return(rpma_peer_create_srq, &cstate->get_args); will_return(rpma_peer_create_srq, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_SRQ_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_OK); /* run test */ int ret = rpma_srq_new(MOCK_PEER, MOCK_GET_SRQ_CFG(cstate), &cstate->srq); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); } /* * new__malloc_ERRNO_subsequent_ERRNO2 -- malloc() fails with MOCK_ERRNO * whereas subsequent (rpma_cq_delete(), ibv_destroy_srq()) fail with * MOCK_ERRNO2 */ static void new__malloc_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ will_return(rpma_peer_create_srq, &cstate->get_args); will_return(rpma_peer_create_srq, MOCK_OK); will_return(__wrap__test_malloc, MOCK_ERRNO); expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_SRQ_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO2); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_ERRNO2); /* run test */ int ret = rpma_srq_new(MOCK_PEER, MOCK_GET_SRQ_CFG(cstate), &cstate->srq); /* verify the result */ assert_int_equal(ret, RPMA_E_NOMEM); } /* * test_lifecycle - happy day scenario */ static void test_lifecycle(void **unused) { /* * the thing is done by setup__srq_new() and teardown__srq_delete() */ } /* * delete__srq_ptr_NULL - NULL srq_ptr is invalid */ static void delete__srq_ptr_NULL(void **unused) { /* run test */ int ret = rpma_srq_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__srq_NULL - NULL *srq_ptr should exit quickly */ static void delete__srq_NULL(void **unused) { struct rpma_srq *srq = NULL; /* run test */ int ret = rpma_srq_delete(&srq); /* verify the 
results */ assert_int_equal(ret, MOCK_OK); } /* * delete__rpma_cq_delete_ERRNO -- rpma_cq_delete() fails with MOCK_ERRNO */ static void delete__rpma_cq_delete_ERRNO(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; assert_int_equal(setup__srq_new((void **)&cstate), MOCK_OK); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_SRQ_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_OK); /* run test */ int ret = rpma_srq_delete(&cstate->srq); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->srq); } /* * delete__rpma_cq_delete_ERRNO_subsequent_ERRNO2 -- ibv_destroy_srq() * fails with MOCK_ERRNO2 after rpma_cq_delete() failed with MOCK_ERRNO */ static void delete__rpma_cq_delete_ERRNO_subsequent_ERRNO2(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; assert_int_equal(setup__srq_new((void **)&cstate), MOCK_OK); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_RPMA_SRQ_RCQ); will_return(rpma_cq_delete, RPMA_E_PROVIDER); will_return(rpma_cq_delete, MOCK_ERRNO); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_ERRNO2); /* run test */ int ret = rpma_srq_delete(&cstate->srq); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->srq); } /* * delete__ibv_destroy_srq_ERRNO -- ibv_destroy_srq() fails with MOCK_ERRNO */ static void delete__ibv_destroy_srq_ERRNO(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; assert_int_equal(setup__srq_new((void **)&cstate), MOCK_OK); assert_non_null(cstate); /* configure mocks */ expect_value(rpma_cq_delete, *cq_ptr, MOCK_GET_SRQ_RCQ(cstate)); will_return(rpma_cq_delete, MOCK_OK); expect_value(ibv_destroy_srq, srq, MOCK_IBV_SRQ); will_return(ibv_destroy_srq, MOCK_ERRNO); /* run test */ int ret = rpma_srq_delete(&cstate->srq); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(cstate->srq); } static const struct CMUnitTest tests_new_delete[] = { /* rpma_srq_new() unit tests */ cmocka_unit_test(new__peer_NULL), cmocka_unit_test(new__srq_ptr_NULL), SRQ_NEW_TEST_WITHOUT_CFG(new__rpma_peer_create_srq_ERRNO), SRQ_NEW_TEST_WITH_AND_WITHOUT_CFG(new__malloc_ERRNO), SRQ_NEW_TEST_WITHOUT_CFG(new__malloc_ERRNO_subsequent_ERRNO2), /* rpma_srq_new()/delete() lifecycle */ SRQ_NEW_TEST_SETUP_TEARDOWN_WITH_AND_WITHOUT_CFG(test_lifecycle, setup__srq_new, teardown__srq_delete), /* rpma_srq_delete() unit tests */ cmocka_unit_test(delete__srq_ptr_NULL), cmocka_unit_test(delete__srq_NULL), SRQ_NEW_TEST_WITHOUT_CFG(delete__rpma_cq_delete_ERRNO), SRQ_NEW_TEST_WITHOUT_CFG( delete__rpma_cq_delete_ERRNO_subsequent_ERRNO2), SRQ_NEW_TEST_WITH_AND_WITHOUT_CFG(delete__ibv_destroy_srq_ERRNO), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_new_delete, NULL, NULL); } rpma-1.3.0/tests/unit/srq/srq-recv.c000066400000000000000000000050541443364775400173330ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq-recv.c -- the rpma_srq_recv() unit tests * * APIs covered: * - rpma_srq_recv() */ #include "srq-common.h" /* * recv__srq_NULL - NULL srq is invalid */ static void recv__srq_NULL(void **unused) { /* run test */ int ret = rpma_srq_recv(NULL, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify 
the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_offset_not_NULL - NULL dst and not NULL offset is invalid */ static void recv__dst_NULL_offset_not_NULL(void **unused) { /* run test */ int ret = rpma_srq_recv(MOCK_RPMA_SRQ, NULL, MOCK_LOCAL_OFFSET, 0, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_len_not_NULL - NULL dst and not NULL len is invalid */ static void recv__dst_NULL_len_not_NULL(void **unused) { /* run test */ int ret = rpma_srq_recv(MOCK_RPMA_SRQ, NULL, 0, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__dst_NULL_offset_len_not_NULL - NULL dst and not NULL offset or len are invalid */ static void recv__dst_NULL_offset_len_not_NULL(void **unused) { /* run test */ int ret = rpma_srq_recv(MOCK_RPMA_SRQ, NULL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * recv__success - happy day scenario */ static void recv__success(void **cstate_ptr) { struct srq_test_state *cstate = *cstate_ptr; /* configure mocks */ expect_value(rpma_mr_srq_recv, srq, MOCK_IBV_SRQ); expect_value(rpma_mr_srq_recv, dst, MOCK_RPMA_MR_LOCAL); expect_value(rpma_mr_srq_recv, offset, MOCK_LOCAL_OFFSET); expect_value(rpma_mr_srq_recv, len, MOCK_LEN); expect_value(rpma_mr_srq_recv, op_context, MOCK_OP_CONTEXT); will_return(rpma_mr_srq_recv, MOCK_OK); /* run test */ int ret = rpma_srq_recv(cstate->srq, MOCK_RPMA_MR_LOCAL, MOCK_LOCAL_OFFSET, MOCK_LEN, MOCK_OP_CONTEXT); /* verify the results */ assert_int_equal(ret, MOCK_OK); } static const struct CMUnitTest tests_recv[] = { /* rpma_srq_recv() unit tests */ cmocka_unit_test(recv__srq_NULL), cmocka_unit_test(recv__dst_NULL_offset_not_NULL), cmocka_unit_test(recv__dst_NULL_len_not_NULL), cmocka_unit_test(recv__dst_NULL_offset_len_not_NULL), cmocka_unit_test_prestate_setup_teardown(recv__success, setup__srq_new, teardown__srq_delete, &Srq_new_srq_cfg_default), cmocka_unit_test(NULL) }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(tests_recv, NULL, NULL); } rpma-1.3.0/tests/unit/srq_cfg/000077500000000000000000000000001443364775400162405ustar00rootroot00000000000000rpma-1.3.0/tests/unit/srq_cfg/CMakeLists.txt000066400000000000000000000012531443364775400210010ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Fujitsu # include(../../cmake/ctest_helpers.cmake) function(add_test_srq_cfg name) set(src_name srq_cfg-${name}) set(name ut-${src_name}) build_test_src(UNIT NAME ${name} SRCS ${src_name}.c srq_cfg-common.c ${TEST_UNIT_COMMON_DIR}/mocks-stdlib.c ${LIBRPMA_SOURCE_DIR}/srq_cfg.c) target_compile_definitions(${name} PRIVATE TEST_MOCK_ALLOC) set_target_properties(${name} PROPERTIES LINK_FLAGS "-Wl,--wrap=_test_malloc") add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_srq_cfg(delete) add_test_srq_cfg(new) add_test_srq_cfg(rcqe) add_test_srq_cfg(rcq_size) add_test_srq_cfg(rq_size) rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-common.c000066400000000000000000000016341443364775400213220ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq_cfg-common.c -- the srq_cfg unit tests common functions */ #include "srq_cfg-common.h" /* * setup__srq_cfg -- prepare a new rpma_srq_cfg */ int setup__srq_cfg(void **cstate_ptr) { static struct srq_cfg_test_state cstate = {0}; /* configure mocks */ will_return(__wrap__test_malloc, MOCK_OK); /* prepare an object */ int ret = 
rpma_srq_cfg_new(&cstate.cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_non_null(cstate.cfg); *cstate_ptr = &cstate; return 0; } /* * teardown__srq_cfg -- delete the rpma_srq_cfg */ int teardown__srq_cfg(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* prepare an object */ int ret = rpma_srq_cfg_delete(&cstate->cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cstate->cfg); *cstate_ptr = NULL; return 0; } rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-common.h000066400000000000000000000010051443364775400213170ustar00rootroot00000000000000/* SPDX-License-Identifier: BSD-3-Clause */ /* Copyright 2022, Fujitsu */ /* * srq_cfg-common.h -- the srq_cfg unit tests common definitions */ #ifndef SRQ_CFG_COMMON #define SRQ_CFG_COMMON #include "cmocka_headers.h" #include "librpma.h" #include "test-common.h" /* * All the resources used between setup__srq_cfg and teardown__srq_cfg */ struct srq_cfg_test_state { struct rpma_srq_cfg *cfg; }; int setup__srq_cfg(void **cstate_ptr); int teardown__srq_cfg(void **cstate_ptr); #endif /* SRQ_CFG_COMMON */ rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-delete.c000066400000000000000000000017341443364775400212750ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq_cfg-delete.c -- the rpma_srq_cfg_delete() unit tests * * APIs covered: * - rpma_srq_cfg_delete() */ #include "srq_cfg-common.h" /* * delete__cfg_ptr_NULL -- NULL cfg_ptr is invalid */ static void delete__cfg_ptr_NULL(void **unused) { /* run test */ int ret = rpma_srq_cfg_delete(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * delete__cfg_NULL -- NULL cfg is valid - quick exit */ static void delete__cfg_NULL(void **unused) { /* run test */ struct rpma_srq_cfg *cfg = NULL; int ret = rpma_srq_cfg_delete(&cfg); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_null(cfg); } static const struct CMUnitTest test_delete[] = { /* rpma_srq_cfg_delete() unit tests */ cmocka_unit_test(delete__cfg_ptr_NULL), cmocka_unit_test(delete__cfg_NULL), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_delete, NULL, NULL); } rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-new.c000066400000000000000000000037301443364775400206220ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* Copyright 2022, Intel Corporation */ /* * srq_cfg-new.c -- the rpma_srq_cfg_new() unit tests * * API covered: * - rpma_srq_cfg_new() */ #include "srq_cfg-common.h" #include "srq_cfg.h" /* * new__cfg_ptr_NULL -- NULL cfg_ptr is invalid */ static void new__cfg_ptr_NULL(void **unused) { /* configure mocks */ will_return_maybe(__wrap__test_malloc, MOCK_OK); /* run test */ int ret = rpma_srq_cfg_new(NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * new__malloc_ERRNO -- malloc() fails with MOCK_ERRNO */ static void new__malloc_ERRNO(void **unused) { /* configure mocks */ will_return(__wrap__test_malloc, MOCK_ERRNO); /* run test */ struct rpma_srq_cfg *cfg = NULL; int ret = rpma_srq_cfg_new(&cfg); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(cfg); } /* * new__success -- all is OK */ static void new__success(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* get the default configuration for comparison */ struct rpma_srq_cfg *cfg_default = rpma_srq_cfg_default(); uint32_t a_size, b_size; int ret; /* collect values and compare to defaults */ ret = 
rpma_srq_cfg_get_rq_size(cstate->cfg, &a_size); assert_int_equal(ret, MOCK_OK); ret = rpma_srq_cfg_get_rq_size(cfg_default, &b_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(a_size, b_size); ret = rpma_srq_cfg_get_rcq_size(cstate->cfg, &a_size); assert_int_equal(ret, MOCK_OK); ret = rpma_srq_cfg_get_rcq_size(cfg_default, &b_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(a_size, b_size); } static const struct CMUnitTest test_new[] = { /* rpma_srq_cfg_new() unit tests */ cmocka_unit_test(new__cfg_ptr_NULL), cmocka_unit_test(new__malloc_ERRNO), cmocka_unit_test_setup_teardown(new__success, setup__srq_cfg, teardown__srq_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_new, NULL, NULL); } rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-rcq_size.c000066400000000000000000000040761443364775400216540ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq_cfg-cq_size.c -- the rpma_srq_cfg_set/get_rcq_size() unit tests * * APIs covered: * - rpma_srq_cfg_set_rcq_size() * - rpma_srq_cfg_get_rcq_size() */ #include "srq_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_srq_cfg_set_rcq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t rcq_size; int ret = rpma_srq_cfg_get_rcq_size(NULL, &rcq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__rcq_size_NULL -- NULL rcq_size is invalid */ static void get__rcq_size_NULL(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_get_rcq_size(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * rcq_size__lifecycle -- happy day scenario */ static void rcq_size__lifecycle(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_set_rcq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); /* run test */ uint32_t rcq_size; ret = rpma_srq_cfg_get_rcq_size(cstate->cfg, &rcq_size); /* verify the results */ assert_int_equal(ret, MOCK_OK); assert_int_equal(rcq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_rcq_size[] = { /* rpma_srq_cfg_set_rcq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_srq_cfg_get_rcq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__rcq_size_NULL, setup__srq_cfg, teardown__srq_cfg), /* rpma_srq_cfg_set/get_rcq_size() lifecycle */ cmocka_unit_test_setup_teardown(rcq_size__lifecycle, setup__srq_cfg, teardown__srq_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rcq_size, NULL, NULL); } rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-rcqe.c000066400000000000000000000026001443364775400207560ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq_cfg-rcqe.c -- the rpma_srq_cfg_get_rcqe() unit tests * * API covered: * - rpma_srq_cfg_get_rcqe() */ #include #include "srq_cfg-common.h" #include "srq_cfg.h" /* * rcqe__lifecycle -- happy day scenario */ static void rcqe__lifecycle(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_set_rcq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); 
int rcqe; rpma_srq_cfg_get_rcqe(cstate->cfg, &rcqe); assert_int_equal(rcqe, MOCK_Q_SIZE); } /* * rcqe__clipped -- rcq_size > INT_MAX => rcqe = INT_MAX */ static void rcqe__clipped(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_set_rcq_size(cstate->cfg, (uint32_t)INT_MAX + 1); /* verify the results */ assert_int_equal(ret, MOCK_OK); int rcqe; rpma_srq_cfg_get_rcqe(cstate->cfg, &rcqe); assert_int_equal(rcqe, INT_MAX); } static const struct CMUnitTest test_rcqe[] = { /* rpma_srq_cfg_set/get_cq_size() lifecycle */ cmocka_unit_test_setup_teardown(rcqe__lifecycle, setup__srq_cfg, teardown__srq_cfg), cmocka_unit_test_setup_teardown(rcqe__clipped, setup__srq_cfg, teardown__srq_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rcqe, NULL, NULL); } rpma-1.3.0/tests/unit/srq_cfg/srq_cfg-rq_size.c000066400000000000000000000037711443364775400215120ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2022, Fujitsu */ /* * srq_cfg-rq_size.c -- the rpma_srq_cfg_set/get_rq_size() unit tests * * APIs covered: * - rpma_srq_cfg_set_rq_size() * - rpma_srq_cfg_get_rq_size() */ #include "srq_cfg-common.h" #include "test-common.h" /* * set__cfg_NULL -- NULL cfg is invalid */ static void set__cfg_NULL(void **unused) { /* run test */ int ret = rpma_srq_cfg_set_rq_size(NULL, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__cfg_NULL -- NULL cfg is invalid */ static void get__cfg_NULL(void **unused) { /* run test */ uint32_t rq_size; int ret = rpma_srq_cfg_get_rq_size(NULL, &rq_size); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get__rq_size_NULL -- NULL rq_size is invalid */ static void get__rq_size_NULL(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_get_rq_size(cstate->cfg, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * rq_size__lifecycle -- happy day scenario */ static void rq_size__lifecycle(void **cstate_ptr) { struct srq_cfg_test_state *cstate = *cstate_ptr; /* run test */ int ret = rpma_srq_cfg_set_rq_size(cstate->cfg, MOCK_Q_SIZE); /* verify the results */ assert_int_equal(ret, MOCK_OK); uint32_t rq_size; ret = rpma_srq_cfg_get_rq_size(cstate->cfg, &rq_size); assert_int_equal(ret, MOCK_OK); assert_int_equal(rq_size, MOCK_Q_SIZE); } static const struct CMUnitTest test_rq_size[] = { /* rpma_srq_cfg_set_rq_size() unit tests */ cmocka_unit_test(set__cfg_NULL), /* rpma_srq_cfg_get_rq_size() unit tests */ cmocka_unit_test(get__cfg_NULL), cmocka_unit_test_setup_teardown(get__rq_size_NULL, setup__srq_cfg, teardown__srq_cfg), /* rpma_srq_cfg_set/get_rq_size() lifecycle */ cmocka_unit_test_setup_teardown(rq_size__lifecycle, setup__srq_cfg, teardown__srq_cfg), }; int main(int argc, char *argv[]) { return cmocka_run_group_tests(test_rq_size, NULL, NULL); } rpma-1.3.0/tests/unit/utils/000077500000000000000000000000001443364775400157545ustar00rootroot00000000000000rpma-1.3.0/tests/unit/utils/CMakeLists.txt000066400000000000000000000017121443364775400205150ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # Copyright (c) 2022-2023, Fujitsu Limited # include(../../cmake/ctest_helpers.cmake) function(add_test_utils name) set(src_name utils-${name}) set(name ut-${src_name}) if(${name} STREQUAL "ut-utils-get_ibv_context") set(src_dependencies utils-common.c) else() set(src_dependencies 
${TEST_UNIT_COMMON_DIR}/mocks-rdma_cm.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-info.c) endif() build_test_src(UNIT NAME ${name} SRCS ${src_name}.c ${src_dependencies} ${TEST_UNIT_COMMON_DIR}/mocks-ibverbs.c ${TEST_UNIT_COMMON_DIR}/mocks-rpma-log.c ${LIBRPMA_SOURCE_DIR}/rpma_err.c ${LIBRPMA_SOURCE_DIR}/utils.c) add_test_generic(NAME ${name} TRACERS none) endfunction() add_test_utils(conn_event_2str) add_test_utils(get_ibv_context) add_test_utils(ibv_context_is_atomic_write_capable) add_test_utils(ibv_context_is_flush_capable) add_test_utils(ibv_context_is_odp_capable) rpma-1.3.0/tests/unit/utils/utils-common.c000066400000000000000000000056061443364775400205550ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * utils-common.c -- the utils unit tests common functions */ #include #include #include #include "cmocka_headers.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "info.h" #include "test-common.h" /* * rpma_info_new -- mock of rpma_info_new */ int rpma_info_new(const char *addr, const char *port, enum rpma_info_side side, struct rpma_info **info_ptr) { /* * rpma_info_new() and rdma_create_id() may be called in any order. * If the first one fails, then the second one won't be called, * so we cannot add cmocka's expects here. * Otherwise, unconsumed expects would cause a test failure. */ assert_string_equal(addr, MOCK_IP_ADDRESS); assert_null(port); assert_true(side == RPMA_INFO_PASSIVE || side == RPMA_INFO_ACTIVE); *info_ptr = mock_type(struct rpma_info *); if (*info_ptr == NULL) { int result = mock_type(int); /* XXX validate the errno handling */ if (result == RPMA_E_PROVIDER) errno = mock_type(int); return result; } expect_value(rpma_info_delete, *info_ptr, *info_ptr); return 0; } /* * rdma_create_id -- mock of rdma_create_id */ int rdma_create_id(struct rdma_event_channel *channel, struct rdma_cm_id **id, void *context, enum rdma_port_space ps) { /* * rpma_info_new() and rdma_create_id() may be called in any order. * If the first one fails, then the second one won't be called, * so we cannot add cmocka's expects here. * Otherwise, unconsumed expects would cause a test failure. 
*/ assert_non_null(id); assert_null(context); assert_int_equal(ps, RDMA_PS_TCP); /* allocate (struct rdma_cm_id *) */ *id = mock_type(struct rdma_cm_id *); if (*id == NULL) { errno = mock_type(int); return -1; } expect_value(rdma_destroy_id, id, *id); return 0; } /* * rpma_info_bind_addr -- mock of rpma_info_bind_addr */ int rpma_info_bind_addr(const struct rpma_info *info, struct rdma_cm_id *id) { check_expected(info); check_expected(id); /* XXX validate the errno handling */ int ret = mock_type(int); if (ret) errno = mock_type(int); return ret; } /* * rpma_info_resolve_addr -- mock of rpma_info_resolve_addr */ int rpma_info_resolve_addr(const struct rpma_info *info, struct rdma_cm_id *id, int timeout_ms) { check_expected(info); check_expected(id); assert_int_equal(timeout_ms, RPMA_DEFAULT_TIMEOUT_MS); /* XXX validate the errno handling */ int ret = mock_type(int); if (ret) errno = mock_type(int); return ret; } /* * rdma_destroy_id -- mock of rdma_destroy_id */ int rdma_destroy_id(struct rdma_cm_id *id) { check_expected(id); errno = mock_type(int); if (errno) return -1; return 0; } /* * rpma_info_delete -- mock of rpma_info_delete */ int rpma_info_delete(struct rpma_info **info_ptr) { if (info_ptr == NULL) return RPMA_E_INVAL; check_expected(*info_ptr); return 0; } rpma-1.3.0/tests/unit/utils/utils-conn_event_2str.c000066400000000000000000000050421443364775400223670ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020-2022, Intel Corporation */ /* * utils-conn_event_2str.c -- a unit test for rpma_utils_conn_event_2str() */ #include "cmocka_headers.h" #include "librpma.h" #define RPMA_CONN_UNSUPPORTED (RPMA_CONN_UNDEFINED - 1) /* * conn_event_2str__CONN_UNDEFINED - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_UNDEFINED(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_UNDEFINED), "Undefined connection event"); } /* * conn_event_2str__CONN_ESTABLISHED - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_ESTABLISHED(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_ESTABLISHED), "Connection established"); } /* * conn_event_2str__CONN_CLOSED - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_CLOSED(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_CLOSED), "Connection closed"); } /* * conn_event_2str__CONN_LOST - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_LOST(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_LOST), "Connection lost"); } /* * conn_event_2str__CONN_REJECTED - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_REJECTED(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_REJECTED), "Connection rejected"); } /* * conn_event_2str__CONN_UNREACHABLE - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_UNREACHABLE(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_UNREACHABLE), "Connection unreachable"); } /* * conn_event_2str__CONN_UNSUPPORTED - sanity test for * rpma_utils_conn_event_2str() */ static void conn_event_2str__CONN_UNSUPPORTED(void **unused) { assert_string_equal(rpma_utils_conn_event_2str(RPMA_CONN_UNSUPPORTED), "Unsupported connection event"); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* rpma_utils_conn_event_2str() unit tests */ 
cmocka_unit_test(conn_event_2str__CONN_UNDEFINED), cmocka_unit_test(conn_event_2str__CONN_ESTABLISHED), cmocka_unit_test(conn_event_2str__CONN_CLOSED), cmocka_unit_test(conn_event_2str__CONN_LOST), cmocka_unit_test(conn_event_2str__CONN_REJECTED), cmocka_unit_test(conn_event_2str__CONN_UNREACHABLE), cmocka_unit_test(conn_event_2str__CONN_UNSUPPORTED), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/utils/utils-get_ibv_context.c000066400000000000000000000230631443364775400224450ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * utils-get_ibv_context.c -- a unit test for rpma_utils_get_ibv_context() */ #include #include #include #include "cmocka_headers.h" #include "librpma.h" #include "mocks-ibverbs.h" #include "info.h" #include "test-common.h" #define TYPE_UNKNOWN (enum rpma_util_ibv_context_type)(-1) /* * sanity__type_unknown - TYPE_UNKNOWN != RPMA_UTIL_IBV_CONTEXT_LOCAL && * TYPE_UNKNOWN != RPMA_UTIL_IBV_CONTEXT_REMOTE */ static void sanity__type_unknown(void **unused) { /* run test */ assert_int_not_equal(TYPE_UNKNOWN, RPMA_UTIL_IBV_CONTEXT_LOCAL); assert_int_not_equal(TYPE_UNKNOWN, RPMA_UTIL_IBV_CONTEXT_REMOTE); } /* * get_ibvc__addr_NULL - test NULL addr parameter */ static void get_ibvc__addr_NULL(void **unused) { /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(NULL, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); assert_null(ibv_ctx); } /* * get_ibvc__ibv_ctx_NULL - test NULL ibv_ctx parameter */ static void get_ibvc__ibv_ctx_NULL(void **unused) { /* run test */ int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ibvc__type_unknown - type == TYPE_UNKNOWN */ static void get_ibvc__type_unknown(void **unused) { /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, TYPE_UNKNOWN, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ibvc__addr_NULL_ibv_ctx_NULL_type_unknown - test NULL addr and * ibv_ctx parameter and type == TYPE_UNKNOWN */ static void get_ibvc__addr_NULL_ibv_ctx_NULL_type_unknown(void **unused) { /* run test */ int ret = rpma_utils_get_ibv_context(NULL, TYPE_UNKNOWN, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * get_ibvc__info_new_failed_E_PROVIDER - rpma_info_new fails with * RPMA_E_PROVIDER */ static void get_ibvc__info_new_failed_E_PROVIDER(void **unused) { /* configure mocks */ will_return(rpma_info_new, NULL /* info_ptr */); will_return(rpma_info_new, RPMA_E_PROVIDER); will_return(rpma_info_new, MOCK_ERRNO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return_maybe(rdma_create_id, &id); will_return_maybe(rdma_destroy_id, 0); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_ctx); } /* * get_ibvc__info_new_failed_E_NOMEM - rpma_info_new fails with RPMA_E_NOMEM */ static void get_ibvc__info_new_failed_E_NOMEM(void **unused) { /* configure mocks */ will_return(rpma_info_new, NULL /* info_ptr */); will_return(rpma_info_new, RPMA_E_NOMEM); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return_maybe(rdma_create_id, &id); will_return_maybe(rdma_destroy_id, 0); /* run test */ struct 
ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_NOMEM); assert_null(ibv_ctx); } /* * get_ibvc__create_id_failed - rdma_create_id fails with MOCK_ERRNO */ static void get_ibvc__create_id_failed(void **unused) { /* * Configure mocks. * We assume it is not important which call of rpma_info_new() * succeeds (active or passive), since failing rdma_create_id() * should look and behave the same. * Here we assume that if rpma_info_new() is called, it will succeed * for a local address (passive side). */ will_return_maybe(rpma_info_new, MOCK_INFO); will_return(rdma_create_id, NULL); will_return(rdma_create_id, MOCK_ERRNO); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_ctx); } /* * get_ibvc__bind_addr_failed_E_PROVIDER - rpma_info_bind_addr fails * with RPMA_E_PROVIDER */ static void get_ibvc__bind_addr_failed_E_PROVIDER(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); /* a local address (passive side) */ expect_value(rpma_info_bind_addr, info, MOCK_INFO); expect_value(rpma_info_bind_addr, id, &id); will_return(rpma_info_bind_addr, RPMA_E_PROVIDER); will_return(rpma_info_bind_addr, MOCK_ERRNO); will_return(rdma_destroy_id, 0); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_ctx); } /* * get_ibvc__resolve_addr_failed_E_PROVIDER - rpma_info_resolve_addr fails * with RPMA_E_PROVIDER */ static void get_ibvc__resolve_addr_failed_E_PROVIDER(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); /* a remote address (active side) */ expect_value(rpma_info_resolve_addr, info, MOCK_INFO); expect_value(rpma_info_resolve_addr, id, &id); will_return(rpma_info_resolve_addr, RPMA_E_PROVIDER); will_return(rpma_info_resolve_addr, MOCK_ERRNO); will_return(rdma_destroy_id, 0); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); assert_null(ibv_ctx); } /* * get_ibvc__success_destroy_id_failed_passive - test if * rpma_utils_get_ibv_context() succeeds if rdma_destroy_id() fails */ static void get_ibvc__success_destroy_id_failed_passive(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); /* a local address (passive side) */ expect_value(rpma_info_bind_addr, info, MOCK_INFO); expect_value(rpma_info_bind_addr, id, &id); will_return(rpma_info_bind_addr, 0); will_return(rdma_destroy_id, MOCK_ERRNO); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(ibv_ctx, MOCK_VERBS); } /* * get_ibvc__success_destroy_id_failed_active - test if * rpma_utils_get_ibv_context() succeeds if rdma_destroy_id() fails */ static void
get_ibvc__success_destroy_id_failed_active(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); /* a remote address (active side) */ expect_value(rpma_info_resolve_addr, info, MOCK_INFO); expect_value(rpma_info_resolve_addr, id, &id); will_return(rpma_info_resolve_addr, 0); will_return(rdma_destroy_id, MOCK_ERRNO); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(ibv_ctx, MOCK_VERBS); } /* * get_ibvc__success_passive - test the 'all is OK' situation */ static void get_ibvc__success_passive(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); expect_value(rpma_info_bind_addr, info, MOCK_INFO); expect_value(rpma_info_bind_addr, id, &id); will_return(rpma_info_bind_addr, 0); will_return(rdma_destroy_id, 0); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_LOCAL, &ibv_ctx); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(ibv_ctx, MOCK_VERBS); } /* * get_ibvc__success_active - test the 'all is OK' situation */ static void get_ibvc__success_active(void **unused) { /* configure mocks */ will_return(rpma_info_new, MOCK_INFO); struct rdma_cm_id id; id.verbs = MOCK_VERBS; will_return(rdma_create_id, &id); expect_value(rpma_info_resolve_addr, info, MOCK_INFO); expect_value(rpma_info_resolve_addr, id, &id); will_return(rpma_info_resolve_addr, 0); will_return(rdma_destroy_id, 0); /* run test */ struct ibv_context *ibv_ctx = NULL; int ret = rpma_utils_get_ibv_context(MOCK_IP_ADDRESS, RPMA_UTIL_IBV_CONTEXT_REMOTE, &ibv_ctx); /* verify the results */ assert_int_equal(ret, 0); assert_ptr_equal(ibv_ctx, MOCK_VERBS); } int main(int argc, char *argv[]) { const struct CMUnitTest tests[] = { /* sanity */ cmocka_unit_test(sanity__type_unknown), cmocka_unit_test(get_ibvc__addr_NULL), cmocka_unit_test(get_ibvc__ibv_ctx_NULL), cmocka_unit_test(get_ibvc__type_unknown), cmocka_unit_test(get_ibvc__addr_NULL_ibv_ctx_NULL_type_unknown), cmocka_unit_test(get_ibvc__info_new_failed_E_PROVIDER), cmocka_unit_test(get_ibvc__info_new_failed_E_NOMEM), cmocka_unit_test(get_ibvc__create_id_failed), cmocka_unit_test(get_ibvc__bind_addr_failed_E_PROVIDER), cmocka_unit_test(get_ibvc__resolve_addr_failed_E_PROVIDER), cmocka_unit_test(get_ibvc__success_destroy_id_failed_passive), cmocka_unit_test(get_ibvc__success_destroy_id_failed_active), cmocka_unit_test(get_ibvc__success_passive), cmocka_unit_test(get_ibvc__success_active), }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/utils/utils-ibv_context_is_atomic_write_capable.c000066400000000000000000000051721443364775400265170ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2022, Fujitsu Limited */ /* * utils-ibv_context_is_atomic_write_capable.c -- a unit test * for rpma_utils_ibv_context_is_atomic_write_capable() */ #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "librpma.h" #include "test-common.h" #include "utils.h" /* * ibvc_atomic_write__cap_no -- atomic write attribute is not set * in attr.device_cap_flags_ex */ static void ibvc_atomic_write__cap_no(void **unused) { #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED /* configure mocks */ struct 
ibv_device_attr_ex attr = { .device_cap_flags_ex = 0, /* atomic write attribute is not set */ }; will_return(ibv_query_device_ex_mock, &attr); #endif /* run test */ int is_atomic_write_capable; int ret = rpma_utils_ibv_context_is_atomic_write_capable(MOCK_VERBS, &is_atomic_write_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_atomic_write_capable, 0); } #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED /* * ibvc_atomic_write__query_fail -- ibv_query_device_ex() failed */ static void ibvc_atomic_write__query_fail(void **unused) { /* configure mocks */ will_return(ibv_query_device_ex_mock, NULL); will_return(ibv_query_device_ex_mock, MOCK_ERRNO); /* run test */ int is_atomic_write_capable; int ret = rpma_utils_ibv_context_is_atomic_write_capable(MOCK_VERBS, &is_atomic_write_capable); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * ibvc_atomic_write__cap_yes -- atomic write attribute is set * in attr.device_cap_flags_ex */ static void ibvc_atomic_write__cap_yes(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr = { /* atomic write attribute is set */ .device_cap_flags_ex = IB_UVERBS_DEVICE_ATOMIC_WRITE, }; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_atomic_write_capable; int ret = rpma_utils_ibv_context_is_atomic_write_capable(MOCK_VERBS, &is_atomic_write_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_atomic_write_capable, 1); } #endif int main(int argc, char *argv[]) { MOCK_VERBS->abi_compat = __VERBS_ABI_IS_EXTENDED; #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED Verbs_context.query_device_ex = ibv_query_device_ex_mock; #endif Verbs_context.sz = sizeof(struct verbs_context); const struct CMUnitTest tests[] = { /* rpma_utils_ibv_context_is_atomic_write_capable() unit tests */ cmocka_unit_test(ibvc_atomic_write__cap_no), #ifdef NATIVE_ATOMIC_WRITE_SUPPORTED cmocka_unit_test(ibvc_atomic_write__query_fail), cmocka_unit_test(ibvc_atomic_write__cap_yes), #endif }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/utils/utils-ibv_context_is_flush_capable.c000066400000000000000000000067001443364775400251500ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright (c) 2023 Fujitsu Limited */ /* * utils-ibv_context_is_flush_capable.c -- a unit test * for rpma_utils_ibv_context_is_flush_capable() */ #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "librpma.h" #include "test-common.h" #include "utils.h" /* * ibvc_flush__cap_no_without_both_attributes -- both of flush attributes are not set * in attr.device_cap_flags_ex */ static void ibvc_flush__cap_no_without_both_attributes(void **unused) { #ifdef NATIVE_FLUSH_SUPPORTED /* configure mocks */ struct ibv_device_attr_ex attr = { /* both of flush attributes are not set */ .device_cap_flags_ex = 0, }; will_return(ibv_query_device_ex_mock, &attr); #endif /* run test */ int is_flush_capable; int ret = rpma_utils_ibv_context_is_flush_capable(MOCK_VERBS, &is_flush_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_flush_capable, 0); } #ifdef NATIVE_FLUSH_SUPPORTED /* * ibvc_flush__query_fail -- ibv_query_device_ex() failed */ static void ibvc_flush__query_fail(void **unused) { /* configure mocks */ will_return(ibv_query_device_ex_mock, NULL); will_return(ibv_query_device_ex_mock, MOCK_ERRNO); /* run test */ int is_flush_capable; int ret = rpma_utils_ibv_context_is_flush_capable(MOCK_VERBS, &is_flush_capable); /* verify the results */ assert_int_equal(ret, 
RPMA_E_PROVIDER); } /* * ibvc_flush__cap_no_with_one_attribute -- only one flush attribute is set * in attr.device_cap_flags_ex */ static void ibvc_flush__cap_no_with_one_attribute(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr; uint64_t flush_attrs[2] = {IB_UVERBS_DEVICE_FLUSH_GLOBAL, IB_UVERBS_DEVICE_FLUSH_PERSISTENT}; int num = sizeof(flush_attrs) / sizeof(flush_attrs[0]); for (int i = 0; i < num; i++) { /* only one flush attribute is set */ attr.device_cap_flags_ex = flush_attrs[i]; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_flush_capable; int ret = rpma_utils_ibv_context_is_flush_capable(MOCK_VERBS, &is_flush_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_flush_capable, 0); } } /* * ibvc_flush__cap_yes_with_both_attributes -- both of flush attributes are set * in attr.device_cap_flags_ex */ static void ibvc_flush__cap_yes_with_both_attributes(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr = { /* both of flush attributes are set */ .device_cap_flags_ex = IB_UVERBS_DEVICE_FLUSH_GLOBAL | IB_UVERBS_DEVICE_FLUSH_PERSISTENT, }; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_flush_capable; int ret = rpma_utils_ibv_context_is_flush_capable(MOCK_VERBS, &is_flush_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_flush_capable, 1); } #endif int main(int argc, char *argv[]) { MOCK_VERBS->abi_compat = __VERBS_ABI_IS_EXTENDED; #ifdef NATIVE_FLUSH_SUPPORTED Verbs_context.query_device_ex = ibv_query_device_ex_mock; #endif Verbs_context.sz = sizeof(struct verbs_context); const struct CMUnitTest tests[] = { /* rpma_utils_ibv_context_is_flush_capable() unit tests */ cmocka_unit_test(ibvc_flush__cap_no_without_both_attributes), #ifdef NATIVE_FLUSH_SUPPORTED cmocka_unit_test(ibvc_flush__query_fail), cmocka_unit_test(ibvc_flush__cap_no_with_one_attribute), cmocka_unit_test(ibvc_flush__cap_yes_with_both_attributes), #endif }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tests/unit/utils/utils-ibv_context_is_odp_capable.c000066400000000000000000000110731443364775400246100ustar00rootroot00000000000000// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* Copyright (c) 2023 Fujitsu Limited */ /* * utils-ibv_context_is_odp_capable.c -- a unit test for * rpma_utils_ibv_context_is_odp_capable() */ #include "cmocka_headers.h" #include "mocks-ibverbs.h" #include "librpma.h" #include "test-common.h" /* * ibvc_odp__ibv_ctx_NULL -- ibv_ctx NULL is invalid */ static void ibvc_odp__ibv_ctx_NULL(void **unused) { /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(NULL, &is_odp_capable); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * ibvc_odp__cap_NULL -- is_odp_capable NULL is invalid */ static void ibvc_odp__cap_NULL(void **unused) { /* run test */ int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } /* * ibvc_odp__ibv_ctx_cap_NULL -- ibv_ctx and is_odp_capable NULL are invalid */ static void ibvc_odp__ibv_ctx_cap_NULL(void **unused) { /* run test */ int ret = rpma_utils_ibv_context_is_odp_capable(NULL, NULL); /* verify the results */ assert_int_equal(ret, RPMA_E_INVAL); } #ifdef ON_DEMAND_PAGING_SUPPORTED /* * ibvc_odp__query_fail -- ibv_query_device_ex() failed */ static void ibvc_odp__query_fail(void **unused) { /* configure mocks */ will_return(ibv_query_device_ex_mock, NULL);
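/* NULL makes the mocked query fail; the next queued value is the errno the mock is expected to report */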
will_return(ibv_query_device_ex_mock, MOCK_ERRNO); /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, &is_odp_capable); /* verify the results */ assert_int_equal(ret, RPMA_E_PROVIDER); } /* * ibvc_odp__general_caps_no -- ibv_odp_caps.general_caps IBV_ODP_SUPPORT bit * is not set */ static void ibvc_odp__general_caps_no(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr = { .odp_caps = { .general_caps = 0, /* IBV_ODP_SUPPORT not set */ .per_transport_caps = { IBV_ODP_SUPPORT_WRITE | IBV_ODP_SUPPORT_READ, 0, 0 }, } }; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, &is_odp_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_odp_capable, 0); } /* * ibvc_odp__rc_caps_not_all -- ibv_odp_caps.per_transport_caps.rc_odp_caps * not all required bits are set */ static void ibvc_odp__rc_caps_not_all(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr = { .odp_caps = { .general_caps = IBV_ODP_SUPPORT, .per_transport_caps = { /* IBV_ODP_SUPPORT_READ not set */ IBV_ODP_SUPPORT_WRITE, 0, 0 }, } }; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, &is_odp_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_odp_capable, 0); } /* * ibvc_odp__odp_capable -- all required bits are set */ static void ibvc_odp__odp_capable(void **unused) { /* configure mocks */ struct ibv_device_attr_ex attr = { .odp_caps = { .general_caps = IBV_ODP_SUPPORT, .per_transport_caps = { IBV_ODP_SUPPORT_WRITE | IBV_ODP_SUPPORT_READ, 0, 0 }, } }; will_return(ibv_query_device_ex_mock, &attr); /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, &is_odp_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_odp_capable, 1); } #else /* * ibvc_odp__odp_incapable -- ON_DEMAND_PAGING_SUPPORTED is not defined */ static void ibvc_odp__odp_incapable(void **unused) { /* run test */ int is_odp_capable; int ret = rpma_utils_ibv_context_is_odp_capable(MOCK_VERBS, &is_odp_capable); /* verify the results */ assert_int_equal(ret, 0); assert_int_equal(is_odp_capable, 0); } #endif int main(int argc, char *argv[]) { MOCK_VERBS->abi_compat = __VERBS_ABI_IS_EXTENDED; #ifdef ON_DEMAND_PAGING_SUPPORTED Verbs_context.query_device_ex = ibv_query_device_ex_mock; #endif Verbs_context.sz = sizeof(struct verbs_context); const struct CMUnitTest tests[] = { /* rpma_utils_ibv_context_is_odp_capable() unit tests */ cmocka_unit_test(ibvc_odp__ibv_ctx_NULL), cmocka_unit_test(ibvc_odp__cap_NULL), cmocka_unit_test(ibvc_odp__ibv_ctx_cap_NULL), #ifdef ON_DEMAND_PAGING_SUPPORTED cmocka_unit_test(ibvc_odp__query_fail), cmocka_unit_test(ibvc_odp__general_caps_no), cmocka_unit_test(ibvc_odp__rc_caps_not_all), cmocka_unit_test(ibvc_odp__odp_capable), #else cmocka_unit_test(ibvc_odp__odp_incapable), #endif }; return cmocka_run_group_tests(tests, NULL, NULL); } rpma-1.3.0/tools/000077500000000000000000000000001443364775400136335ustar00rootroot00000000000000rpma-1.3.0/tools/README.md000066400000000000000000000006111443364775400151100ustar00rootroot00000000000000# librpma Tools This directory contains tools that could be useful for librpma users. * [ddio.sh](ddio.sh) script allows monitoring and controlling Intel® Direct Data I/O configuration on Intel® Cascade Lake platforms. 
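  A minimal usage sketch (assuming root privileges; `0000:17:00.0` is only a placeholder, pass the PCIe address of your platform's root port in the format accepted by setpci(8)):

  ```sh
  # query the current DDIO state of the given root port
  # (the script exits with 1 when DDIO is enabled and 0 when it is disabled)
  sudo ./ddio.sh -d 0000:17:00.0 -q -v

  # disable DDIO on that root port; '-s enable' restores the default register values
  sudo ./ddio.sh -d 0000:17:00.0 -s disable
  ```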
* [config_softroce.sh](config_softroce.sh) script is used to configure SoftRoCE. It can also be run from the CMake build directory using `make config_softroce`. rpma-1.3.0/tools/config_softroce.sh000077500000000000000000000076261443364775400173520ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # config_softroce.sh - configure SoftRoCE # # Usage: config_softroce.sh [<network_interface>|verify] # # Options: # <network_interface> - configure SoftRoCE for the given network interface # or for the first active and up one if no argument # is given # verify - verify if SoftRoCE is already configured # if [ "$1" == "verify" ]; then VERIFY=1 NET_IF="" else VERIFY=0 NET_IF=$1 fi MODULE="rdma_rxe" DIR="/lib/modules/$(uname -r)" STATE_OK="state ACTIVE physical_state LINK_UP" # BASH_ENV is used by CircleCI: # https://circleci.com/docs/2.0/env-vars/#setting-an-environment-variable-in-a-shell-command [ "$BASH_ENV" == "" ] && BASH_ENV=/dev/null function get_IP4() { NET_IF=$1 ip -4 -j -p a show $NET_IF | grep -e local | cut -d'"' -f4 } function print_IP4_of() { NET_IF=$1 echo -n "The SoftRoCE-configured network interface '$NET_IF' has " IP=$(get_IP4 $NET_IF) if [ "$IP" != "" ]; then echo "been assigned the following IP address: $IP" # set RPMA_TESTING_IP for CircleCI echo "export RPMA_TESTING_IP=$IP" >> $BASH_ENV else echo "no IP address assigned" fi } function print_IP4_all() { echo "The IP addresses of the SoftRoCE-configured network interfaces:" NET_IFS=$(rdma link show | grep -e "$STATE_OK" | cut -d' ' -f8) for NET_IF in $NET_IFS; do IP=$(get_IP4 $NET_IF) if [ "$IP" != "" ]; then echo "- network interface: $NET_IF, IP address: $IP" # set RPMA_TESTING_IP for CircleCI echo "export RPMA_TESTING_IP=$IP" >> $BASH_ENV else echo "- network interface: $NET_IF, no IP address assigned" fi done } if [ $(lsmod | grep -e $MODULE | wc -l) -lt 1 ]; then N_MODULES=$(find $DIR -name "$MODULE.ko*" | wc -l) if [ $N_MODULES -lt 1 ]; then echo "Error: cannot find the '$MODULE' module in the '$DIR' directory" exit 1 fi if ! sudo modprobe $MODULE; then echo "Error: cannot load the '$MODULE' module" sudo modprobe -v $MODULE exit 1 fi fi if ! which ip > /dev/null; then echo "Error: cannot find the 'ip' command. Install the 'iproute/iproute2' package" exit 1 fi if ! which rdma > /dev/null; then echo "Error: cannot find the 'rdma' command. Install the 'iproute/iproute2' package" exit 1 fi if ! rdma link show > /dev/null ; then echo "Error: the 'rdma link show' command failed" exit 1 fi if [ "$NET_IF" == "" ]; then RDMA_LINKS=$(rdma link show | grep -e "$STATE_OK" | wc -l) if [ $RDMA_LINKS -gt 0 ]; then if [ $VERIFY -eq 0 ]; then echo "SoftRoCE has already been configured:" rdma link show | grep -e "$STATE_OK" print_IP4_all fi exit 0 elif [ $VERIFY -eq 1 ]; then echo "Error: SoftRoCE has not been configured yet!" exit 1 fi # pick up the first 'up' network interface NET_IF=$(ip link | grep -v -e "LOOPBACK" | grep -e "state UP" | head -n1 | cut -d: -f2 | cut -d' ' -f2) if [ "$NET_IF" == "" ]; then # # Look for a USB Ethernet network interface, # which may not have 'state UP', # but only 'UP' and 'state UNKNOWN', for example: # ... ... state UNKNOWN ... # NET_IF=$(ip link | grep -v -e "LOOPBACK" | grep -e "UP" | grep -e "state UNKNOWN" | head -n1 | cut -d: -f2 | cut -d' ' -f2) if [ "$NET_IF" == "" ]; then echo "Error: cannot find an active and up network interface" exit 1 fi fi fi echo "Configuring SoftRoCE for the '$NET_IF' network interface..."
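# create the rxe (SoftRoCE) link on top of the chosen network interface
# (requires the rdma_rxe module loaded above and root privileges)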
RXE_NAME="rxe_$NET_IF" sudo rdma link add $RXE_NAME type rxe netdev $NET_IF if [ $? -ne 0 ]; then echo "Error: configuring SoftRoCE failed" exit 1 fi RDMA_LINKS=$(rdma link show | grep -e "$STATE_OK" | grep -e "$NET_IF" | wc -l) if [ $RDMA_LINKS -lt 1 ]; then echo "Error: configuring SoftRoCE for the '$NET_IF' network interface failed" exit 1 fi echo "SoftRoCE for the '$NET_IF' network interface was successfully configured:" rdma link show | grep -e "$NET_IF" print_IP4_of $NET_IF rpma-1.3.0/tools/ddio.sh000077500000000000000000000154501443364775400151160ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # # # ddio -- query and toggle DDIO state per PCIe root port (EXPERIMENTAL) # # # usage -- print usage message and exit # # usage [] # function usage() { [ -n "$1" ] && echo Error: $1 cat >&2 <]:]]:][][.[]] For details please see setpci(8). -s DDIO-state desired DDIO state, either enable or disable Note: 'enable' is an equivalent of restoring default registers values For details please use -e. -q query DDIO state exit code 1 means DDIO is enabled (on) exit code 0 means DDIO is disabled ('off') -v be verbose -h print this help message -e print an excerpt from the documentation describing a principle of operation of this tool EOF exit 1 } # # excerpt -- print a principle of operation and exit # function excerpt() { cat <> $DDIO_LOG true # do not fail if the above condition is not true } # # _register_get -- read a register value # function _register_get() { register="$1" valhex=$(setpci -s "$device" "$register") valdec=$((16#$valhex)) _log "setpci -s $device $register" _log "=$valhex" echo $valdec } # # _register_set -- write a register value # function _register_set() { register="$1" valdec="$2" valhex=`printf "%x\n" $valdec` out=$(setpci -s $device $register=$valhex 2>&1) _log "setpci -s $device $register=$valhex" _log "=$?" _log "$out" } # # _bit_git -- get bit from a value # function _bit_get() { valdec="$1" bit="$2" valdec=$(($valdec & $bit)) echo "$valdec" } # # _bit_enable -- enable bit in a value # function _bit_enable() { valdec="$1" bit="$2" valdec=$(($valdec | $bit)) echo "$valdec" } # # _bit_disable -- disable bit in a value # function _bit_disable() { valdec="$1" bit="$2" valdec=$(($valdec & ~($bit))) echo "$valdec" } # # _require_device -- verify the provided device is valid # function _require_device() { [ -z "$device" ] && usage "device required: $device" setpci -s "$device" "$perfctrlsts_0_reg" 2>&1 > /dev/null [ $? 
-ne 0 ] && usage "invalid device: $device" true # do not fail if the above condition is not true } # # _require_root -- verify the script is run with root privileges # function _require_root() { [ $EUID -ne 0 ] && usage "root privileges required" true # do not fail if the above condition is not true } # # ddio_query -- read DDIO state # function ddio_query() { _require_device _require_root # read registers value pcs0=$(_register_get "$perfctrlsts_0_reg") pcs1=$(_register_get "$perfctrlsts_1_reg") # read meaningful bits from the registers use_alloacting_flow_wr=$(_bit_get "$pcs0" "$use_alloacting_flow_wr_bit") nosnoopopwren=$(_bit_get "$pcs0" "$nosnoopopwren_bit") tphdis=$(_bit_get "$pcs1" "$tphdis_bit") ddio_on="DDIO is enabled" ddio_off="DDIO is disabled" [ $use_alloacting_flow_wr -ne 0 ] && { [ $verbose -eq 1 ] && echo "$ddio_on: use_allocating_flow_wr != 0" 1>&2 return 1 } [ $nosnoopopwren -ne 0 ] && { [ $verbose -eq 1 ] && echo "$ddio_on: nosnoopopwren != 0" 1>&2 return 1 } [ $tphdis -eq 0 ] && { [ $verbose -eq 1 ] && echo "$ddio_on: TPHDIS == 0" 1>&2 return 1 } [ $verbose -eq 1 ] && echo "$ddio_off" 1>&2 return 0 } # # _ddio_set_disable -- turn off DDIO # function _ddio_set_disable() { [ $use_alloacting_flow_wr -ne 0 ] && { [ $verbose -eq 1 ] && echo "set use_allocating_flow_wr = 0" 1>&2 pcs0=$(_bit_disable "$pcs0" "$use_alloacting_flow_wr_bit") pcs0_mod=1 } [ $nosnoopopwren -ne 0 ] && { [ $verbose -eq 1 ] && echo "set nosnoopopwren = 0" 1>&2 pcs0=$(_bit_disable "$pcs0" "$nosnoopopwren_bit") pcs0_mod=1 } [ $tphdis -eq 0 ] && { [ $verbose -eq 1 ] && echo "set TPHDIS = 1" 1>&2 pcs1=$(_bit_enable "$pcs1" "$tphdis_bit") pcs1_mod=1 } true # do not fail if the above condition is not true } # # _ddio_set_enable -- turn on DDIO (restore defaults) # function _ddio_set_enable() { def="(default)" [ $use_alloacting_flow_wr -eq 0 ] && { [ $verbose -eq 1 ] && echo "set use_allocating_flow_wr = 1 $def" 1>&2 pcs0=$(_bit_enable "$pcs0" "$use_alloacting_flow_wr_bit") pcs0_mod=1 } [ $nosnoopopwren -ne 0 ] && { [ $verbose -eq 1 ] && echo "set nosnoopopwren = 0 $def" 1>&2 pcs0=$(_bit_disable "$pcs0" "$nosnoopopwren_bit") pcs0_mod=1 } [ $tphdis -ne 0 ] && { [ $verbose -eq 1 ] && echo "set TPHDIS = 0" 1>&2 pcs1=$(_bit_disable "$pcs1" "$tphdis_bit") pcs1_mod=1 } true # do not fail if the above condition is not true } # # ddio_set -- write DDIO state # function ddio_set() { _require_device _require_root # read current registers value pcs0=$(_register_get "$perfctrlsts_0_reg") pcs1=$(_register_get "$perfctrlsts_1_reg") # read meaningful bits from the registers use_alloacting_flow_wr=$(_bit_get "$pcs0" "$use_alloacting_flow_wr_bit") nosnoopopwren=$(_bit_get "$pcs0" "$nosnoopopwren_bit") tphdis=$(_bit_get "$pcs1" "$tphdis_bit") # setup the registers modification indicators pcs0_mod=0 pcs1_mod=0 _ddio_set_$ddio # write the new registers value [ $pcs0_mod -eq 1 ] && _register_set "$perfctrlsts_0_reg" "$pcs0" [ $pcs1_mod -eq 1 ] && _register_set "$perfctrlsts_1_reg" "$pcs1" true # do not fail if the above condition is not true } # # defaults # device= op=usage ddio= verbose=0 # # command-line argument processing... # args=`getopt d:s:qvhe $*` [ $? 
!= 0 ] && usage set -- $args for arg do case "$arg" in -d) device="$2" shift 2 ;; -s) op="ddio_set" case "$2" in enable|disable) ;; *) usage "invalid DDIO-state value '$2'" ;; esac ddio="$2" shift 2 ;; -q) op="ddio_query" shift ;; -v) verbose=1 shift ;; -h) usage ;; -e) excerpt ;; esac done # # run a required operation # $op rpma-1.3.0/travis.yml-disabled000066400000000000000000000007351443364775400163000ustar00rootroot00000000000000dist: trusty sudo: required language: c services: - docker env: global: - GITHUB_REPO=pmem/rpma - DOCKER_REPO=ghcr.io/pmem/rpma - GH_CR_ADDR=ghcr.io matrix: - OS=ubuntu OS_VER=20.04 TYPE=normal CC=gcc - OS=fedora OS_VER=32 TYPE=normal CC=gcc - OS=fedora OS_VER=32 TYPE=normal CC=clang before_install: - echo $TRAVIS_COMMIT_RANGE - export HOST_WORKDIR=`pwd` - cd utils/docker - ./pull-or-rebuild-image.sh script: - ./build.sh rpma-1.3.0/utils/000077500000000000000000000000001443364775400136335ustar00rootroot00000000000000rpma-1.3.0/utils/LICENSE.CDDL000066400000000000000000000377701443364775400153630ustar00rootroot00000000000000COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 1. Definitions. 1.1. "Contributor" means each individual or entity that creates or contributes to the creation of Modifications. 1.2. "Contributor Version" means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. 1.3. "Covered Software" means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. 1.4. "Executable" means the Covered Software in any form other than Source Code. 1.5. "Initial Developer" means the individual or entity that first makes Original Software available under this License. 1.6. "Larger Work" means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. 1.7. "License" means this document. 1.8. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. 1.9. "Modifications" means the Source Code and Executable form of any of the following: A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; B. Any new file that contains any part of the Original Software or previous Modification; or C. Any new file that is contributed or otherwise made available under the terms of this License. 1.10. "Original Software" means the Source Code and Executable form of computer software code that is originally released under this License. 1.11. "Patent Claims" means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. 1.12. "Source Code" means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. 1.13. "You" (or "Your") means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, "You" includes any entity which controls, is controlled by, or is under common control with You. 
For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants. 2.1. The Initial Developer Grant. Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and (b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). (c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. (d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. 2.2. Contributor Grant. Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and (b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). (c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. (d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. 3. Distribution Obligations. 3.1. Availability of Source Code. 
Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. 3.2. Modifications. The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. 3.3. Required Notices. You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. 3.4. Application of Additional Terms. You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor. You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. 3.5. Distribution of Executable Versions. You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipient's rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. 3.6. Larger Works. You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. 4. Versions of the License. 4.1. New Versions. Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. 
Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. 4.2. Effect of New Versions. You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. 4.3. Modified Versions. When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. 5. DISCLAIMER OF WARRANTY. COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU. SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. 6. TERMINATION. 6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. 6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as "Participant") alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. 6.3. 
In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. 7. LIMITATION OF LIABILITY. UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. 8. U.S. GOVERNMENT END USERS. The Covered Software is a "commercial item," as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer software" (as that term is defined at 48 C.F.R. ¤ 252.227-7014(a)(1)) and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S. Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. 9. MISCELLANEOUS. This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. 10. RESPONSIBILITY FOR CLAIMS. 
As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. rpma-1.3.0/utils/check-area.sh000077500000000000000000000031641443364775400161610ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2020, Intel Corporation # # Finds applicable area name for specified commit id. # if [ -z "$1" ]; then echo "Missing commit id argument." exit 1 fi files=$(git log $1 -1 --format=oneline --name-only | grep -v -e "$1") git show -q $1 | cat echo echo "Modified files:" echo "$files" # if [ $COMMON -eq 1 ] then the 'common' area should be printed COMMON=1 function categorize() { category=$1 shift cat_files=`echo "$files" | grep $*` if [ -n "${cat_files}" ]; then echo "$category" files=`echo "$files" | grep -v $*` COMMON=0 fi } echo echo "Areas computed based on the list of modified files: (see utils/check-area.sh for full algorithm)" categorize rpma -e "^src/" categorize test -e "^tests/" categorize tools -e "^tools/" categorize examples -e "^examples/" categorize doc -e "^doc/" -e ".md\$" -e "^ChangeLog" -e "README" categorize common -e "^utils/" \ -e ".inc\$" \ -e ".yml\$" \ -e ".gitattributes" \ -e ".gitignore" \ -e "^.mailmap\$" \ -e "Makefile\$" # print out the 'common' area if nothing else was printed [ $COMMON -eq 1 ] && echo "common" echo echo "If the above list contains more than 1 entry, please consider splitting" echo "your change into more commits, unless those changes don't make sense " echo "individually (they do not build, tests do not pass, etc)." echo "For example, it's perfectly fine to use 'obj' prefix for one commit that" echo "changes libpmemobj source code, its tests and documentation." if [ -n "$files" ]; then echo echo "Uncategorized files:" echo "$files" fi rpma-1.3.0/utils/check-commit.sh000077500000000000000000000017141443364775400165400ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Used to check whether all the commit messages in a pull request # follow the GIT/RPMA guidelines. # # usage: ./check-commit.sh commit # if [ -z "$1" ]; then echo "Usage: check-commit.sh commit-id" exit 1 fi echo "Checking $1" subject=$(git log --format="%s" -n 1 $1) if [[ $subject =~ ^Merge.* ]]; then # skip exit 0 fi if [[ $subject =~ ^Revert.* ]]; then # skip exit 0 fi # valid area names AREAS="rpma\|test\|tools\|examples\|doc\|common" prefix=$(echo $subject | sed -n "s/^\($AREAS\)\:.*/\1/p") if [ "$prefix" = "" ]; then echo "FAIL: subject line in commit message does not contain valid area name" echo `dirname $0`/check-area.sh $1 exit 1 fi commit_len=$(git log --format="%s%n%b" -n 1 $1 | wc -L) if [ $commit_len -gt 73 ]; then echo "FAIL: commit message exceeds 72 chars per line ($commit_len)" echo git log -n 1 $1 | cat exit 1 fi rpma-1.3.0/utils/check-commits.sh000077500000000000000000000022261443364775400167220ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # Used to check whether all the commit messages in a pull request # follow the GIT/RPMA guidelines.
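# Every commit in the given range (e.g. origin/main..HEAD) is verified with check-commit.sh.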
# # usage: ./check-commits.sh [range] # if [ -z "$1" ]; then # on CI run this check only for pull requests if [ -n "$CI_REPO_SLUG" ]; then if [[ "$CI_REPO_SLUG" != "$GITHUB_REPO" \ || $CI_EVENT_TYPE != "pull_request" ]]; then echo "SKIP: $0 can only be executed for pull requests to $GITHUB_REPO" exit 0 fi fi # CI_COMMIT_RANGE can be invalid for force pushes - use another # method to determine the list of commits if [[ $(git rev-list $CI_COMMIT_RANGE 2>/dev/null) || -n "$CI_COMMIT_RANGE" ]]; then MERGE_BASE=$(echo $CI_COMMIT_RANGE | cut -d. -f1) [ -z $MERGE_BASE ] && \ MERGE_BASE=$(git log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) RANGE=$MERGE_BASE..$CI_COMMIT else MERGE_BASE=$(git log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) RANGE=$MERGE_BASE..HEAD fi else RANGE="$1" fi COMMITS=$(git log --pretty=%H $RANGE) set -e for commit in $COMMITS; do `dirname $0`/check-commit.sh $commit done rpma-1.3.0/utils/check_license/000077500000000000000000000000001443364775400164125ustar00rootroot00000000000000rpma-1.3.0/utils/check_license/check-headers.sh000077500000000000000000000126211443364775400214410ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # Copyright (c) 2022 Fujitsu Limited # check-headers.sh - check copyright and license in source files SELF=$0 function usage() { echo "Usage: $SELF [-h|-v|-a]" echo " -h, --help this help message" echo " -v, --verbose verbose mode" echo " -a, --all check all files (only modified files are checked by default)" } if [ "$#" -lt 2 ]; then usage >&2 exit 2 fi SOURCE_ROOT=$1 shift LICENSE=$1 shift PATTERN=`mktemp` TMP=`mktemp` TMP2=`mktemp` TEMPFILE=`mktemp` rm -f $PATTERN $TMP $TMP2 if [ "$1" == "-h" -o "$1" == "--help" ]; then usage exit 0 fi # # "git -C ${SOURCE_ROOT}" is not supported on CentOS-7, # so let's do it using the 'cd' command. # Leave 'export GIT="git"' for consistency with the PMDK's version. # cd ${SOURCE_ROOT} export GIT="git" $GIT rev-parse || exit 1 if [ -f $SOURCE_ROOT/.git/shallow ]; then SHALLOW_CLONE=1 echo echo "Warning: This is a shallow clone. Checking dates in copyright headers" echo " will be skipped in case of files that have no history." echo else SHALLOW_CLONE=0 fi VERBOSE=0 CHECK_ALL=0 while [ "$1" != "" ]; do case $1 in -v|--verbose) VERBOSE=1 ;; -a|--all) CHECK_ALL=1 ;; esac shift done if [ $CHECK_ALL -eq 0 ]; then CURRENT_COMMIT=$($GIT log --pretty=%H -1) MERGE_BASE=$($GIT merge-base HEAD origin/main 2>/dev/null) [ -z $MERGE_BASE ] && \ MERGE_BASE=$($GIT log --pretty="%cN:%H" | grep GitHub | head -n1 | cut -d: -f2) [ -z "$MERGE_BASE" -o "$CURRENT_COMMIT" == "$MERGE_BASE" ] && \ CHECK_ALL=1 fi if [ $CHECK_ALL -eq 1 ]; then echo "Checking copyright headers of all files..." GIT_COMMAND="ls-tree -r --name-only HEAD" else if [ $VERBOSE -eq 1 ]; then echo echo "Warning: will check copyright headers of modified files only," echo " in order to check all files issue the following command:" echo " $ $SELF -a" echo " (e.g.: $ $SELF $SOURCE_ROOT $LICENSE -a)" echo fi echo "Checking copyright headers of modified files only..." 
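# list only the files that differ between the merge base and the current commit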
GIT_COMMAND="diff --name-only $MERGE_BASE $CURRENT_COMMIT" fi FILES=$($GIT $GIT_COMMAND | ${SOURCE_ROOT}/utils/check_license/file-exceptions.sh | \ grep -E -e '\.[chs]$' -e '\.[ch]pp$' -e '\.sh$' \ -e '\.py$' -e '\.link$' -e 'Makefile*' -e 'TEST*' \ -e '/common.inc$' -e '/match$' -e '/check_whitespace$' \ -e 'LICENSE$' -e 'CMakeLists.txt$' -e '\.cmake$' -e '/Dockerfile.*' | \ xargs) RV=0 for file in $FILES ; do # The src_path is a path which should be used in every command except git. # git is called with -C flag so filepaths should be relative to SOURCE_ROOT src_path="${SOURCE_ROOT}/$file" [ ! -f $src_path ] && continue # ensure that file is UTF-8 encoded ENCODING=`file -b --mime-encoding $src_path` iconv -f $ENCODING -t "UTF-8" $src_path > $TEMPFILE if ! grep -q "SPDX-License-Identifier: $LICENSE" $src_path; then echo "error: no $LICENSE SPDX tag in file: $src_path" >&2 RV=1 elif [[ $file == *.c ]]; then if ! grep -q -e "// SPDX-License-Identifier: $LICENSE" $src_path; then echo "error: wrong format of SPDX tag in the file: $src_path" >&2 RV=1 fi elif [[ $file == *.h ]]; then if ! grep -q -e "/\* SPDX-License-Identifier: $LICENSE \*/" $src_path; then echo "error: wrong format of SPDX tag in the file: $src_path" >&2 RV=1 fi elif [[ $file != LICENSE ]]; then if ! grep -q -e "# SPDX-License-Identifier: $LICENSE" $src_path; then echo "error: wrong format of SPDX tag in the file: $src_path" >&2 RV=1 fi fi if [ $SHALLOW_CLONE -eq 0 ]; then $GIT log --no-merges --format="%ai %aE" -- $file | sort > $TMP else # mark the grafted commits (commits with no parents) $GIT log --no-merges --format="%ai %aE grafted-%p-commit" -- $file | sort > $TMP fi # skip checking dates for non-Intel commits [[ ! $(tail -n1 $TMP) =~ "@intel.com" ]] && continue # skip checking dates for new files [ $(cat $TMP | wc -l) -le 1 ] && continue # grep out the grafted commits (commits with no parents) # and skip checking dates for non-Intel commits grep -v -e "grafted--commit" $TMP | grep -e "@intel.com" > $TMP2 [ $(cat $TMP2 | wc -l) -eq 0 ] && continue FIRST=`head -n1 $TMP2` LAST=` tail -n1 $TMP2` YEARS=`sed ' /Copyright [0-9-]\+.*, Intel Corporation/!d s/.*Copyright \([0-9]\+\)-\([0-9]\+\),.*/\1-\2/ s/.*Copyright \([0-9]\+\),.*/\1-\1/' $src_path` if [ -z "$YEARS" ]; then echo >&2 "No copyright years in $src_path" RV=1 continue fi HEADER_FIRST=`echo $YEARS | cut -d"-" -f1` HEADER_LAST=` echo $YEARS | cut -d"-" -f2` COMMIT_FIRST=`echo $FIRST | cut -d"-" -f1` COMMIT_LAST=` echo $LAST | cut -d"-" -f1` if [ "$COMMIT_FIRST" != "" -a "$COMMIT_LAST" != "" ]; then if [ $HEADER_LAST -lt $COMMIT_LAST ]; then if [ $HEADER_FIRST -lt $COMMIT_FIRST ]; then COMMIT_FIRST=$HEADER_FIRST fi COMMIT_LAST=`date +%G` if [ $COMMIT_FIRST -eq $COMMIT_LAST ]; then NEW=$COMMIT_LAST else NEW=$COMMIT_FIRST-$COMMIT_LAST fi echo "$file:1: error: wrong copyright date: (is: $YEARS, should be: $NEW)" >&2 RV=1 fi else echo "error: unknown commit dates in file: $file" >&2 RV=1 fi done rm -f $TMP $TMP2 $TEMPFILE ${SOURCE_ROOT}/utils/check_license/check-ms-license.pl $FILES # check if error found if [ $RV -eq 0 ]; then echo "Copyright headers are OK." else echo "Error(s) in copyright headers found!" 
>&2 fi exit $RV rpma-1.3.0/utils/check_license/check-ms-license.pl000077500000000000000000000042501443364775400220650ustar00rootroot00000000000000#!/usr/bin/perl -w # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation use Digest::MD5 "md5_hex"; my $BSD3 = <; close F; next unless /Copyright.*(Microsoft Corporation|FUJITSU)/; s/^ \*//mg; s/^#//mg; if (index($_, $BSD3) == -1) { $err = 1; print STDERR "Outside copyright but no/wrong license text in $f\n"; } } exit $err rpma-1.3.0/utils/check_license/file-exceptions.sh000077500000000000000000000003611443364775400220470ustar00rootroot00000000000000#!/bin/sh -e # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # file-exceptions.sh - filter out files not checked for copyright and license grep -v -E -e 'src/non-existing-path-to-be-replaced-with-real-one' rpma-1.3.0/utils/check_whitespace000077500000000000000000000061701443364775400170560ustar00rootroot00000000000000#!/usr/bin/env perl # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2015-2020, Intel Corporation # # # check_whitespace -- scrub source tree for whitespace errors # use strict; use warnings; use File::Basename; use File::Find; use Encode; use v5.10; my $Me = $0; $Me =~ s,.*/,,; $SIG{HUP} = $SIG{INT} = $SIG{TERM} = $SIG{__DIE__} = sub { die @_ if $^S; my $errstr = shift; die "$Me: ERROR: $errstr"; }; my $Errcount = 0; # # err -- emit error, keep total error count # sub err { warn @_, "\n"; $Errcount++; } # # decode_file_as_string -- slurp an entire file into memory and decode # sub decode_file_as_string { my ($full, $file) = @_; my $fh; open($fh, '<', $full) or die "$full $!\n"; local $/; $_ = <$fh>; close $fh; # check known encodings or die my $decoded; my @encodings = ("UTF-8", "UTF-16", "UTF-16LE", "UTF-16BE"); foreach my $enc (@encodings) { eval { $decoded = decode( $enc, $_, Encode::FB_CROAK ) }; if (!$@) { $decoded =~ s/\R/\n/g; return $decoded; } } die "$Me: ERROR: Unknown file encoding"; } # # check_whitespace -- run the checks on the given file # sub check_whitespace { my ($full, $file) = @_; my $line = 0; my $eol; my $nf = 0; my $fstr = decode_file_as_string($full, $file); for (split /^/, $fstr) { $line++; $eol = /[\n]/s; if (/^\.nf$/) { err("$full:$line: ERROR: nested .nf") if $nf; $nf = 1; } elsif (/^\.fi$/) { $nf = 0; } elsif ($nf == 0) { chomp; err("$full:$line: ERROR: trailing whitespace") if /\s$/; err("$full:$line: ERROR: spaces before tabs") if / \t/; } } err("$full:$line: .nf without .fi") if $nf; err("$full:$line: noeol") unless $eol; } sub check_whitespace_with_exc { my ($full) = @_; $_ = $full; $_ = basename($full); return 0 unless /^(README.*|LICENSE.*|CMakeLists.txt|.gitignore|check_whitespace|.*\.([chp13s]|cc|sh|map|cpp|hpp|inc|md|cmake))$/; return 0 if -z; check_whitespace($full, $_); return 1; } my $verbose = 0; my $force = 0; my $recursive = 0; sub check { my ($file) = @_; my $r; if ($force) { $r = check_whitespace($file, basename($file)); } else { $r = check_whitespace_with_exc($file); } if ($verbose) { if ($r == 0) { printf("skipped $file\n"); } else { printf("checked $file\n"); } } } my @files = (); foreach my $arg (@ARGV) { if ($arg eq '-v') { $verbose = 1; next; } if ($arg eq '-f') { $force = 1; next; } if ($arg eq '-r') { $recursive = 1; next; } if ($arg eq '-g') { @files = `git ls-tree -r --name-only HEAD`; chomp(@files); next; } if ($arg eq '-h') { printf "Options: -g - check all files tracked by git -r dir - recursively check all files in specified directory -v verbose - print whether file was 
checked or not -f force - disable blacklist\n"; exit 1; } if ($recursive == 1) { find(sub { my $full = $File::Find::name; if (!$force && ($full eq './.git')) { $File::Find::prune = 1; return; } return unless -f; push @files, $full; }, $arg); $recursive = 0; next; } push @files, $arg; } if (!@files) { printf "Empty file list!\n"; } foreach (@files) { check($_); } exit $Errcount; rpma-1.3.0/utils/cstyle000077500000000000000000000654221443364775400150750ustar00rootroot00000000000000#!/usr/bin/env perl # SPDX-License-Identifier: CDDL-1.0 # Copyright 2008 Sun Microsystems, Inc. # Copyright 2017-2020, Intel Corporation # @(#)cstyle 1.58 98/09/09 (from shannon) #ident "%Z%%M% %I% %E% SMI" # # cstyle - check for some common stylistic errors. # # cstyle is a sort of "lint" for C coding style. # It attempts to check for the style used in the # kernel, sometimes known as "Bill Joy Normal Form". # # There's a lot this can't check for, like proper indentation # of code blocks. There's also a lot more this could check for. # # A note to the non perl literate: # # perl regular expressions are pretty much like egrep # regular expressions, with the following special symbols # # \s any space character # \S any non-space character # \w any "word" character [a-zA-Z0-9_] # \W any non-word character # \d a digit [0-9] # \D a non-digit # \b word boundary (between \w and \W) # \B non-word boundary # require 5.0; use IO::File; use Getopt::Std; use strict; use warnings; my $usage = "usage: cstyle [-chpvCP] [-o constructs] file ... -c check continuation indentation inside functions -h perform heuristic checks that are sometimes wrong -p perform some of the more picky checks -v verbose -C don't check anything in header block comments -P check for use of non-POSIX types -o constructs allow a comma-separated list of optional constructs: doxygen allow doxygen-style block comments (/** /*!) src2man allow src2man-style block comments (/** n) splint allow splint-style lint comments (/*@ ... @*/) "; my %opts; if (!getopts("cho:pvCP", \%opts)) { print $usage; exit 2; } my $check_continuation = $opts{'c'}; my $heuristic = $opts{'h'}; my $picky = $opts{'p'}; my $verbose = $opts{'v'}; my $ignore_hdr_comment = $opts{'C'}; my $check_posix_types = $opts{'P'}; my $doxygen_comments = 0; my $splint_comments = 0; my $src2man_comments = 0; if (defined($opts{'o'})) { for my $x (split /,/, $opts{'o'}) { if ($x eq "doxygen") { $doxygen_comments = 1; } elsif ($x eq "splint") { $splint_comments = 1; } elsif ($x eq "src2man") { $src2man_comments = 1; } else { print "cstyle: unrecognized construct \"$x\"\n"; print $usage; exit 2; } } } my ($filename, $line, $prev); # shared globals my $fmt; my $hdr_comment_start; if ($verbose) { $fmt = "%s:%d: %s\n%s\n"; } else { $fmt = "%s:%d: %s\n"; } if ($doxygen_comments) { # doxygen comments look like "/*!" or "/**"; allow them. $hdr_comment_start = qr/^\s*\/\*[\!\*]?$/; } elsif ($src2man_comments) { # src2man comments look like "/** n", # where 'n' stands for a manual's section number; allow them. $hdr_comment_start = qr/^\s*\/\*$|^\s*\/\*\* [0-9]{1}$/; } else { $hdr_comment_start = qr/^\s*\/\*$/; } # Note, following must be in single quotes so that \s and \w work right. my $typename = '(int|char|short|long|unsigned|float|double' . 
'|\w+_t|struct\s+\w+|union\s+\w+|FILE|BOOL)'; # mapping of old types to POSIX compatible types my %old2posix = ( 'unchar' => 'uchar_t', 'ushort' => 'ushort_t', 'uint' => 'uint_t', 'ulong' => 'ulong_t', 'u_int' => 'uint_t', 'u_short' => 'ushort_t', 'u_long' => 'ulong_t', 'u_char' => 'uchar_t', 'quad' => 'quad_t' ); my $lint_re = qr/\/\*(?: ARGSUSED[0-9]*|NOTREACHED|LINTLIBRARY|VARARGS[0-9]*| CONSTCOND|CONSTANTCOND|CONSTANTCONDITION|EMPTY| FALLTHRU|FALLTHROUGH|LINTED.*?|PRINTFLIKE[0-9]*| PROTOLIB[0-9]*|SCANFLIKE[0-9]*|CSTYLED.*? )\*\//x; my $splint_re = qr/\/\*@.*?@\*\//x; my $warlock_re = qr/\/\*\s*(?: VARIABLES\ PROTECTED\ BY| MEMBERS\ PROTECTED\ BY| ALL\ MEMBERS\ PROTECTED\ BY| READ-ONLY\ VARIABLES:| READ-ONLY\ MEMBERS:| VARIABLES\ READABLE\ WITHOUT\ LOCK:| MEMBERS\ READABLE\ WITHOUT\ LOCK:| LOCKS\ COVERED\ BY| LOCK\ UNNEEDED\ BECAUSE| LOCK\ NEEDED:| LOCK\ HELD\ ON\ ENTRY:| READ\ LOCK\ HELD\ ON\ ENTRY:| WRITE\ LOCK\ HELD\ ON\ ENTRY:| LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| READ\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| WRITE\ LOCK\ ACQUIRED\ AS\ SIDE\ EFFECT:| LOCK\ RELEASED\ AS\ SIDE\ EFFECT:| LOCK\ UPGRADED\ AS\ SIDE\ EFFECT:| LOCK\ DOWNGRADED\ AS\ SIDE\ EFFECT:| FUNCTIONS\ CALLED\ THROUGH\ POINTER| FUNCTIONS\ CALLED\ THROUGH\ MEMBER| LOCK\ ORDER: )/x; my $err_stat = 0; # exit status if ($#ARGV >= 0) { foreach my $arg (@ARGV) { my $fh = new IO::File $arg, "r"; if (!defined($fh)) { printf "%s: can not open\n", $arg; } else { &cstyle($arg, $fh); close $fh; } } } else { &cstyle("", *STDIN); } exit $err_stat; my $no_errs = 0; # set for CSTYLED-protected lines sub err($) { my ($error) = @_; unless ($no_errs) { if ($verbose) { printf $fmt, $filename, $., $error, $line; } else { printf $fmt, $filename, $., $error; } $err_stat = 1; } } sub err_prefix($$) { my ($prevline, $error) = @_; my $out = $prevline."\n".$line; unless ($no_errs) { printf $fmt, $filename, $., $error, $out; $err_stat = 1; } } sub err_prev($) { my ($error) = @_; unless ($no_errs) { printf $fmt, $filename, $. - 1, $error, $prev; $err_stat = 1; } } sub cstyle($$) { my ($fn, $filehandle) = @_; $filename = $fn; # share it globally my $in_cpp = 0; my $next_in_cpp = 0; my $in_comment = 0; my $in_header_comment = 0; my $comment_done = 0; my $in_warlock_comment = 0; my $in_function = 0; my $in_function_header = 0; my $in_declaration = 0; my $note_level = 0; my $nextok = 0; my $nocheck = 0; my $in_string = 0; my ($okmsg, $comment_prefix); $line = ''; $prev = ''; reset_indent(); line: while (<$filehandle>) { s/\r?\n$//; # strip return and newline # save the original line, then remove all text from within # double or single quotes, we do not want to check such text. $line = $_; # # C allows strings to be continued with a backslash at the end of # the line. We translate that into a quoted string on the previous # line followed by an initial quote on the next line. # # (we assume that no-one will use backslash-continuation with character # constants) # $_ = '"' . $_ if ($in_string && !$nocheck && !$in_comment); # # normal strings and characters # s/'([^\\']|\\[^xX0]|\\0[0-9]*|\\[xX][0-9a-fA-F]*)'/''/g; s/"([^\\"]|\\.)*"/\"\"/g; # # detect string continuation # if ($nocheck || $in_comment) { $in_string = 0; } else { # # Now that all full strings are replaced with "", we check # for unfinished strings continuing onto the next line. 
# $in_string = (s/([^"](?:"")*)"([^\\"]|\\.)*\\$/$1""/ || s/^("")*"([^\\"]|\\.)*\\$/""/); } # # figure out if we are in a cpp directive # $in_cpp = $next_in_cpp || /^\s*#/; # continued or started $next_in_cpp = $in_cpp && /\\$/; # only if continued # strip off trailing backslashes, which appear in long macros s/\s*\\$//; # an /* END CSTYLED */ comment ends a no-check block. if ($nocheck) { if (/\/\* *END *CSTYLED *\*\//) { $nocheck = 0; } else { reset_indent(); next line; } } # a /*CSTYLED*/ comment indicates that the next line is ok. if ($nextok) { if ($okmsg) { err($okmsg); } $nextok = 0; $okmsg = 0; if (/\/\* *CSTYLED.*\*\//) { /^.*\/\* *CSTYLED *(.*) *\*\/.*$/; $okmsg = $1; $nextok = 1; } $no_errs = 1; } elsif ($no_errs) { $no_errs = 0; } # check length of line. # first, a quick check to see if there is any chance of being too long. if (($line =~ tr/\t/\t/) * 7 + length($line) > 100) { # yes, there is a chance. # replace tabs with spaces and check again. my $eline = $line; 1 while $eline =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e; if (length($eline) > 100) { # allow long line if it is user visible string # find if line start from " and ends # with " + 2 optional characters # (these characters can be i.e. '");' '" \' or '",' etc...) if($eline =~ /^ *".*"[^"]{0,2}$/) { # check if entire line is one string literal $eline =~ s/^ *"//; $eline =~ s/"[^"]{0,2}$//; if($eline =~ /[^\\]"|[^\\](\\\\)+"/) { err("line > 100 characters"); } # allow long line if it is an URL } elsif (!($eline =~ / https?:\/\//)) { err("line > 100 characters"); } } } # ignore NOTE(...) annotations (assumes NOTE is on lines by itself). if ($note_level || /\b_?NOTE\s*\(/) { # if in NOTE or this is NOTE s/[^()]//g; # eliminate all non-parens $note_level += s/\(//g - length; # update paren nest level next; } # a /* BEGIN CSTYLED */ comment starts a no-check block. if (/\/\* *BEGIN *CSTYLED *\*\//) { $nocheck = 1; } # a /*CSTYLED*/ comment indicates that the next line is ok. if (/\/\* *CSTYLED.*\*\//) { /^.*\/\* *CSTYLED *(.*) *\*\/.*$/; $okmsg = $1; $nextok = 1; } if (/\/\/ *CSTYLED/) { /^.*\/\/ *CSTYLED *(.*)$/; $okmsg = $1; $nextok = 1; } # universal checks; apply to everything if (/\t +\t/) { err("spaces between tabs"); } if (/ \t+ /) { err("tabs between spaces"); } if (/\s$/) { err("space or tab at end of line"); } if (/[^ \t(]\/\*/ && !/\w\(\/\*.*\*\/\);/) { err("comment preceded by non-blank"); } # is this the beginning or ending of a function? # (not if "struct foo\n{\n") if (/^{$/ && $prev =~ /\)\s*(const\s*)?(\/\*.*\*\/\s*)?\\?$/) { $in_function = 1; $in_declaration = 1; $in_function_header = 0; $prev = $line; next line; } if (/^}\s*(\/\*.*\*\/\s*)*$/) { if ($prev =~ /^\s*return\s*;/) { err_prev("unneeded return at end of function"); } $in_function = 0; reset_indent(); # we don't check between functions $prev = $line; next line; } if (/^\w*\($/) { $in_function_header = 1; } if ($in_warlock_comment && /\*\//) { $in_warlock_comment = 0; $prev = $line; next line; } # a blank line terminates the declarations within a function. # XXX - but still a problem in sub-blocks. if ($in_declaration && /^$/) { $in_declaration = 0; } if ($comment_done) { $in_comment = 0; $in_header_comment = 0; $comment_done = 0; } # does this looks like the start of a block comment? 
if (/$hdr_comment_start/) { if (!/^\t*\/\*/) { err("block comment not indented by tabs"); } $in_comment = 1; /^(\s*)\//; $comment_prefix = $1; if ($comment_prefix eq "") { $in_header_comment = 1; } $prev = $line; next line; } # are we still in the block comment? if ($in_comment) { if (/^$comment_prefix \*\/$/) { $comment_done = 1; } elsif (/\*\//) { $comment_done = 1; err("improper block comment close") unless ($ignore_hdr_comment && $in_header_comment); } elsif (!/^$comment_prefix \*[ \t]/ && !/^$comment_prefix \*$/) { err("improper block comment") unless ($ignore_hdr_comment && $in_header_comment); } } if ($in_header_comment && $ignore_hdr_comment) { $prev = $line; next line; } # check for errors that might occur in comments and in code. # allow spaces to be used to draw pictures in header and block comments. if (/[^ ] / && !/".* .*"/ && !$in_header_comment && !$in_comment) { err("spaces instead of tabs"); } if (/^ / && !/^ \*[ \t\/]/ && !/^ \*$/ && (!/^ \w/ || $in_function != 0)) { err("indent by spaces instead of tabs"); } if (/^\t+ [^ \t\*]/ || /^\t+ \S/ || /^\t+ \S/) { err("continuation line not indented by 4 spaces"); } if (/ a$/ || / an$/ || / the$/) { err("orphaned pronoun at the end of the line"); } if (/$warlock_re/ && !/\*\//) { $in_warlock_comment = 1; $prev = $line; next line; } if (/^\s*\/\*./ && !/^\s*\/\*.*\*\// && !/$hdr_comment_start/) { err("improper first line of block comment"); } if ($in_comment) { # still in comment, don't do further checks $prev = $line; next line; } if ((/[^(]\/\*\S/ || /^\/\*\S/) && !(/$lint_re/ || ($splint_comments && /$splint_re/))) { err("missing blank after open comment"); } if (/\S\*\/[^)]|\S\*\/$/ && !(/$lint_re/ || ($splint_comments && /$splint_re/))) { err("missing blank before close comment"); } if (/\/\/\S/) { # C++ comments err("missing blank after start comment"); } # check for unterminated single line comments, but allow them when # they are used to comment out the argument list of a function # declaration. if (/\S.*\/\*/ && !/\S.*\/\*.*\*\// && !/\(\/\*/) { err("unterminated single line comment"); } if (/^(#else|#endif|#include)(.*)$/) { $prev = $line; if ($picky) { my $directive = $1; my $clause = $2; # Enforce ANSI rules for #else and #endif: no noncomment # identifiers are allowed after #endif or #else. Allow # C++ comments since they seem to be a fact of life. if ((($1 eq "#endif") || ($1 eq "#else")) && ($clause ne "") && (!($clause =~ /^\s+\/\*.*\*\/$/)) && (!($clause =~ /^\s+\/\/.*$/))) { err("non-comment text following " . "$directive (or malformed $directive " . "directive)"); } } next line; } # # delete any comments and check everything else. Note that # ".*?" is a non-greedy match, so that we don't get confused by # multiple comments on the same line. # s/\/\*.*?\*\//\x01/g; s/\/\/.*$/\x01/; # C++ comments # delete any trailing whitespace; we have already checked for that. s/\s*$//; # following checks do not apply to text in comments. 
if (/[^ \t\+]\+[^\+=]/ || /[^\+]\+[^ \+=]/) { err("missing space around + operator"); } if (/[^ \t]\+=/ || /\+=[^ ]/) { err("missing space around += operator"); } if (/[^ \t\-]\-[^\->]/ && !/\(\w+\)\-\w/ && !/[\(\[\{]\-[\w \t]+[\)\]\},]/) { err("missing space before - operator"); } if (/[^\-]\-[^ \-=>]/ && !/\(\-\w+\)/ && !/(return|case|=|>|<|\?|:|,|^[ \t]+)[ \t]+\-[\w\(]/ && !/(\([^\)]+\)|\[|\(|\{)\-[\w\(\]]/) { err("missing space after - operator"); } if (/(return|case|=|\?|:|,|\[)[ \t]+\-[ \t]/ || /[\(\[]\-[ \t]/) { err("extra space after - operator"); } if (/[ \t(\[]\+\+ / || / \+\+[)\]]/) { err("extra space before or after ++ operator"); } if (/[ \t(\[]\-\- / || / \-\-[)\]]/) { err("extra space before or after -- operator"); } if (/[^ \t]\-=/ || /\-=[^ ]/) { err("missing space around -= operator"); } if (/[^ \t][\%\/]/ || /[\%\/][^ =]/ || /[\%\/]=[^ ]/) { err("missing space around one of operators: % %= / /="); } if (/[^ \t]\*=/ || /\*=[^ ]/) { err("missing space around *= operator"); } if (/[^ \t\(\)\*\[]\*/) { err("missing space before * operator"); } if (/\*[^ =\*\w\(,]/ && !/\(.+ \*+\)/ && !/\*\[\]/ && !/\*\-\-\w/ && !/\*\+\+\w/ && !/\*\)/) { err("missing space after * operator"); } if (/[^<>\s][!<>=]=/ || /[^<>][!<>=]=[^\s,]/ || (/[^->]>[^,=>\s]/ && !/[^->]>$/) || (/[^<]<[^,=<\s]/ && !/[^<]<$/) || /[^<\s]<[^<]/ || /[^->\s]>[^>]/) { err("missing space around relational operator"); } if (/\S>>=/ || /\S<<=/ || />>=\S/ || /<<=\S/ || /\S[-+*\/&|^%]=/ || (/[^-+*\/&|^%!<>=\s]=[^=]/ && !/[^-+*\/&|^%!<>=\s]=$/) || (/[^!<>=]=[^=\s]/ && !/[^!<>=]=$/)) { # XXX - should only check this for C++ code # XXX - there are probably other forms that should be allowed if (!/\soperator=/) { err("missing space around assignment operator"); } } if (/[,;]\S/ && !/\bfor \(;;\)/) { err("comma or semicolon followed by non-blank"); } # allow "for" statements to have empty "while" clauses if (/\s[,;]/ && !/^[\t]+;$/ && !/^\s*for \([^;]*; ;[^;]*\)/) { err("comma or semicolon preceded by blank"); } if (/^\s*(&&|\|\|)/) { err("improper boolean continuation"); } if (/\S *(&&|\|\|)/ || /(&&|\|\|) *\S/) { err("more than one space around boolean operator"); } if (/\b(for|if|while|switch|return|case)\(/) { err("missing space between keyword and paren"); } if (/(\b(for|if|while|switch|return)\b.*){2,}/ && !/^#define/) { # multiple "case" and "sizeof" allowed err("more than one keyword on line"); } if (/\b(for|if|while|switch|return|case)\s\s+\(/ && !/^#if\s+\(/) { err("extra space between keyword and paren"); } # try to detect "func (x)" but not "if (x)" or # "#define foo (x)" or "int (*func)();" if (/\w\s\(/) { my $s = $_; # strip off all keywords on the line s/\b(for|if|while|switch|return|case)\s\(/XXX(/g; s/\b(sizeof|typeof|__typeof__)\s*\(/XXX(/g; s/#elif\s\(/XXX(/g; s/^#define\s+\w+\s+\(/XXX(/; # do not match things like "void (*f)();" # or "typedef void (func_t)();" s/\w\s\(+\*/XXX(*/g; s/\b($typename|void)\s+\(+/XXX(/og; s/\btypedef\s($typename|void)\s+\(+/XXX(/og; # do not match "__attribute__ ((format (...)))" s/\b__attribute__\s*\(\(format\s*\(/__attribute__((XXX(/g; if (/\w\s\(/) { err("extra space between function name and left paren"); } $_ = $s; } # try to detect "int foo(x)", but not "extern int foo(x);" # XXX - this still trips over too many legitimate things, # like "int foo(x,\n\ty);" # if (/^(\w+(\s|\*)+)+\w+\(/ && !/\)[;,](\s|\x01)*$/ && # !/^(extern|static)\b/) { # err("return type of function not on separate line"); # } # this is a close approximation if (/^(\w+(\s|\*)+)+\w+\(.*\)(\s|\x01)*$/ && 
!/^(extern|static)\b/) { err("return type of function not on separate line"); } if (/^#define\t/ || /^#ifdef\t/ || /^#ifndef\t/) { err("#define/ifdef/ifndef followed by tab instead of space"); } if (/^#define\s\s+/ || /^#ifdef\s\s+/ || /^#ifndef\s\s+/) { err("#define/ifdef/ifndef followed by more than one space"); } # AON C-style doesn't require this. #if (/^\s*return\W[^;]*;/ && !/^\s*return\s*\(.*\);/) { # err("unparenthesized return expression"); #} if (/\bsizeof\b/ && !/\bsizeof\s*\(.*\)/) { err("unparenthesized sizeof expression"); } if (/\b(sizeof|typeof)\b/ && /\b(sizeof|typeof)\s+\(.*\)/) { err("spaces between sizeof/typeof expression and paren"); } if (/\(\s/) { err("whitespace after left paren"); } # allow "for" statements to have empty "continue" clauses if (/\s\)/ && !/^\s*for \([^;]*;[^;]*; \)/) { err("whitespace before right paren"); } if (/^\s*\(void\)[^ ]/) { err("missing space after (void) cast"); } if (/\S\{/ && !/\{\{/ && !/\(struct \w+\)\{/) { err("missing space before left brace"); } if ($in_function && /^\s+{/ && ($prev =~ /\)\s*$/ || $prev =~ /\bstruct\s+\w+$/)) { err("left brace starting a line"); } if (/}(else|while)/) { err("missing space after right brace"); } if (/}\s\s+(else|while)/) { err("extra space after right brace"); } if (/\b_VOID\b|\bVOID\b|\bSTATIC\b/) { err("obsolete use of VOID or STATIC"); } if (/\b($typename|void)\*/o) { err("missing space between type name and *"); } if (/^\s+#/) { err("preprocessor statement not in column 1"); } if (/^#\s/) { err("blank after preprocessor #"); } if (/!\s*(strcmp|strncmp|bcmp)\s*\(/) { err("don't use boolean ! with comparison functions"); } if (/^\S+\([\S\s]*\)\s*{/) { err("brace of function definition not at beginning of line"); } if (/static\s+\S+\s*=\s*(0|NULL)\s*;/) { err("static variable initialized with 0 or NULL"); } if (/typedef[\S\s]+\*\s*\w+\s*;/) { err("typedefed pointer type"); } if (/unsigned\s+int\s/) { err("'unsigned int' instead of just 'unsigned'"); } if (/long\s+long\s+int\s/) { err("'long long int' instead of just 'long long'"); } elsif (/long\s+int\s/) { err("'long int' instead of just 'long'"); } # # We completely ignore, for purposes of indentation: # * lines outside of functions # * preprocessor lines # if ($check_continuation && $in_function && !$in_cpp) { process_indent($_); } if ($picky) { # try to detect spaces after casts, but allow (e.g.) # "sizeof (int) + 1", "void (*funcptr)(int) = foo;", and # "int foo(int) __NORETURN;" if ((/^\($typename( \*+)?\)\s/o || /\W\($typename( \*+)?\)\s/o) && !/sizeof\($typename( \*)?\)\s/o && !/\($typename( \*+)?\)\s+=[^=]/o) { err("space after cast"); } if (/\b($typename|void)\s*\*\s/o && !/\b($typename|void)\s*\*\s+const\b/o) { err("unary * followed by space"); } } if ($check_posix_types) { # try to detect old non-POSIX types. # POSIX requires all non-standard typedefs to end in _t, # but historically these have been used. 
if (/\b(unchar|ushort|uint|ulong|u_int|u_short|u_long|u_char|quad)\b/) { err("non-POSIX typedef $1 used: use $old2posix{$1} instead"); } } if ($heuristic) { # cannot check this everywhere due to "struct {\n...\n} foo;" if ($in_function && !$in_declaration && /}./ && !/}\s+=/ && !/{.*}[;,]$/ && !/}(\s|\x01)*$/ && !/} (else|while)/ && !/}}/) { err("possible bad text following right brace"); } # cannot check this because sub-blocks in # the middle of code are ok if ($in_function && /^\s+{/) { err("possible left brace starting a line"); } } if (/^\s*else\W/) { if ($prev =~ /^\s*}$/) { err_prefix($prev, "else and right brace should be on same line"); } } $prev = $line; } if ($prev eq "") { err("last line in file is blank"); } } # # Continuation-line checking # # The rest of this file contains the code for the continuation checking # engine. It's a pretty simple state machine which tracks the expression # depth (unmatched '('s and '['s). # # Keep in mind that the argument to process_indent() has already been heavily # processed; all comments have been replaced by control-A, and the contents of # strings and character constants have been elided. # my $cont_in; # currently inside of a continuation my $cont_off; # skipping an initializer or definition my $cont_noerr; # suppress cascading errors my $cont_start; # the line being continued my $cont_base; # the base indentation my $cont_first; # this is the first line of a statement my $cont_multiseg; # this continuation has multiple segments my $cont_special; # this is a C statement (if, for, etc.) my $cont_macro; # this is a macro my $cont_case; # this is a multi-line case my @cont_paren; # the stack of unmatched ( and [s we've seen sub reset_indent() { $cont_in = 0; $cont_off = 0; } sub delabel($) { # # replace labels with tabs. Note that there may be multiple # labels on a line. # local $_ = $_[0]; while (/^(\t*)( *(?:(?:\w+\s*)|(?:case\b[^:]*)): *)(.*)$/) { my ($pre_tabs, $label, $rest) = ($1, $2, $3); $_ = $pre_tabs; while ($label =~ s/^([^\t]*)(\t+)//) { $_ .= "\t" x (length($2) + length($1) / 8); } $_ .= ("\t" x (length($label) / 8)).$rest; } return ($_); } sub process_indent($) { require strict; local $_ = $_[0]; # preserve the global $_ s/\x01//g; # No comments s/\s+$//; # Strip trailing whitespace return if (/^$/); # skip empty lines # regexps used below; keywords taking (), macros, and continued cases my $special = '(?:(?:\}\s*)?else\s+)?(?:if|for|while|switch)\b'; my $macro = '[A-Z_][A-Z_0-9]*\('; my $case = 'case\b[^:]*$'; # skip over enumerations, array definitions, initializers, etc. 
if ($cont_off <= 0 && !/^\s*$special/ && (/(?:(?:\b(?:enum|struct|union)\s*[^\{]*)|(?:\s+=\s*))\{/ || (/^\s*{/ && $prev =~ /=\s*(?:\/\*.*\*\/\s*)*$/))) { $cont_in = 0; $cont_off = tr/{/{/ - tr/}/}/; return; } if ($cont_off) { $cont_off += tr/{/{/ - tr/}/}/; return; } if (!$cont_in) { $cont_start = $line; if (/^\t* /) { err("non-continuation indented 4 spaces"); $cont_noerr = 1; # stop reporting } $_ = delabel($_); # replace labels with tabs # check if the statement is complete return if (/^\s*\}?$/); return if (/^\s*\}?\s*else\s*\{?$/); return if (/^\s*do\s*\{?$/); return if (/{$/); return if (/}[,;]?$/); # Allow macros on their own lines return if (/^\s*[A-Z_][A-Z_0-9]*$/); # cases we don't deal with, generally non-kosher if (/{/) { err("stuff after {"); return; } # Get the base line, and set up the state machine /^(\t*)/; $cont_base = $1; $cont_in = 1; @cont_paren = (); $cont_first = 1; $cont_multiseg = 0; # certain things need special processing $cont_special = /^\s*$special/? 1 : 0; $cont_macro = /^\s*$macro/? 1 : 0; $cont_case = /^\s*$case/? 1 : 0; } else { $cont_first = 0; # Strings may be pulled back to an earlier (half-)tabstop unless ($cont_noerr || /^$cont_base / || (/^\t*(?: )?(?:gettext\()?\"/ && !/^$cont_base\t/)) { err_prefix($cont_start, "continuation should be indented 4 spaces"); } } my $rest = $_; # keeps the remainder of the line # # The split matches 0 characters, so that each 'special' character # is processed separately. Parens and brackets are pushed and # popped off the @cont_paren stack. For normal processing, we wait # until a ; or { terminates the statement. "special" processing # (if/for/while/switch) is allowed to stop when the stack empties, # as is macro processing. Case statements are terminated with a : # and an empty paren stack. # foreach $_ (split /[^\(\)\[\]\{\}\;\:]*/) { next if (length($_) == 0); # rest contains the remainder of the line my $rxp = "[^\Q$_\E]*\Q$_\E"; $rest =~ s/^$rxp//; if (/\(/ || /\[/) { push @cont_paren, $_; } elsif (/\)/ || /\]/) { my $cur = $_; tr/\)\]/\(\[/; my $old = (pop @cont_paren); if (!defined($old)) { err("unexpected '$cur'"); $cont_in = 0; last; } elsif ($old ne $_) { err("'$cur' mismatched with '$old'"); $cont_in = 0; last; } # # If the stack is now empty, do special processing # for if/for/while/switch and macro statements. # next if (@cont_paren != 0); if ($cont_special) { if ($rest =~ /^\s*{?$/) { $cont_in = 0; last; } if ($rest =~ /^\s*;$/) { err("empty if/for/while body ". "not on its own line"); $cont_in = 0; last; } if (!$cont_first && $cont_multiseg == 1) { err_prefix($cont_start, "multiple statements continued ". "over multiple lines"); $cont_multiseg = 2; } elsif ($cont_multiseg == 0) { $cont_multiseg = 1; } # We've finished this section, start # processing the next. goto section_ended; } if ($cont_macro) { if ($rest =~ /^$/) { $cont_in = 0; last; } } } elsif (/\;/) { if ($cont_case) { err("unexpected ;"); } elsif (!$cont_special) { err("unexpected ;") if (@cont_paren != 0); if (!$cont_first && $cont_multiseg == 1) { err_prefix($cont_start, "multiple statements continued ". "over multiple lines"); $cont_multiseg = 2; } elsif ($cont_multiseg == 0) { $cont_multiseg = 1; } if ($rest =~ /^$/) { $cont_in = 0; last; } if ($rest =~ /^\s*special/) { err("if/for/while/switch not started ". 
"on its own line"); } goto section_ended; } } elsif (/\{/) { err("{ while in parens/brackets") if (@cont_paren != 0); err("stuff after {") if ($rest =~ /[^\s}]/); $cont_in = 0; last; } elsif (/\}/) { err("} while in parens/brackets") if (@cont_paren != 0); if (!$cont_special && $rest !~ /^\s*(while|else)\b/) { if ($rest =~ /^$/) { err("unexpected }"); } else { err("stuff after }"); } $cont_in = 0; last; } } elsif (/\:/ && $cont_case && @cont_paren == 0) { err("stuff after multi-line case") if ($rest !~ /$^/); $cont_in = 0; last; } next; section_ended: # End of a statement or if/while/for loop. Reset # cont_special and cont_macro based on the rest of the # line. $cont_special = ($rest =~ /^\s*$special/)? 1 : 0; $cont_macro = ($rest =~ /^\s*$macro/)? 1 : 0; $cont_case = 0; next; } $cont_noerr = 0 if (!$cont_in); } rpma-1.3.0/utils/docker/000077500000000000000000000000001443364775400151025ustar00rootroot00000000000000rpma-1.3.0/utils/docker/0001-travis-fix-travisci_build_coverity_scan.sh.patch000066400000000000000000000016251443364775400271430ustar00rootroot00000000000000From b5179dc4822eaab192361da05aa95d98f523960f Mon Sep 17 00:00:00 2001 From: Lukasz Dorau Date: Mon, 7 May 2018 12:05:40 +0200 Subject: [PATCH] travis: fix travisci_build_coverity_scan.sh --- travisci_build_coverity_scan.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/travisci_build_coverity_scan.sh b/travisci_build_coverity_scan.sh index ad9d4afcf..562b08bcc 100644 --- a/travisci_build_coverity_scan.sh +++ b/travisci_build_coverity_scan.sh @@ -92,8 +92,8 @@ response=$(curl \ --form description="Travis CI build" \ $UPLOAD_URL) status_code=$(echo "$response" | sed -n '$p') -if [ "$status_code" != "201" ]; then +if [ "$status_code" != "200" ]; then TEXT=$(echo "$response" | sed '$d') - echo -e "\033[33;1mCoverity Scan upload failed: $TEXT.\033[0m" + echo -e "\033[33;1mCoverity Scan upload failed: $response.\033[0m" exit 1 fi -- 2.13.6 rpma-1.3.0/utils/docker/build.sh000077500000000000000000000064621443364775400165500ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2017-2023, Intel Corporation # # # build.sh - runs a Docker container from a Docker image with environment # prepared for running rpma build and tests. # # # Notes: # - run this script from its location or set the variable 'HOST_WORKDIR' to # where the root of this project is on the host machine, # - set variables 'OS' and 'OS_VER' properly to a system you want to build this # repo on (for proper values take a look on the list of Dockerfiles at the # utils/docker/images directory), eg. OS=ubuntu, OS_VER=20.04. # set -e source $(dirname $0)/set-ci-vars.sh if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set (eg. OS=fedora, OS_VER=30)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then HOST_WORKDIR=$(readlink -f ../..) fi imageName=${DOCKER_REPO}:${IMG_VER}-${OS}-${OS_VER} containerName=rpma-${OS}-${OS_VER} if [[ "$command" == "" ]]; then case $TYPE in normal) command="./run-build.sh"; ;; coverity) command="./run-coverity.sh"; ;; esac fi if [ "$TESTS_COVERAGE" == "1" ]; then docker_opts="${docker_opts} `bash <(curl -s https://codecov.io/env)`"; fi if [ -n "$DNS_SERVER" ]; then DNS_SETTING=" --dns=$DNS_SERVER "; fi if [ "$AUTO_DOC_UPDATE" == "1" ]; then # Create pull requests only on $GITHUB_REPO and only on the main branch, # otherwise show the git diff only. 
if [[ "$CI_BRANCH" != "main" || "$CI_EVENT_TYPE" == "pull_request" || "$CI_REPO_SLUG" != "$GITHUB_REPO" ]]; then AUTO_DOC_UPDATE="show-diff-only" fi fi WORKDIR=/rpma SCRIPTSDIR=$WORKDIR/utils/docker # check if we are running on a CI (Travis or GitHub Actions) [ -n "$GITHUB_ACTIONS" -o -n "$TRAVIS" ] && CI_RUN="YES" || CI_RUN="NO" # do not allocate a pseudo-TTY if we are running on GitHub Actions [ ! $GITHUB_ACTIONS ] && TTY='-t' || TTY='' echo Building ${IMG_VER}-${OS}-${OS_VER} # Run a container with # - environment variables set (--env) # - host directory containing source mounted (-v) # - working directory set (-w) docker run --privileged=true --name=$containerName -i $TTY \ $DNS_SETTING \ ${docker_opts} \ --env http_proxy=$http_proxy \ --env https_proxy=$https_proxy \ --env AUTO_DOC_UPDATE=$AUTO_DOC_UPDATE \ --env GITHUB_ACTIONS=$GITHUB_ACTIONS \ --env GITHUB_HEAD_REF=$GITHUB_HEAD_REF \ --env GITHUB_REPO=$GITHUB_REPO \ --env GITHUB_REPOSITORY=$GITHUB_REPOSITORY \ --env GITHUB_REF=$GITHUB_REF \ --env GITHUB_RUN_ID=$GITHUB_RUN_ID \ --env GITHUB_SHA=$GITHUB_SHA \ --env WORKDIR=$WORKDIR \ --env SCRIPTSDIR=$SCRIPTSDIR \ --env TESTS_COVERAGE=$TESTS_COVERAGE \ --env CI_COMMIT=$CI_COMMIT \ --env CI_COMMIT_RANGE=$CI_COMMIT_RANGE \ --env CI_REPO_SLUG=$CI_REPO_SLUG \ --env CI_BRANCH=$CI_BRANCH \ --env CI_EVENT_TYPE=$CI_EVENT_TYPE \ --env CI_RUN=$CI_RUN \ --env CI_SANITS=$CI_SANITS \ --env TRAVIS=$TRAVIS \ --env COVERITY_SCAN_TOKEN=$COVERITY_SCAN_TOKEN \ --env COVERITY_SCAN_NOTIFICATION_EMAIL=$COVERITY_SCAN_NOTIFICATION_EMAIL \ --env DOC_UPDATE_GITHUB_TOKEN=$DOC_UPDATE_GITHUB_TOKEN \ --env TEST_BUILD=$TEST_BUILD \ --env DEFAULT_TEST_DIR=/dev/shm \ --env TEST_PACKAGES=${TEST_PACKAGES:-ON} \ --env FAULT_INJECTION=$FAULT_INJECTION \ --env CC=${CC:-gcc} \ --shm-size=4G \ -v $HOST_WORKDIR:$WORKDIR \ -v /etc/localtime:/etc/localtime \ -w $SCRIPTSDIR \ $imageName $command rpma-1.3.0/utils/docker/coverity-command.sh000077500000000000000000000003351443364775400207220ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2022, Intel Corporation # # # coverity-command.sh - make script for the Coverity build # set -ex pwd mkdir -p build cd build cmake .. 
make -j$1 rpma-1.3.0/utils/docker/images/000077500000000000000000000000001443364775400163475ustar00rootroot00000000000000rpma-1.3.0/utils/docker/images/0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch000066400000000000000000000027641443364775400314420ustar00rootroot00000000000000From d633d3b0a5f03be280efb80a69b9d5ed4e9c4d56 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Stolarczuk?= Date: Tue, 14 Jul 2020 13:58:34 +0200 Subject: [PATCH] fix generating gcov files and turn-off verbose log --- codecov | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/codecov b/codecov index e702ecd..0a2f4d8 100755 --- a/codecov +++ b/codecov @@ -1108,9 +1108,9 @@ then if [ "$ft_gcovout" = "0" ]; then # suppress gcov output - bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" >/dev/null 2>&1 || true + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -execdir $gcov_exe -pb $gcov_arg {} \;" >/dev/null 2>&1 || true else - bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -execdir $gcov_exe -pb $gcov_arg {} \;" || true fi else say "${e}==>${x} gcov disabled" @@ -1425,7 +1425,7 @@ do report_len=$(wc -c < "$file") if [ "$report_len" -ne 0 ]; then - say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" + #say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" # append to to upload _filename=$(basename "$file") if [ "${_filename##*.}" = 'gcov' ]; -- 2.25.1 rpma-1.3.0/utils/docker/images/Dockerfile.archlinux-latest000066400000000000000000000037131443364775400236330ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of ubuntu-based # environment prepared for running tests of librpma. 
# # Pull base image FROM archlinux:latest MAINTAINER tomasz.gromadzki@intel.com # Do full system upgrade RUN pacman -Syu --noconfirm # base deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ pkg-config \ sudo \ which" ENV EXAMPLES_DEPS "\ m4 \ protobuf-c \ time" # rdma-core deps ENV RDMA_DEPS "\ fakeroot \ file" # PMDK deps ENV PMDK_DEPS "\ ndctl \ unzip \ wget" # RPMA deps ENV RPMA_DEPS "\ cmake \ diffutils \ file \ gawk \ groff \ graphviz \ pandoc" # Install required packages RUN pacman -S --noconfirm \ $BASE_DEPS \ $EXAMPLES_DEPS \ $RDMA_DEPS \ $PMDK_DEPS \ $RPMA_DEPS \ && rm -rf /var/cache/pacman/pkg/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Install PMDK COPY install-pmdk.sh install-pmdk.sh RUN ./install-pmdk.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 ENV PFILE ./password RUN useradd -m $USER RUN echo $USERPASS > $PFILE RUN echo $USERPASS >> $PFILE RUN passwd $USER < $PFILE RUN rm -f $PFILE RUN sed -i 's/# %wheel/%wheel/g' /etc/sudoers RUN gpasswd wheel -a $USER # Enable preserving given variables in the privileged environment - # - it is required by 'makepkg' to download required packages and sources RUN echo 'Defaults env_keep += "ftp_proxy http_proxy https_proxy no_proxy"' >> /etc/sudoers RUN echo 'Defaults env_keep += "FTP_PROXY HTTP_PROXY HTTPS_PROXY NO_PROXY"' >> /etc/sudoers # Install rdma-core and pmdk (requires user 'user') COPY install-archlinux-aur.sh install-archlinux-aur.sh RUN ./install-archlinux-aur.sh rdma-core # Switch to user USER $USER # Set required environment variables ENV OS archlinux ENV OS_VER latest ENV PACKAGE_MANAGER none ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.centos-7000066400000000000000000000025431443364775400220030ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of fedora-based # environment prepared for running tests of librpma # # Pull base image FROM centos:7 MAINTAINER tomasz.gromadzki@intel.com RUN yum update -y RUN yum install -y epel-release # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ passwd \ pkg-config \ rpm-build \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake3 \ diffutils \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # examples deps ('protobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem \ libpmem2 \ protobuf-c-devel" # Install all required packages RUN yum install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && yum clean all # run cmake3 as cmake RUN ln -s /usr/bin/cmake3 /usr/bin/cmake RUN ln -s /usr/bin/ctest3 /usr/bin/ctest # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo $USERPASS | passwd $USER --stdin RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS centos ENV OS_VER 7 ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.centos-stream000066400000000000000000000027021443364775400231250ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of fedora-based # environment prepared for running tests of librpma. 
# # Pull base image # There is NO official docker image of CentOS Stream, so we use tgagor/centos-stream. FROM tgagor/centos:stream MAINTAINER tomasz.gromadzki@intel.com RUN dnf update -y RUN dnf install -y epel-release RUN dnf install -y 'dnf-command(config-manager)' RUN dnf config-manager --set-enabled powertools # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ passwd \ pkg-config \ rpm-build \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ diffutils \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # examples deps ('protobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem \ libpmem2 \ protobuf-c-devel" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && dnf clean all # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo $USERPASS | passwd $USER --stdin RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS centos ENV OS_VER stream ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.debian-experimental000066400000000000000000000025721443364775400242630ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of debian-based # environment prepared for running tests of librpma. # # Pull base image FROM debian:experimental MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && rm -rf /var/lib/apt/lists/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS debian ENV OS_VER experimental ENV PACKAGE_MANAGER deb ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.debian-latest000066400000000000000000000027151443364775400230610ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of debian-based # environment prepared for running tests of librpma. 
# # Pull base image FROM debian:latest MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Additional parameters to build docker without building components ARG SKIP_SCRIPTS_DOWNLOAD # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && rm -rf /var/lib/apt/lists/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS debian ENV OS_VER latest ENV PACKAGE_MANAGER deb ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.debian-stable000066400000000000000000000027141443364775400230360ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of debian-based # environment prepared for running tests of librpma. # # Pull base image FROM debian:stable MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Additional parameters to build docker without building components ARG SKIP_SCRIPTS_DOWNLOAD # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && rm -rf /var/lib/apt/lists/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS debian ENV OS_VER stable ENV PACKAGE_MANAGER deb ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.debian-testing000066400000000000000000000025571443364775400232460ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of debian-based # environment prepared for running tests of librpma. 
# # Pull base image FROM debian:testing MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && rm -rf /var/lib/apt/lists/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS debian ENV OS_VER testing ENV PACKAGE_MANAGER deb ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.fedora-latest000066400000000000000000000024601443364775400230740ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of fedora-based # environment prepared for running tests of librpma. # # Pull base image FROM fedora:latest MAINTAINER tomasz.gromadzki@intel.com # Update all packages RUN dnf update -y # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ patch \ pkg-config \ rpm-build \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # examples deps ('protobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-devel \ libpmem2-devel \ protobuf-c-devel" # doc update deps ENV DOC_UPDATE_DEPS "\ hub" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ $DOC_UPDATE_DEPS \ && dnf clean all # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo "$USER:$USERPASS" | chpasswd RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS fedora ENV OS_VER latest ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.fedora-latest-with-rdma-core-45000066400000000000000000000035571443364775400261520ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # # # Dockerfile.fedora-latest-with-rdma-core-45 - Dockerfile for Fedora-latest with rdma-core v45.0 # installed from sources. # # This dockerfile is a 'recipe' for Docker to build an image of fedora-based environment # prepared for running tests of librpma. 
# # Pull base image FROM fedora:latest MAINTAINER tomasz.gromadzki@intel.com # Update all packages RUN dnf update -y # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ patch \ pkg-config \ rpm-build \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ file \ gawk \ groff \ graphviz \ pandoc" # examples deps ('protobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-devel \ libpmem2-devel \ protobuf-c-devel" # doc update deps ENV DOC_UPDATE_DEPS "\ hub" # rdma-core built from sources deps ENV RDMA_CORE_FROM_SOURCES_DEPS "\ libnl3-devel \ wget" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ $DOC_UPDATE_DEPS \ $RDMA_CORE_FROM_SOURCES_DEPS \ && dnf clean all # Install rdma-core COPY install-rdma-core.sh install-rdma-core.sh RUN ./install-rdma-core.sh # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo "$USER:$USERPASS" | chpasswd RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS fedora ENV OS_VER latest ENV PACKAGE_MANAGER rpm ENV NOTTY 1 # Paths to the rdma-core built from sources ENV PKG_CONFIG_PATH /rdma-core/build/lib/pkgconfig ENV LIBRARY_PATH /rdma-core/build/lib ENV LD_LIBRARY_PATH /rdma-core/build/lib ENV CPATH /rdma-core/build/include rpma-1.3.0/utils/docker/images/Dockerfile.fedora-rawhide000066400000000000000000000023561443364775400232270ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of fedora-based # environment prepared for running tests of librpma. # # Pull base image FROM fedora:rawhide MAINTAINER tomasz.gromadzki@intel.com # Update all packages RUN dnf update -y # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ patch \ pkg-config \ rpm-build \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # examples deps ('protobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-devel \ libpmem2-devel \ protobuf-c-devel" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ && dnf clean all # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo "$USER:$USERPASS" | chpasswd RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS fedora ENV OS_VER rawhide ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.opensuse-leap-latest000066400000000000000000000027141443364775400244160ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of environment # prepared for running tests of librpma. 
# # Pull base image FROM opensuse/leap:latest MAINTAINER tomasz.gromadzki@intel.com # Update the OS RUN zypper dup -y # Update all packages RUN zypper update -y # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ pkg-config \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ file \ gawk \ groff \ graphviz \ pandoc \ rpm-build \ rdma-core-devel" # examples deps ('libprotobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-devel \ libpmem2-devel \ libprotobuf-c-devel" # Install all required packages RUN zypper install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Clean the package cache RUN zypper clean all # Add user ENV USER user ENV USERPASS p1a2s3s4 ENV PFILE ./password RUN useradd -m $USER RUN echo $USERPASS > $PFILE RUN echo $USERPASS >> $PFILE RUN passwd $USER < $PFILE RUN rm -f $PFILE RUN sed -i 's/# %wheel/%wheel/g' /etc/sudoers RUN groupadd wheel RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS opensuse/leap ENV OS_VER latest ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.opensuse-tumbleweed-latest000066400000000000000000000027421443364775400256330ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of environment # prepared for running tests of librpma. # # Pull base image FROM opensuse/tumbleweed:latest MAINTAINER tomasz.gromadzki@intel.com # Update the OS RUN zypper dup -y # Update all packages RUN zypper update -y # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ pkgconf-pkg-config \ shadow \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ file \ groff \ graphviz \ pandoc \ rpm-build \ rdma-core-devel" # examples deps ('libprotobuf-c-devel' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-devel \ libpmem2-devel \ libprotobuf-c-devel" # Install all required packages RUN zypper install -y \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Clean the package cache RUN zypper clean all # Add user ENV USER user ENV USERPASS p1a2s3s4 ENV PFILE ./password RUN useradd -m $USER RUN echo $USERPASS > $PFILE RUN echo $USERPASS >> $PFILE RUN passwd $USER < $PFILE RUN rm -f $PFILE RUN sed -i 's/# %wheel/%wheel/g' /etc/sudoers RUN groupadd wheel RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS opensuse/tumbleweed ENV OS_VER latest ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.rockylinux-8000066400000000000000000000025501443364775400227160ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of RedHat-based # environment prepared for running tests of librpma. 
# # Pull base image FROM rockylinux/rockylinux:8 MAINTAINER tomasz.gromadzki@intel.com RUN dnf update -y RUN dnf install -y epel-release RUN dnf install -y 'dnf-command(config-manager)' RUN dnf config-manager --set-enabled powertools # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ passwd \ pkg-config \ rpm-build \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ diffutils \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # PMDK deps ENV PMDK_DEPS "\ m4 \ ndctl \ wget" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $PMDK_DEPS \ && dnf clean all # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Install PMDK COPY install-pmdk.sh install-pmdk.sh RUN ./install-pmdk.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo $USERPASS | passwd $USER --stdin RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS rockylinux/rockylinux ENV OS_VER 8 ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.rockylinux-9000066400000000000000000000024071443364775400227200ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of RedHat-based # environment prepared for running tests of librpma. # # Pull base image FROM rockylinux/rockylinux:9 MAINTAINER tomasz.gromadzki@intel.com RUN dnf update -y RUN dnf install -y epel-release # base Linux deps ENV BASE_DEPS "\ clang \ gcc \ git \ make \ passwd \ pkg-config \ rpm-build \ sudo \ which" # librpma library deps ENV RPMA_DEPS "\ cmake \ diffutils \ file \ gawk \ groff \ graphviz \ pandoc \ rdma-core-devel" # PMDK deps ENV PMDK_DEPS "\ m4 \ ndctl \ wget" # Install all required packages RUN dnf install -y \ $BASE_DEPS \ $RPMA_DEPS \ $PMDK_DEPS \ && dnf clean all # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Install PMDK COPY install-pmdk.sh install-pmdk.sh RUN ./install-pmdk.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER RUN echo $USERPASS | passwd $USER --stdin RUN gpasswd wheel -a $USER USER $USER # Set required environment variables ENV OS rockylinux/rockylinux ENV OS_VER 9 ENV PACKAGE_MANAGER rpm ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.ubuntu-latest000066400000000000000000000034361443364775400231620ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of ubuntu-based # environment prepared for running tests of librpma. 
# # Pull base image FROM ubuntu:latest MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Additional parameters to build docker without building components ARG SKIP_SCRIPTS_DOWNLOAD # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ libibverbs-dev \ librdmacm-dev \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # packages required by the Coverity build ENV COVERITY_DEPS "\ ruby \ wget" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ $COVERITY_DEPS \ && rm -rf /var/lib/apt/lists/* # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Download scripts required in run-*.sh COPY download-scripts.sh download-scripts.sh COPY 0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch 0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch RUN ./download-scripts.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS ubuntu ENV OS_VER latest ENV PACKAGE_MANAGER deb ENV NOTTY 1 rpma-1.3.0/utils/docker/images/Dockerfile.ubuntu-latest-with-rdma-core-45000066400000000000000000000045311443364775400262250ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # # # Dockerfile.ubuntu-latest-with-rdma-core-45 - Dockerfile for Ubuntu-latest with rdma-core v45.0 # installed from sources. # # This dockerfile is a 'recipe' for Docker to build an image of ubuntu-based environment # prepared for running tests of librpma. 
# # Pull base image FROM ubuntu:latest MAINTAINER tomasz.gromadzki@intel.com ENV DEBIAN_FRONTEND noninteractive # Additional parameters to build docker without building components ARG SKIP_SCRIPTS_DOWNLOAD # Update the Apt cache and install basic tools RUN apt-get update && apt-get dist-upgrade -y # base Linux deps ENV BASE_DEPS "\ apt-utils \ build-essential \ clang \ devscripts \ git \ pkg-config \ sudo \ whois" # librpma library deps ENV RPMA_DEPS "\ cmake \ curl \ gawk \ groff \ graphviz \ pandoc" # examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s) ENV EXAMPLES_DEPS "\ libpmem-dev \ libpmem2-dev \ libprotobuf-c-dev" # packages required by the Coverity build ENV COVERITY_DEPS "\ ruby \ wget" # rdma-core built from sources deps ENV RDMA_CORE_FROM_SOURCES_DEPS "\ libnl-3-dev \ libnl-route-3-dev" # Install all required packages RUN apt-get install -y --no-install-recommends \ $BASE_DEPS \ $RPMA_DEPS \ $EXAMPLES_DEPS \ $COVERITY_DEPS \ $RDMA_CORE_FROM_SOURCES_DEPS \ && rm -rf /var/lib/apt/lists/* # Install rdma-core COPY install-rdma-core.sh install-rdma-core.sh RUN ./install-rdma-core.sh # Install cmocka COPY install-cmocka.sh install-cmocka.sh RUN ./install-cmocka.sh # Install txt2man COPY install-txt2man.sh install-txt2man.sh RUN ./install-txt2man.sh # Download scripts required in run-*.sh COPY download-scripts.sh download-scripts.sh COPY 0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch 0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch RUN ./download-scripts.sh # Add user ENV USER user ENV USERPASS p1a2s3s4 RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS` USER $USER # Set required environment variables ENV OS ubuntu ENV OS_VER latest ENV PACKAGE_MANAGER deb ENV NOTTY 1 # Paths to the rdma-core built from sources ENV PKG_CONFIG_PATH /rdma-core/build/lib/pkgconfig ENV LIBRARY_PATH /rdma-core/build/lib ENV LD_LIBRARY_PATH /rdma-core/build/lib ENV CPATH /rdma-core/build/include rpma-1.3.0/utils/docker/images/Dockerfile.ubuntu-rolling000066400000000000000000000025571443364775400233370ustar00rootroot00000000000000# # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # Dockerfile - a 'recipe' for Docker to build an image of ubuntu-based # environment prepared for running tests of librpma. 
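#
# Because the 'rolling' tag moves over time, the release actually captured in a
# built image can be checked with a one-liner like the following (the image tag
# is a placeholder and depends on how the image was tagged at build time):
#
#   docker run --rm rpma:ubuntu-rolling cat /etc/os-release
#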
#
# Pull base image
FROM ubuntu:rolling
MAINTAINER tomasz.gromadzki@intel.com

ENV DEBIAN_FRONTEND noninteractive

# Update the Apt cache and install basic tools
RUN apt-get update && apt-get dist-upgrade -y

# base Linux deps
ENV BASE_DEPS "\
	apt-utils \
	build-essential \
	clang \
	devscripts \
	git \
	pkg-config \
	sudo \
	whois"

# librpma library deps
ENV RPMA_DEPS "\
	cmake \
	curl \
	gawk \
	groff \
	graphviz \
	libibverbs-dev \
	librdmacm-dev \
	pandoc"

# examples deps ('libprotobuf-c-dev' is required only for examples 9 and 9s)
ENV EXAMPLES_DEPS "\
	libpmem-dev \
	libpmem2-dev \
	libprotobuf-c-dev"

# Install all required packages
RUN apt-get install -y --no-install-recommends \
	$BASE_DEPS \
	$RPMA_DEPS \
	$EXAMPLES_DEPS \
	&& rm -rf /var/lib/apt/lists/*

# Install cmocka
COPY install-cmocka.sh install-cmocka.sh
RUN ./install-cmocka.sh

# Install txt2man
COPY install-txt2man.sh install-txt2man.sh
RUN ./install-txt2man.sh

# Add user
ENV USER user
ENV USERPASS p1a2s3s4
RUN useradd -m $USER -g sudo -p `mkpasswd $USERPASS`
USER $USER

# Set required environment variables
ENV OS ubuntu
ENV OS_VER rolling
ENV PACKAGE_MANAGER deb
ENV NOTTY 1
rpma-1.3.0/utils/docker/images/Dockerfile.vzlinux-latest000066400000000000000000000030371443364775400233540ustar00rootroot00000000000000
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020-2023, Intel Corporation
#

#
# Dockerfile - a 'recipe' for Docker to build an image of RedHat-based
#              environment prepared for running tests of librpma.
#

# Pull base image
FROM virtuozzo/vzlinux8:latest
MAINTAINER tomasz.gromadzki@intel.com

# Temporary workaround for the error:
# Curl error (60): Peer certificate cannot be authenticated with given CA certificates
RUN echo "sslverify=false" >> /etc/yum.conf

RUN dnf update -y
RUN dnf install -y epel-release
RUN dnf install -y 'dnf-command(config-manager)'
RUN dnf config-manager --set-enabled powertools

# base Linux deps
ENV BASE_DEPS "\
	clang \
	gcc \
	git \
	make \
	passwd \
	pkg-config \
	rpm-build \
	sudo \
	which"

# librpma library deps
ENV RPMA_DEPS "\
	cmake \
	diffutils \
	file \
	gawk \
	groff \
	graphviz \
	pandoc \
	rdma-core-devel"

# PMDK deps
ENV PMDK_DEPS "\
	m4 \
	ndctl \
	unzip \
	wget"

# Install all required packages
RUN dnf install -y \
	$BASE_DEPS \
	$RPMA_DEPS \
	$PMDK_DEPS \
	&& dnf clean all

# Install cmocka
COPY install-cmocka.sh install-cmocka.sh
RUN ./install-cmocka.sh

# Install txt2man
COPY install-txt2man.sh install-txt2man.sh
RUN ./install-txt2man.sh

# Install PMDK
COPY install-pmdk.sh install-pmdk.sh
RUN ./install-pmdk.sh

# Add user
ENV USER user
ENV USERPASS p1a2s3s4
RUN useradd -m $USER
RUN echo $USERPASS | passwd $USER --stdin
RUN gpasswd wheel -a $USER
USER $USER

# Set required environment variables
ENV OS virtuozzo/vzlinux8
ENV OS_VER latest
ENV PACKAGE_MANAGER rpm
ENV NOTTY 1
rpma-1.3.0/utils/docker/images/README.md000066400000000000000000000016521443364775400176320ustar00rootroot00000000000000
# Content

Dockerfiles and scripts placed in this directory are intended to be used as
development process vehicles and as part of the continuous integration process.
Images built out of those recipes may be used with docker or podman as a
development environment.
Only those used on Travis are fully tested on a daily basis.
In case of any problem, patches and GitHub issues are welcome.

# How to build docker image

```sh
docker build --build-arg https_proxy=https://proxy.com:port --build-arg http_proxy=http://proxy.com:port -t rpma:debian-unstable -f ./Dockerfile.debian-unstable .
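# The same recipe can be applied to any other Dockerfile in this directory;
# a hypothetical rockylinux-9 build (adjust or drop the proxy arguments as needed):
docker build --build-arg https_proxy=https://proxy.com:port --build-arg http_proxy=http://proxy.com:port -t rpma:rockylinux-9 -f ./Dockerfile.rockylinux-9 .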
```

# How to use docker image

To run the build and tests on a local machine in docker:

```sh
docker run --network=bridge --shm-size=4G -v /your/workspace/path/:/opt/workspace:z -w /opt/workspace/ -e PKG_CONFIG_PATH=/opt/pmdk/lib/pkgconfig -it rpma:debian-unstable /bin/bash
```

To get strace working, add the following to the docker command line:

```sh
--cap-add SYS_PTRACE
```
rpma-1.3.0/utils/docker/images/build-image.sh000077500000000000000000000021371443364775400210700ustar00rootroot00000000000000
#!/usr/bin/env bash
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2016-2023, Intel Corporation
#

#
# build-image.sh - prepares a Docker image with the <OS>-based
#                  environment for testing rpma, according
#                  to the Dockerfile.<OS>-<OS_VER> file located
#                  in the same directory.
#
# The script can be run locally.
#

set -e

function usage {
	echo "Usage:"
	echo "    build-image.sh <DOCKER_REPO> <OS>-<OS_VER>"
	echo "where <OS>-<OS_VER>, for example, can be 'fedora-30', provided " \
		"a Dockerfile named 'Dockerfile.fedora-30' exists in the " \
		"current directory."
}

# Check if the first and second arguments are nonempty
if [[ -z "$1" || -z "$2" ]]; then
	usage
	exit 1
fi

# Check if the file Dockerfile.OS-VER exists
if [[ ! -f "Dockerfile.$2" ]]; then
	echo "ERROR: the file does not exist: Dockerfile.$2"
	usage
	exit 1
fi

# Build a Docker image tagged with ${DOCKER_REPO}:${IMG_VER}-${OS}-${OS_VER}
docker build -t $1:${IMG_VER}-$2 \
	--build-arg http_proxy=$http_proxy \
	--build-arg https_proxy=$https_proxy \
	-f Dockerfile.$2 .
rpma-1.3.0/utils/docker/images/download-scripts.sh000077500000000000000000000015211443364775400222010ustar00rootroot00000000000000
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020-2022, Intel Corporation
#
# download-scripts.sh - downloads a specific version of codecov's bash
#                       script to generate and upload reports. It's useful,
#                       since an unverified version may break coverage results.
#

set -e

# main: Merge pull request #342 from codecov/revert-proj-name-..., 18.08.2020
CODECOV_VERSION="e877c1280cc6e902101fb5df2981ed1c962da7f0"

if [ "${SKIP_SCRIPTS_DOWNLOAD}" ]; then
	echo "Variable 'SKIP_SCRIPTS_DOWNLOAD' is set; skipping scripts' download"
	exit
fi

mkdir -p /opt/scripts

# Download codecov's bash script
git clone https://github.com/codecov/codecov-bash
cd codecov-bash
git checkout $CODECOV_VERSION
git apply ../0001-fix-generating-gcov-files-and-turn-off-verbose-log.patch
mv -v codecov /opt/scripts/codecov
cd ..
rm -rf codecov-bash
rpma-1.3.0/utils/docker/images/install-archlinux-aur.sh000077500000000000000000000004001443364775400231300ustar00rootroot00000000000000
#!/bin/bash -e
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020-2022, Intel Corporation
#

PKG=$1

git clone https://aur.archlinux.org/$PKG.git
chown -R user:user ./$PKG
cd $PKG
sudo -u user makepkg -si --noconfirm --skippgpcheck
cd ..
rm -r $PKG
rpma-1.3.0/utils/docker/images/install-cmocka.sh000077500000000000000000000017331443364775400216130ustar00rootroot00000000000000
#!/usr/bin/env bash
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2020-2021, Intel Corporation
#

#
# install-cmocka.sh - installs cmocka.org
#

# cmocka-1.1.5-26-g672c5ce - pull latest fixes
CMOCKA_VERSION=672c5cee79eb412025c3dd8b034e611c1f119055

git clone https://git.cryptomilk.org/projects/cmocka.git
if [ $?
-ne 0 ]; then # in case of a failure retry after updating certificates set -e openssl s_client -showcerts -servername git.cryptomilk.org -connect git.cryptomilk.org:443 /dev/null \ | sed -n -e '/BEGIN\ CERTIFICATE/,/END\ CERTIFICATE/ p' > git-cryptomilk.org.pem cat git-cryptomilk.org.pem | sudo tee -a /etc/ssl/certs/ca-certificates.crt rm git-cryptomilk.org.pem git clone https://git.cryptomilk.org/projects/cmocka.git fi set -e mkdir cmocka/build cd cmocka/build git checkout $CMOCKA_VERSION cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RelWithDebInfo make -j$(nproc) sudo make -j$(nproc) install cd ../.. sudo rm -rf cmocka rpma-1.3.0/utils/docker/images/install-pmdk.sh000077500000000000000000000012341443364775400213050ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # # # install-pmdk.sh - installs PMDK libraries # # PMDK version: 8074b19b1d9b40bcaaed7e0dc0622dccf8007f2f (1.12.1-119-g8074b19b1) # with required fixes, see: # https://github.com/pmem/pmdk/issues/5540 PMDK_VERSION=8074b19b1d9b40bcaaed7e0dc0622dccf8007f2f WORKDIR=$(pwd) set -ex # # Install PMDK libraries from sources # wget https://github.com/pmem/pmdk/archive/${PMDK_VERSION}.zip unzip ${PMDK_VERSION}.zip cd pmdk-${PMDK_VERSION} make -j$(nproc) NDCTL_ENABLE=n sudo make -j$(nproc) install prefix=/usr NDCTL_ENABLE=n cd $WORKDIR rm -rf pmdk-${PMDK_VERSION} rpma-1.3.0/utils/docker/images/install-rdma-core.sh000077500000000000000000000012311443364775400222200ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2023, Intel Corporation # # # install-rdma-core.sh - installs rdma-core libraries from sources with support for: # native atomic write and native flush # set -ex # rdma-core v45.0 with support for native atomic write and native flush VERSION="45.0" WORKDIR=$(pwd) # # Install rdma-core libraries from a release package # wget https://github.com/linux-rdma/rdma-core/releases/download/v${VERSION}/rdma-core-${VERSION}.tar.gz tar -xzf rdma-core-${VERSION}.tar.gz rm rdma-core-${VERSION}.tar.gz mv rdma-core-${VERSION} rdma-core cd rdma-core ./build.sh cd $WORKDIR rpma-1.3.0/utils/docker/images/install-txt2man.sh000077500000000000000000000005271443364775400217530ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # # # install-txt2man.sh - installs txt2man # set -e git clone https://github.com/mvertes/txt2man.git cd txt2man # txt2man v1.7.0 git checkout txt2man-1.7.0 make -j$(nproc) sudo make -j$(nproc) install prefix=/usr cd .. rm -rf txt2man rpma-1.3.0/utils/docker/images/push-image.sh000077500000000000000000000025221443364775400207460ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2020, Intel Corporation # # # push-image.sh - pushes the Docker image to ${DOCKER_REPO} # # The script utilizes $GH_CR_USER and $GH_CR_PAT variables # to log in to ${DOCKER_REPO}. The variables can be set # in the CI project's configuration for automated builds. 
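#
# A hypothetical manual invocation (all values below are placeholders used only
# for illustration; in CI they are provided by the workflow environment):
#
#   export DOCKER_REPO=ghcr.io/<org>/rpma GH_CR_ADDR=ghcr.io
#   export GH_CR_USER=<user> GH_CR_PAT=<personal-access-token>
#   export OS=ubuntu OS_VER=latest
#   ./push-image.sh
#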
# set -e source $(dirname $0)/../set-ci-vars.sh if [[ -z "$OS" ]]; then echo "OS environment variable is not set" exit 1 fi if [[ -z "$OS_VER" ]]; then echo "OS_VER environment variable is not set" exit 1 fi if [[ -z "${DOCKER_REPO}" ]]; then echo "DOCKER_REPO environment variable is not set" exit 1 fi if [[ -z "${GH_CR_USER}" || -z "${GH_CR_PAT}" ]]; then echo "ERROR: variables GH_CR_USER=\"${GH_CR_USER}\" and GH_CR_PAT=\"${GH_CR_PAT}\"" \ "have to be set properly to allow login to the ${DOCKER_REPO}." exit 1 fi TAG="${IMG_VER}-${OS}-${OS_VER}" # Check if the image tagged with ${DOCKER_REPO}:${TAG} exists locally if [[ ! $(docker images -a | awk -v pattern="^${DOCKER_REPO}:${TAG}\$" \ '$1":"$2 ~ pattern') ]] then echo "ERROR: Docker image tagged ${DOCKER_REPO}:${TAG} does not exists locally." exit 1 fi # Log in to ${DOCKER_REPO} echo "${GH_CR_PAT}" | docker login ${GH_CR_ADDR} -u="${GH_CR_USER}" --password-stdin # Push the image to ${DOCKER_REPO} docker push ${DOCKER_REPO}:${TAG} rpma-1.3.0/utils/docker/images/set-images-version.sh000077500000000000000000000011151443364775400224250ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # set-images-version.sh -- set value of the 'IMG_VER' variable # containing the current version of Docker images # # This file has to be located in the "utils/docker/images" subdirectory, # because every change of a value of IMG_VER has to trigger the rebuild # of all Docker images. # # A version of Docker images should be different only for different # and standalone branches. It makes no sense to change it for the same branch. # export IMG_VER=main rpma-1.3.0/utils/docker/prepare-for-build.sh000077500000000000000000000010571443364775400207630ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2022, Intel Corporation # # # prepare-for-build.sh - prepare the Docker image for the build # set -e function sudo_password() { echo $USERPASS | sudo -Sk $* } echo WORKDIR=$WORKDIR # make sure $WORKDIR has correct permissions sudo_password chown -R $(id -u):$(id -g) $WORKDIR # fix for: https://github.com/actions/checkout/issues/766 (git CVE-2022-24765) git config --global --add safe.directory "$WORKDIR" sudo_password git config --global --add safe.directory "$WORKDIR" rpma-1.3.0/utils/docker/pull-or-rebuild-image.sh000077500000000000000000000066671443364775400215560ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2022, Intel Corporation # # # pull-or-rebuild-image.sh - rebuilds the Docker image used in the # current CI build if necessary. # # The script rebuilds the Docker image if the Dockerfile for the current # OS version (Dockerfile.${OS}-${OS_VER}) or any .sh script from the directory # with Dockerfiles were modified and committed. # # If the CI build is not of the "pull_request" type (i.e. in case of # merge after pull_request) and it succeed, the Docker image should be pushed # to ${DOCKER_REPO}. An empty file is created to signal that to further scripts. # # If the Docker image does not have to be rebuilt, it will be pulled from # ${DOCKER_REPO}. 
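#
# A minimal local sketch (values are illustrative only; REBUILD_ALWAYS=YES
# forces the rebuild path regardless of which files were modified):
#
#   export OS=ubuntu OS_VER=latest HOST_WORKDIR=$(pwd)
#   REBUILD_ALWAYS=YES ./pull-or-rebuild-image.sh
#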
# set -e source $(dirname $0)/set-ci-vars.sh source $(dirname $0)/set-vars.sh function rebuild_and_push_image() { # Rebuild Docker image for the current OS version echo "Rebuilding the Docker image for the Dockerfile.$OS-$OS_VER" pushd $images_dir_name ./build-image.sh ${DOCKER_REPO} ${OS}-${OS_VER} popd # Check if the image has to be pushed to ${DOCKER_REPO} # (i.e. the build is triggered by commits to the ${GITHUB_REPO} # repository's main branch, and the CI build is not # of the "pull_request" type). In that case, create the empty # file. if [[ "${CI_REPO_SLUG}" == "${GITHUB_REPO}" && "$CI_BRANCH" == "main" \ && "$CI_EVENT_TYPE" != "pull_request" && "$PUSH_IMAGE" == "1" ]] then echo "The image will be pushed to ${DOCKER_REPO}" touch $CI_FILE_PUSH_IMAGE_TO_REPO else echo "Skipping pushing the image to ${DOCKER_REPO} ..." if [ "${CI_REPO_SLUG}" != "${GITHUB_REPO}" ]; then echo "CI_REPO_SLUG=$CI_REPO_SLUG" echo "GITHUB_REPO=$GITHUB_REPO" fi [ "$CI_BRANCH" != "main" ] && echo "CI_BRANCH=$CI_BRANCH" [ "$CI_EVENT_TYPE" == "pull_request" ] && echo "CI_EVENT_TYPE=pull_request" [ "$PUSH_IMAGE" != "1" ] && echo "PUSH_IMAGE=$PUSH_IMAGE" fi exit 0 } if [[ -z "$OS" || -z "$OS_VER" ]]; then echo "ERROR: The variables OS and OS_VER have to be set properly " \ "(eg. OS=ubuntu, OS_VER=20.04)." exit 1 fi if [[ -z "$HOST_WORKDIR" ]]; then echo "ERROR: The variable HOST_WORKDIR has to contain a path to " \ "the root of this project on the host machine" exit 1 fi # Find all the commits for the current build if [ -n "$CI_COMMIT_RANGE" ]; then commits=$(git rev-list $CI_COMMIT_RANGE) else commits=$CI_COMMIT fi echo "Commits in the commit range:" for commit in $commits; do echo $commit; done # Get the list of files modified by the commits files=$(for commit in $commits; do git diff-tree --no-commit-id --name-only \ -r $commit; done | sort -u) echo "Files modified within the commit range:" for file in $files; do echo $file; done # Path to directory with Dockerfiles and image building scripts images_dir_name=images base_dir=utils/docker/$images_dir_name # Check if committed file modifications require the Docker image to be rebuilt for file in $files; do # Check if modified files are relevant to the current build if [[ $file =~ ^($base_dir)\/Dockerfile\.($OS)-($OS_VER)$ ]] \ || [[ $file =~ ^($base_dir)\/.*\.sh$ ]] \ || [[ "$REBUILD_ALWAYS" == "YES" ]] then rebuild_and_push_image fi done # Getting here means rebuilding the Docker image is not required. # Pull the image from ${DOCKER_REPO}. If pulling fails # (e.g. there is no desired image), then rebuild the image. docker pull ${DOCKER_REPO}:${IMG_VER}-${OS}-${OS_VER} || rebuild_and_push_image rpma-1.3.0/utils/docker/run-build.sh000077500000000000000000000223151443364775400173450ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2016-2023, Intel Corporation # Copyright (c) 2023 Fujitsu Limited # # # run-build.sh - is called inside a Docker container, # starts rpma build with tests. 
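#
# The script is driven entirely by environment variables; a hypothetical
# standalone run inside the container could look like this (all values are
# placeholders, not the ones used by the CI):
#
#   WORKDIR=$(pwd) PACKAGE_MANAGER=deb CC=gcc RPMA_TEST_DIR=/tmp/rpma-test ./run-build.sh
#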
# set -e PREFIX=/usr CC=${CC:-gcc} TEST_DIR=${RPMA_TEST_DIR:-${DEFAULT_TEST_DIR}} EXAMPLE_TEST_DIR="/tmp/rpma_example_build" # turn off sanitizers only if (CI_SANITS == OFF) [ "$CI_SANITS" != "OFF" ] && CI_SANITS=ON if [ "$WORKDIR" == "" ]; then echo "Error: WORKDIR is not set" exit 1 fi if [ "$TEST_DIR" == "" ]; then echo "Error: RPMA_TEST_DIR is not set" exit 1 fi case "$PACKAGE_MANAGER" in "rpm"|"deb") # supported package managers ;; "none") # no package manager, install the library from sources echo "Notice: the librpma library will be installed from sources" PACKAGE_MANAGER="" ;; "") echo "Error: PACKAGE_MANAGER is not set" echo " Use 'rpm' or 'deb' to build packages or 'none' to install the library from sources." exit 1 ;; *) # unsupported package manager echo "Error: unsupported PACKAGE_MANAGER: $PACKAGE_MANAGER" echo " Use 'rpm' or 'deb' to build packages or 'none' to install the library from sources." exit 1 ;; esac function sudo_password() { echo $USERPASS | sudo -S $* } function upload_codecov() { printf "\n$(tput setaf 1)$(tput setab 7)COVERAGE ${FUNCNAME[0]} START$(tput sgr 0)\n" # set proper gcov command clang_used=$($CMAKE -LA -N . | grep CMAKE_C_COMPILER | grep clang | wc -c) if [[ $clang_used > 0 ]]; then gcovexe="llvm-cov gcov" else gcovexe="gcov" fi # run gcov exe, using their bash (remove parsed coverage files, set flag and exit 1 if not successful) # we rely on parsed report on codecov.io; the output is quite long, hence it's disabled using -X flag /opt/scripts/codecov -c -F $1 -Z -x "$gcovexe" -X "gcovout" printf "check for any leftover gcov files\n" leftover_files=$(find . -name "*.gcov" | wc -l) if [[ $leftover_files > 0 ]]; then # display found files and exit with error (they all should be parsed) find . -name "*.gcov" return 1 fi printf "$(tput setaf 1)$(tput setab 7)COVERAGE ${FUNCNAME[0]} END$(tput sgr 0)\n\n" } function compile_example_standalone() { rm -rf $EXAMPLE_TEST_DIR mkdir $EXAMPLE_TEST_DIR cd $EXAMPLE_TEST_DIR $CMAKE $1 # exit on error if [[ $? != 0 ]]; then cd - > /dev/null return 1 fi make -j$(nproc) cd - > /dev/null } # test standalone compilation of all examples function test_compile_all_examples_standalone() { EXAMPLES=$(ls -1 $WORKDIR/examples/) for e in $EXAMPLES; do DIR=$WORKDIR/examples/$e [ ! -d $DIR ] && continue [ ! -f $DIR/CMakeLists.txt ] && continue if [ "${LIBPROTOBUFC_FOUND}" == "NO" ]; then case $e in 09-flush-to-persistent-GPSPM|09scch-flush-to-persistent-GPSPM) echo echo "SKIP: Skipping the '$e' example, because libprotobuf-c is missing" echo continue ;; esac fi echo echo "###########################################################" echo "### Testing standalone compilation of example: $e" if [ "$PACKAGE_MANAGER" = "" ]; then echo "### (with librpma installed from RELEASE sources)" else echo "### (with librpma installed from RELEASE packages)" fi echo "###########################################################" compile_example_standalone $DIR done } ./prepare-for-build.sh # Use cmake3 instead of cmake if a version of cmake is v2.x, # because the minimum required version of cmake is 3.3. CMAKE_VERSION=$(cmake --version | grep version | cut -d" " -f3 | cut -d. 
-f1) [ "$CMAKE_VERSION" != "" ] && [ $CMAKE_VERSION -lt 3 ] && \ [ "$(which cmake3 2>/dev/null)" != "" ] && CMAKE=cmake3 || CMAKE=cmake # look for libprotobuf-c USR=$(find /usr -name "*protobuf-c.so*" || true) LIB=$(find /lib* -name "*protobuf-c.so*" || true) [ "$USR" == "" -a "$LIB" == "" ] && LIBPROTOBUFC_FOUND="NO" echo echo "##################################################################" echo "### Verify build with ASAN and UBSAN ($CC, DEBUG)" echo "##################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. -DCMAKE_BUILD_TYPE=Debug \ -DTEST_DIR=$TEST_DIR \ -DBUILD_DEVELOPER_MODE=1 \ -DDEBUG_USE_ASAN=${CI_SANITS} \ -DDEBUG_USE_UBSAN=${CI_SANITS} make -j$(nproc) ctest --output-on-failure cd $WORKDIR rm -rf $WORKDIR/build echo echo "##################################################################" echo "### Verify build and install (in dir: ${PREFIX}) ($CC, DEBUG)" echo "##################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. -DCMAKE_BUILD_TYPE=Debug \ -DTEST_DIR=$TEST_DIR \ -DCMAKE_INSTALL_PREFIX=$PREFIX \ -DTESTS_COVERAGE=$TESTS_COVERAGE \ -DBUILD_DEVELOPER_MODE=1 make -j$(nproc) ctest --output-on-failure sudo_password make -j$(nproc) install if [ "$TESTS_COVERAGE" == "1" ]; then upload_codecov tests fi test_compile_all_examples_standalone # Uninstall libraries cd $WORKDIR/build sudo_password make uninstall cd $WORKDIR rm -rf $WORKDIR/build echo echo "#####################################################################" echo "### Verify build with BUILD_FORCE_ODP_NOT_SUPPORTED=ON ($CC, DEBUG)" echo "#####################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. -DCMAKE_BUILD_TYPE=Debug \ -DTEST_DIR=$TEST_DIR \ -DBUILD_DEVELOPER_MODE=1 \ -DBUILD_FORCE_ODP_NOT_SUPPORTED=ON make -j$(nproc) || make ctest --output-on-failure cd $WORKDIR rm -rf $WORKDIR/build echo echo "#########################################################################################" echo "### Verify build with BUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED=ON ($CC, DEBUG)" echo "#########################################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. -DCMAKE_BUILD_TYPE=Debug \ -DTEST_DIR=$TEST_DIR \ -DBUILD_DEVELOPER_MODE=1 \ -DBUILD_FORCE_NATIVE_ATOMIC_WRITE_NOT_SUPPORTED=ON make -j$(nproc) || make ctest --output-on-failure cd $WORKDIR rm -rf $WORKDIR/build echo echo "#########################################################################################" echo "### Verify build with BUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED=ON ($CC, DEBUG)" echo "#########################################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. -DCMAKE_BUILD_TYPE=Debug \ -DTEST_DIR=$TEST_DIR \ -DBUILD_DEVELOPER_MODE=1 \ -DBUILD_FORCE_NATIVE_FLUSH_NOT_SUPPORTED=ON make -j$(nproc) || make ctest --output-on-failure cd $WORKDIR rm -rf $WORKDIR/build echo echo "##################################################################" echo "### Verify build and install (in dir: ${PREFIX}) ($CC, RELEASE)" echo "##################################################################" mkdir -p $WORKDIR/build cd $WORKDIR/build CC=$CC \ $CMAKE .. 
-DCMAKE_BUILD_TYPE=Release \ -DTEST_DIR=$TEST_DIR \ -DCMAKE_INSTALL_PREFIX=$PREFIX \ -DCPACK_GENERATOR=$PACKAGE_MANAGER \ -DBUILD_DEVELOPER_MODE=1 make -j$(nproc) ctest --output-on-failure # check if librpma uses rdma-core installed from sources RDMA_CORE_PATH="/rdma-core/build/lib" if ldd src/librpma.so | grep -q -e "$RDMA_CORE_PATH"; then RPMA_USES_RDMA_CORE_FROM_SOURCES=YES else RPMA_USES_RDMA_CORE_FROM_SOURCES=NO fi # If there is no supported package manager or # if librpma uses rdma-core installed from sources # install the librpma library from sources too, # otherwise make the librpma package and install it. if [ "$PACKAGE_MANAGER" = "" -o "$RPMA_USES_RDMA_CORE_FROM_SOURCES" = "YES" ]; then # install the library from sources sudo_password make -j$(nproc) install else # Do not install the library from sources here, # because it will be installed from the packages below. echo "##############################################################" echo "### Making and testing packages (RELEASE version) ..." echo "##############################################################" make -j$(nproc) package find . -iname "librpma*.$PACKAGE_MANAGER" if [ $PACKAGE_MANAGER = "deb" ]; then set -x dpkg-deb --info ./librpma*.deb || true dpkg-deb -c ./librpma*.deb || true sudo_password dpkg -i ./librpma*.deb set +x elif [ $PACKAGE_MANAGER = "rpm" ]; then set -x rpm -q --info ./librpma*.rpm || true rpm -q --list ./librpma*.rpm || true sudo_password rpm -ivh --force *.rpm set +x fi fi test_compile_all_examples_standalone if [ "$PACKAGE_MANAGER" = "" -o "$RPMA_USES_RDMA_CORE_FROM_SOURCES" = "YES" ]; then # uninstall the library, since it was installed from sources cd $WORKDIR/build sudo_password make uninstall elif [ $PACKAGE_MANAGER = "deb" ]; then echo "sudo -S dpkg --remove librpma-dev" echo $USERPASS | sudo -S dpkg --remove librpma-dev elif [ $PACKAGE_MANAGER = "rpm" ]; then echo "$ sudo -S rpm --erase librpma-devel" echo $USERPASS | sudo -S rpm --erase librpma-devel fi # Create pull requests with updated documentation or show the git diff only if [[ "$AUTO_DOC_UPDATE" == "1" || $AUTO_DOC_UPDATE == "show-diff-only" ]]; then cd $WORKDIR/build echo echo "Running auto doc update (AUTO_DOC_UPDATE=$AUTO_DOC_UPDATE)" ../utils/docker/run-doc-update.sh $AUTO_DOC_UPDATE fi cd $WORKDIR rm -rf $WORKDIR/build rpma-1.3.0/utils/docker/run-coverity.sh000077500000000000000000000042361443364775400201140ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2022, Intel Corporation # # # run-coverity.sh - runs the Coverity scan build # set -e set -x if [[ "$CI_REPO_SLUG" != "$GITHUB_REPO" \ && ( "$COVERITY_SCAN_NOTIFICATION_EMAIL" == "" \ || "$COVERITY_SCAN_TOKEN" == "" ) ]]; then echo echo "Skipping Coverity build:"\ "COVERITY_SCAN_TOKEN=\"$COVERITY_SCAN_TOKEN\" or "\ "COVERITY_SCAN_NOTIFICATION_EMAIL="\ "\"$COVERITY_SCAN_NOTIFICATION_EMAIL\" is not set" exit 0 fi # Prepare build environment ./prepare-for-build.sh CERT_FILE=/etc/ssl/certs/ca-certificates.crt TEMP_CF=$(mktemp) cp $CERT_FILE $TEMP_CF # Download Coverity certificate echo -n | openssl s_client -connect scan.coverity.com:443 | \ sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' | \ tee -a $TEMP_CF echo $USERPASS | sudo -S mv $TEMP_CF $CERT_FILE export COVERITY_SCAN_PROJECT_NAME="$CI_REPO_SLUG" export COVERITY_SCAN_BRANCH_PATTERN="$CI_BRANCH" export COVERITY_SCAN_BUILD_COMMAND="./utils/docker/coverity-command.sh $(nproc)" cd $WORKDIR # # Run the Coverity scan # # The 'travisci_build_coverity_scan.sh' script 
requires the following # environment variables to be set: # - TRAVIS_BRANCH - has to contain the name of the current branch # - TRAVIS_PULL_REQUEST - has to be set to 'true' in case of pull requests # export TRAVIS_BRANCH=${CI_BRANCH} [ "${CI_EVENT_TYPE}" == "pull_request" ] && export TRAVIS_PULL_REQUEST="true" # XXX: Patch the Coverity script. # Recently, this script regularly exits with an error, even though # the build is successfully submitted. Probably because the status code # is missing in response, or it's not 201. # Changes: # 1) change the expected status code to 200 and # 2) print the full response string. # # This change should be reverted when the Coverity script is fixed. # # The previous version was: # curl -s https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh | bash wget https://scan.coverity.com/scripts/travisci_build_coverity_scan.sh patch < utils/docker/0001-travis-fix-travisci_build_coverity_scan.sh.patch bash ./travisci_build_coverity_scan.sh [ -f /rpma/cov-int/scm_log.txt ] && cat /rpma/cov-int/scm_log.txt rpma-1.3.0/utils/docker/run-doc-update.sh000077500000000000000000000102211443364775400202640ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2018-2023, Intel Corporation # # # run-doc-update.sh - create pull requests with updated documentation # # USAGE: run-doc-update.sh [show-diff-only] # show-diff-only - do not create pull requests, show only the diff instead # set -e ARG1=$1 WORKDIR=$(pwd) USER_NAME="pmem" BOT_NAME="pmem-bot" VERSION="main" SOURCE_BRANCH=${CI_BRANCH} function set_up_repo() { local ORIGIN=$1 local UPSTREAM=$2 local REPO_NAME=$3 local BRANCH=$4 cd ${WORKDIR} git clone ${ORIGIN} ${REPO_NAME} cd ${REPO_NAME} git remote add upstream ${UPSTREAM} git config --local user.name ${BOT_NAME} git config --local user.email "${BOT_NAME}@intel.com" git remote update [ "$ARG1" == "show-diff-only" ] && return 0 # check if "upstream/${BRANCH}" is a valid branch if ! git log -1 upstream/${BRANCH} 2>/dev/null; then # BRANCH (set from ${CI_BRANCH}) is a tag, # but tags do not introduce changes in the code, # so there is no need to look for changes in the man pages. echo "Notice: it is a build from a tag - skipping updating man pages" exit 0 fi git checkout -B ${BRANCH} upstream/${BRANCH} } function commit_and_push_changes() { local ORIGIN=$1 local BRANCH_LOCAL=$2 local BRANCH_REMOTE=$3 local TITLE=$4 # Add, commit and push changes. # The 'git commit' command may fail if there is nothing to commit. # In such case we want to force push anyway (because there # might be an open pull request with changes which were reverted). git add -A git commit -m "${TITLE}" || true git push -f ${ORIGIN} ${BRANCH_LOCAL} # Makes pull request. # When there is already an open PR or there are no changes # an error is thrown, which we ignore. GITHUB_TOKEN=${DOC_UPDATE_GITHUB_TOKEN} hub pull-request -f \ -b ${USER_NAME}:${BRANCH_REMOTE} \ -h ${BOT_NAME}:${BRANCH_LOCAL} \ -m "${TITLE}" || true } # # update man pages in the "rpma" repo # ORIGIN_RPMA="https://${DOC_UPDATE_GITHUB_TOKEN}@github.com/${BOT_NAME}/rpma" UPSTREAM_RPMA="https://github.com/${USER_NAME}/rpma" TARGET_BRANCH="man-pages" [ "$ARG1" == "show-diff-only" ] && ORIGIN_RPMA=".." # set up the rpma repo set_up_repo ${ORIGIN_RPMA} ${UPSTREAM_RPMA} rpma ${SOURCE_BRANCH} # build docs mkdir ${WORKDIR}/rpma/build cd ${WORKDIR}/rpma/build cmake -DBUILD_DOC=ON -DBUILD_TESTS=OFF -DBUILD_EXAMPLES=OFF .. 
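# (illustrative aside, not part of the original flow) the same regenerated man
# pages can be previewed locally, without opening any pull requests, by calling
# this script with its optional argument:
#
#   ./utils/docker/run-doc-update.sh show-diff-only
#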
make -j$(nproc) doc # copy Markdown files outside the repo cp -R doc/md ${WORKDIR} # checkout the 'man-pages' branch cd ${WORKDIR}/rpma BRANCH_PR="doc-automatic-update-of-man-pages" git checkout -B $BRANCH_PR upstream/${TARGET_BRANCH} git clean -dfx # clean old content, since some files might have been deleted DOCS_DIR=${WORKDIR}/rpma/manpages/${VERSION}/ rm -r ${DOCS_DIR} mkdir ${DOCS_DIR} # copy new man pages cp -r ${WORKDIR}/md/*.md ${DOCS_DIR} if [ "$ARG1" == "show-diff-only" ]; then echo echo "########################################" if [ $(git diff | wc -l) -gt 0 ]; then echo "git diff of the generated documentation:" git diff else echo "No changes in the documentation." fi echo "########################################" echo exit 0 fi # add, commit and push changes to the rpma repo commit_and_push_changes ${ORIGIN_RPMA} ${BRANCH_PR} ${TARGET_BRANCH} "doc: automatic update of man pages" # # update man pages in the "pmem.github.io" repo # ORIGIN_PMEM_IO="https://${DOC_UPDATE_GITHUB_TOKEN}@github.com/${BOT_NAME}/pmem.github.io" UPSTREAM_PMEM_IO="https://github.com/${USER_NAME}/pmem.github.io" TARGET_BRANCH="main" # set up the pmem.github.io repo set_up_repo ${ORIGIN_PMEM_IO} ${UPSTREAM_PMEM_IO} pmem.github.io ${TARGET_BRANCH} # checkout the 'main' branch and copy man pages cd ${WORKDIR}/pmem.github.io BRANCH_PR="rpma-automatic-update-of-man-pages" git checkout -B $BRANCH_PR upstream/${TARGET_BRANCH} git clean -dfx # clean old content, since some files might have been deleted DOCS_DIR=${WORKDIR}/pmem.github.io/content/rpma/manpages/${VERSION}/ rm -r ${DOCS_DIR} mkdir ${DOCS_DIR} # copy new man pages cp -r ${WORKDIR}/md/*.md ${DOCS_DIR} # add, commit and push changes to the pmem.github.io repo commit_and_push_changes ${ORIGIN_PMEM_IO} ${BRANCH_PR} ${TARGET_BRANCH} "rpma: automatic update of man pages" exit 0 rpma-1.3.0/utils/docker/set-ci-vars.sh000077500000000000000000000052051443364775400176000ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2021, Intel Corporation # # set-ci-vars.sh -- set CI variables common for both: # Travis and GitHub Actions CIs # set -e # set version of Docker images (IMG_VER) source $(dirname ${BASH_SOURCE[0]})/images/set-images-version.sh function get_commit_range_from_last_merge { # get commit id of the last merge LAST_MERGE=$(git log --merges --pretty=%H -1) LAST_COMMIT=$(git log --pretty=%H -1) if [ "$LAST_MERGE" == "$LAST_COMMIT" ]; then # GitHub Actions commits its own merge in case of pull requests # so the first merge commit has to be skipped. LAST_MERGE=$(git log --merges --pretty=%H -2 | tail -n1) fi if [ "$LAST_MERGE" == "" ]; then # possible in case of shallow clones # or new repos with no merge commits yet # - pick up the first commit LAST_MERGE=$(git log --pretty=%H | tail -n1) fi COMMIT_RANGE="$LAST_MERGE..HEAD" # make sure it works now if ! git rev-list $COMMIT_RANGE >/dev/null; then COMMIT_RANGE="" fi echo $COMMIT_RANGE } COMMIT_RANGE_FROM_LAST_MERGE=$(get_commit_range_from_last_merge) if [ -n "$TRAVIS" ]; then CI_COMMIT=$TRAVIS_COMMIT CI_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" CI_BRANCH=$TRAVIS_BRANCH CI_EVENT_TYPE=$TRAVIS_EVENT_TYPE CI_REPO_SLUG=$TRAVIS_REPO_SLUG # CI_COMMIT_RANGE is usually invalid for force pushes - fix it when used # with non-upstream repository if [ -n "$CI_COMMIT_RANGE" -a "$CI_REPO_SLUG" != "$GITHUB_REPO" ]; then if ! 
git rev-list $CI_COMMIT_RANGE; then CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE fi fi case "$TRAVIS_CPU_ARCH" in "amd64") CI_CPU_ARCH="x86_64" ;; *) CI_CPU_ARCH=$TRAVIS_CPU_ARCH ;; esac elif [ -n "$GITHUB_ACTIONS" ]; then CI_COMMIT=$GITHUB_SHA CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE CI_BRANCH=$(echo $GITHUB_REF | cut -d'/' -f3) CI_REPO_SLUG=$GITHUB_REPOSITORY CI_CPU_ARCH="x86_64" # GitHub Actions supports only x86_64 case "$GITHUB_EVENT_NAME" in "schedule") CI_EVENT_TYPE="cron" ;; *) CI_EVENT_TYPE=$GITHUB_EVENT_NAME ;; esac else CI_COMMIT=$(git log --pretty=%H -1) CI_COMMIT_RANGE=$COMMIT_RANGE_FROM_LAST_MERGE CI_CPU_ARCH="x86_64" fi export CI_COMMIT=$CI_COMMIT export CI_COMMIT_RANGE=$CI_COMMIT_RANGE export CI_BRANCH=$CI_BRANCH export CI_EVENT_TYPE=$CI_EVENT_TYPE export CI_REPO_SLUG=$CI_REPO_SLUG export CI_CPU_ARCH=$CI_CPU_ARCH export CI_SANITS=$CI_SANITS export IMG_VER=$IMG_VER echo CI_COMMIT=$CI_COMMIT echo CI_COMMIT_RANGE=$CI_COMMIT_RANGE echo CI_BRANCH=$CI_BRANCH echo CI_EVENT_TYPE=$CI_EVENT_TYPE echo CI_REPO_SLUG=$CI_REPO_SLUG echo CI_CPU_ARCH=$CI_CPU_ARCH echo CI_SANITS=$CI_SANITS echo IMG_VER=$IMG_VER rpma-1.3.0/utils/docker/set-vars.sh000077500000000000000000000003441443364775400172060ustar00rootroot00000000000000#!/usr/bin/env bash # # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020, Intel Corporation # # # set-vars.sh - set required environment variables # set -e export CI_FILE_PUSH_IMAGE_TO_REPO=/tmp/push_image_to_repo_flag rpma-1.3.0/utils/mans_header.md000066400000000000000000000007101443364775400164210ustar00rootroot00000000000000--- draft: false layout: "library" slider_enable: true description: "" disclaimer: "The contents of this web site and the associated GitHub repositories are BSD-licensed open source." aliases: ["MANUAL_NAME_TO_REPLACE.html"] title: "librpma | PMDK" header: "librpma API version 1.3.0" --- {{< manpages >}} [comment]: <> (SPDX-License-Identifier: BSD-3-Clause) [comment]: <> (Copyright 2020-2023, Intel Corporation) rpma-1.3.0/utils/src2mans.sh000077500000000000000000000065651443364775400157360ustar00rootroot00000000000000#!/usr/bin/env bash # SPDX-License-Identifier: BSD-3-Clause # Copyright 2020-2023, Intel Corporation # # src2mans -- extract man pages from source files # DIR=$1 MAN_3=$2 MAN_7=$3 MANS_HEADER=$4 [ "$5" == "fix" ] && FIX=1 || FIX=0 if [ $# -lt 4 ] || [ ! -d $DIR ] || [ ! -f $MAN_3 ] || [ ! -f $MAN_7 ] || [ ! -f $MANS_HEADER ]; then echo "$ $0 $*" echo "Error: missing or wrong argument" echo echo "Usage: $(basename $0) [fix]" echo " - directory to be searched for *.h files" echo " - file containing list of section #3 manuals" echo " - file containing list of section #7 manuals" echo " - common header of markup manuals" echo " fix - fix files containing list of manuals" echo [ ! -d $DIR ] && echo "Error: $DIR does not exist or is not a directory" [ ! -f $MAN_3 ] && echo "Error: $MAN_3 does not exist or is not a regular file" [ ! -f $MAN_7 ] && echo "Error: $MAN_7 does not exist or is not a regular file" [ ! -f $MANS_HEADER ] && echo "Error: $MANS_HEADER does not exist or is not a regular file" exit 1 fi function check_manuals_list() { N=$1 LIST=$2 CURRENT=$3 FIX=$4 if ! 
diff $LIST $CURRENT; then if [ $FIX -eq 1 ]; then mv $CURRENT $LIST echo "Updated the file: $LIST" else echo "Error: current list of manuals($N) does match the file: $LIST" RV=1 fi fi } PANDOC=0 if which pandoc > /dev/null; then PANDOC=1 mkdir -p md else echo "Warning: pandoc not found, Markdown documentation will not be generated" >&2 fi ALL_MANUALS="$(mktemp)" find $DIR -name '*.h' -print0 | while read -d $'\0' MAN do MANUALS="$(mktemp)" ERRORS="$(mktemp)" src2man -r RPMA -v "RPMA Programmer's Manual" $MAN > $MANUALS 2> $ERRORS # gawk 5.0.1 does not recognize expressions \;|\,|\o as regex operator sed -i -r "/warning: regexp escape sequence \`[\][;,o]' is not a known regexp operator/d" $ERRORS # remove empty lines sed -i '/^$/d' $ERRORS if [[ -s "$ERRORS" ]]; then echo "src2man: errors found in the \"$MAN\" file:" cat $ERRORS exit 1 fi if [ $PANDOC -eq 1 ]; then for f in $(cat $MANUALS | xargs); do # get rid of a FILE section (last two lines of the file) mv $f $f.tmp head -n -2 $f.tmp > $f # Replace \fP with \fR in groff manuals, # because pandoc does not interpret \fP correctly # and generates incorrect markdown files. sed 's/\\fP/\\fR/g' $f > $f.tmp # generate a markdown file pandoc -s $f.tmp -o $f.tmp1 -f man -t markdown || break # remove the header tail -n +6 $f.tmp1 > $f.tmp2 # fix the name issue '**a **-' -> '**a** -' (fix only the line #4) sed -i '4s/ \*\*-/\*\* -/' $f.tmp2 # start with a custom header cat $MANS_HEADER | sed "s/MANUAL_NAME_TO_REPLACE/$f/g" > md/$f.md cat $f.tmp2 >> md/$f.md rm -f $f.tmp $f.tmp1 $f.tmp2 done fi # save all manuals cat $MANUALS >> $ALL_MANUALS rm $MANUALS $ERRORS done || exit 1 NEW_MAN_3="$(mktemp)" NEW_MAN_7="$(mktemp)" cat $ALL_MANUALS | grep -e '\.3' | sort > $NEW_MAN_3 cat $ALL_MANUALS | grep -e '\.7' | sort > $NEW_MAN_7 # check if all generated manuals are listed in the manuals' files RV=0 check_manuals_list 3 $MAN_3 $NEW_MAN_3 $FIX check_manuals_list 7 $MAN_7 $NEW_MAN_7 $FIX if [ $RV -eq 1 -a $FIX -eq 0 ]; then echo "In order to fix it, run 'make doc-fix'" echo fi rm -f $ALL_MANUALS $NEW_MAN_3 $NEW_MAN_7 exit $RV
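#
# A minimal usage sketch of src2mans.sh (the two manual-list files below are
# hypothetical placeholders; the real paths are supplied by the build system):
#
#   ./utils/src2mans.sh src doc/manuals_3.txt doc/manuals_7.txt utils/mans_header.md
#
# Adding the optional 'fix' argument updates the list files in place instead of
# reporting a mismatch (the same action suggested by 'make doc-fix').
#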