pax_global_header00006660000000000000000000000064147546574070014535gustar00rootroot0000000000000052 comment=122aa3e74aae8929741a7f6c7629237d8b2bf66b receptor-1.5.3/000077500000000000000000000000001475465740700133665ustar00rootroot00000000000000receptor-1.5.3/.dockerignore000066400000000000000000000003631475465740700160440ustar00rootroot00000000000000.idea receptor receptor.exe receptor.app recepcert net receptorctl-test-venv/ .container-flag* .VERSION kubectl /receptorctl/AUTHORS /receptorctl/ChangeLog /receptor-python-worker/ChangeLog /receptor-python-worker/AUTHORS .vagrant/ Dockerfile receptor-1.5.3/.github/000077500000000000000000000000001475465740700147265ustar00rootroot00000000000000receptor-1.5.3/.github/codecov.yml000066400000000000000000000021511475465740700170720ustar00rootroot00000000000000--- coverage: status: project: default: target: auto threshold: 5% patch: default: target: auto threshold: 5% comment: layout: "header, diff, files, components" # PR comment layout behavior: default require_changes: false require_base: false require_head: true after_n_builds: 1 codecov: branch: devel notify: wait_for_ci: false after_n_builds: 1 component_management: default_rules: # default rules that will be inherited by all components statuses: # Status definitions - type: project # Default status = project target: auto threshold: 5% branches: - "!devel" individual_components: # Actual component breakdown - component_id: go # Required unique identifier for component (Should not be changed) name: Go # Display name (can be changed) paths: - cmd/** - internal/** - pkg/** - component_id: receptorctl name: Receptorctl paths: - receptorctl/** ignore: - "**/mock_*" - "**/*_test.go" - "receptorctl/tests" - "tests" receptor-1.5.3/.github/dependabot.yml000066400000000000000000000015011475465740700175530ustar00rootroot00000000000000--- version: 2 updates: - package-ecosystem: "gomod" directory: "/" schedule: interval: "daily" labels: - "dependencies" - "go" ignore: - dependency-name: "golang" versions: ["1.23", "1.24"] - package-ecosystem: "github-actions" directory: "/" schedule: interval: "daily" labels: - "dependencies" - "github-actions" - package-ecosystem: "pip" directory: "/receptor-python-worker" groups: dependencies: patterns: - "*" schedule: interval: "daily" labels: - "dependencies" - "pip" - package-ecosystem: "pip" directory: "/receptorctl" groups: dependencies: patterns: - "*" schedule: interval: "daily" labels: - "dependencies" - "pip" receptor-1.5.3/.github/issue_labeler.yml000066400000000000000000000000371475465740700202670ustar00rootroot00000000000000--- needs_triage: - '.*' ... receptor-1.5.3/.github/workflows/000077500000000000000000000000001475465740700167635ustar00rootroot00000000000000receptor-1.5.3/.github/workflows/artifact-k8s-logs.sh000077500000000000000000000005071475465740700225660ustar00rootroot00000000000000#!/bin/bash PODS_DIR=/tmp/receptor-testing/K8sPods mkdir "$PODS_DIR" PODS="$(kubectl get pods --template '{{range.items}}{{.metadata.name}}{{"\n"}}{{end}}')" for pod in $PODS ; do mkdir "$PODS_DIR/$pod" kubectl get pod "$pod" --output=json > "$PODS_DIR/$pod/pod" kubectl logs "$pod" > "$PODS_DIR/$pod/logs" done receptor-1.5.3/.github/workflows/build_binary_from_ref.yml000066400000000000000000000023161475465740700240320ustar00rootroot00000000000000--- name: "Build binary from arbitratry repo / ref" on: # yamllint disable-line rule:truthy workflow_dispatch: inputs: repository: description: 'The receptor repository to build from.' 
required: true default: 'ansible/receptor' ref: description: 'The ref to build. Can be a branch or any other valid ref.' required: true default: 'devel' jobs: build: name: build runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 repository: ${{ github.event.inputs.repository }} ref: ${{ github.event.inputs.ref }} - name: Set up Go uses: actions/setup-go@v5 with: go-version: "1.22" - name: build-all target run: make receptor - name: Upload binary env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | pip install boto3 ansible -i localhost, -c local all -m aws_s3 \ -a "bucket=receptor-nightlies object=refs/${{ github.event.inputs.ref }}/receptor src=./receptor mode=put" receptor-1.5.3/.github/workflows/codeql-analysis.yml000066400000000000000000000053671475465740700226110ustar00rootroot00000000000000--- # For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: # yamllint disable-line rule:truthy push: branches: ["devel", release_*] pull_request: # The branches below must be a subset of the branches above branches: ["devel"] schedule: - cron: '18 2 * * 5' jobs: analyze: name: Analyze runs-on: ubuntu-latest permissions: actions: read contents: read security-events: write strategy: fail-fast: false matrix: language: ['go', 'python'] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs # queries: security-extended,security-and-quality # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild uses: github/codeql-action/autobuild@v3 # Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun # If the Autobuild fails above, remove it and uncomment the following three lines. # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. 
# - run: | # echo "Run, Build Application using script" # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 receptor-1.5.3/.github/workflows/coverage_reporting.yml000066400000000000000000000035021475465740700233720ustar00rootroot00000000000000--- name: Codecov on: # yamllint disable-line rule:truthy pull_request: # yamllint disable-line rule:empty-values push: branches: [devel] env: DESIRED_GO_VERSION: '1.22' jobs: go_test_coverage: name: go test coverage runs-on: ubuntu-latest strategy: fail-fast: false steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Go uses: actions/setup-go@v5 with: go-version: ${{ env.DESIRED_GO_VERSION }} - name: build and install receptor run: | make build-all sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster - name: Interact with the cluster run: kubectl get nodes - name: Run receptor tests with coverage run: make coverage - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 with: fail_ci_if_error: true verbose: true env: CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - name: get k8s logs if: ${{ failure() }} run: .github/workflows/artifact-k8s-logs.sh - name: remove sockets before archiving logs if: ${{ failure() }} run: find /tmp/receptor-testing -name controlsock -delete - name: Artifact receptor data uses: actions/upload-artifact@v4.4.3 if: ${{ failure() }} with: name: test-logs path: /tmp/receptor-testing - name: Archive receptor binary uses: actions/upload-artifact@v4.4.3 with: name: receptor path: /usr/local/bin/receptor receptor-1.5.3/.github/workflows/dependency_review.yml000066400000000000000000000005261475465740700232100ustar00rootroot00000000000000--- name: 'Dependency Review' on: [pull_request] # yamllint disable-line rule:truthy permissions: contents: read jobs: dependency-review: runs-on: ubuntu-latest steps: - name: 'Checkout Repository' uses: actions/checkout@v4 - name: 'Dependency Review' uses: actions/dependency-review-action@v4 ... 
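The coverage workflow above (coverage_reporting.yml) only passes with a Kubernetes cluster available, since part of the test suite exercises the kube work type. A minimal sketch of reproducing that run locally, assuming a Linux amd64 host with Go 1.22, curl, and kubectl on the PATH — the commands mirror the workflow steps, and the install location is an assumption:

```bash
# Build receptor and install it the same way coverage_reporting.yml does.
make build-all
sudo cp ./receptor /usr/local/bin/receptor

# The tests need a Kubernetes cluster; the workflow uses kind.
curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind
./kind create cluster
kubectl get nodes

# Writes coverage.txt (see COVERAGE_FILE in the Makefile), which CI uploads to Codecov.
make coverage
```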
receptor-1.5.3/.github/workflows/devel_image.yml000066400000000000000000000030411475465740700217450ustar00rootroot00000000000000--- name: Publish devel image on: # yamllint disable-line rule:truthy push: branches: [devel] jobs: release: runs-on: ubuntu-latest name: Push devel image steps: - uses: actions/checkout@v4 with: fetch-depth: 0 # setup qemu and buildx - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Install build dependencies run: | pip install build # we will first build the image for x86 and load it on the host for testing - name: Build Image run: | export CONTAINERCMD="docker buildx" export EXTRA_OPTS="--platform linux/amd64 --load" make container REPO=quay.io/${{ github.repository }} TAG=devel - name: Test Image run: docker run --rm quay.io/${{ github.repository }}:devel receptor --version - name: Login To Quay uses: docker/login-action@v3 with: username: ${{ secrets.QUAY_USERNAME }} password: ${{ secrets.QUAY_TOKEN }} registry: quay.io/${{ github.repository }} # Since x86 image is built in previous step # buildx will use cached image, hence overall time will not be affected - name: Build Multiarch Image & Push To Quay run: | export CONTAINERCMD="docker buildx" export EXTRA_OPTS="--platform linux/amd64,linux/ppc64le,linux/arm64 --push" make container REPO=quay.io/${{ github.repository }} TAG=devel receptor-1.5.3/.github/workflows/devel_whl.yml000066400000000000000000000015141475465740700214600ustar00rootroot00000000000000--- name: Publish nightly wheel on: # yamllint disable-line rule:truthy push: branches: [devel] jobs: sdist: runs-on: ubuntu-latest name: Build wheel steps: - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Install build dependencies run: | pip install build - name: Build wheel run: | make clean receptorctl_wheel - name: Upload wheel env: AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} run: | pip install boto3 ansible -i localhost, -c local all -m aws_s3 \ -a "bucket=receptor-nightlies object=receptorctl/receptorctl-0.0.0-py3-none-any.whl src=$(ls receptorctl/dist/*.whl | head -n 1) mode=put" receptor-1.5.3/.github/workflows/promote.yml000066400000000000000000000064261475465740700212030ustar00rootroot00000000000000--- name: Promote Release on: # yamllint disable-line rule:truthy release: types: [published] permissions: contents: write jobs: promote: runs-on: ubuntu-latest env: TAG: ${{github.event.release.tag_name}} steps: - name: Checkout Receptor uses: actions/checkout@v4 - name: Log in to GHCR run: | echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin - name: Log in to Quay run: | echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin - name: Copy Image to Quay uses: akhilerm/tag-push-action@v2.2.0 with: src: ghcr.io/${{ github.repository }}:${{env.TAG}} dst: | quay.io/${{ github.repository }}:${{env.TAG}} quay.io/${{ github.repository }}:latest - name: Check if floating tag is needed run: | if [[ $TAG == *"dev"* ]]; then echo "FLOATING_TAG=$(echo $TAG | sed 's/[0-9]\+$//')" >> $GITHUB_ENV else echo "FLOATING_TAG=$TAG" >> $GITHUB_ENV fi - name: Push floating tag to Quay uses: akhilerm/tag-push-action@v2.2.0 with: src: ghcr.io/${{ github.repository }}:${{env.TAG}} dst: quay.io/${{ github.repository }}:${{env.FLOATING_TAG}} - name: Install python uses: actions/setup-python@v5 - name: Install dependencies run: | python3 -m pip 
install twine build - name: Set official pypi info run: echo pypi_repo=pypi >> $GITHUB_ENV if: ${{ github.repository_owner == 'ansible' }} - name: Set unofficial pypi info run: echo pypi_repo=testpypi >> $GITHUB_ENV if: ${{ github.repository_owner != 'ansible' }} - name: Set receptor pypi version run: echo RECEPTORCTL_PYPI_VERSION=$(curl --silent https://pypi.org/pypi/receptorctl/json | jq --raw-output '"v" + .info.version') >> $GITHUB_ENV - name: Build receptorctl and upload to pypi if: ${{ env.RECEPTORCTL_PYPI_VERSION != env.TAG }} run: | make receptorctl_wheel receptorctl_sdist VERSION=$TAG twine upload \ -r ${{ env.pypi_repo }} \ -u ${{ secrets.PYPI_USERNAME }} \ -p ${{ secrets.PYPI_PASSWORD }} \ receptorctl/dist/* publish: runs-on: ubuntu-latest env: VERSION: ${{github.event.release.tag_name}} steps: - name: Checkout Receptor uses: actions/checkout@v4 - name: Set up Go uses: actions/setup-go@v5 with: go-version: "1.22" - name: Build packages run: | make build-package GOOS=linux GOARCH=amd64 BINNAME=receptor make build-package GOOS=linux GOARCH=arm64 BINNAME=receptor make build-package GOOS=darwin GOARCH=amd64 BINNAME=receptor make build-package GOOS=darwin GOARCH=arm64 BINNAME=receptor make build-package GOOS=windows GOARCH=amd64 BINNAME=receptor.exe make build-package GOOS=windows GOARCH=arm64 BINNAME=receptor.exe - name: Publish packages uses: softprops/action-gh-release@v2 with: files: |- dist/checksums.txt dist/*.tar.gz receptor-1.5.3/.github/workflows/pull_request.yml000066400000000000000000000136411475465740700222370ustar00rootroot00000000000000--- name: CI on: # yamllint disable-line rule:truthy pull_request: # yamllint disable-line rule:empty-values env: DESIRED_GO_VERSION: '1.22' DESIRED_GOLANGCI_LINT_VERSION: 'v1.60' DESIRED_PYTHON_VERSION: '3.12' jobs: lint-receptor: name: lint-receptor runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - uses: actions/setup-go@v5 with: go-version: ${{ env.DESIRED_GO_VERSION }} - name: golangci-lint uses: golangci/golangci-lint-action@v6 with: version: ${{ env.DESIRED_GOLANGCI_LINT_VERSION }} receptor: name: receptor (Go ${{ matrix.go-version }}) runs-on: ubuntu-latest strategy: fail-fast: false matrix: go-version: ["1.22"] steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Go uses: actions/setup-go@v5 with: go-version: ${{ matrix.go-version }} - name: build and install receptor run: | make build-all sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster - name: Interact with the cluster run: kubectl get nodes - name: Run receptor tests run: make test - name: get k8s logs if: ${{ failure() }} run: .github/workflows/artifact-k8s-logs.sh - name: remove sockets before archiving logs if: ${{ failure() }} run: find /tmp/receptor-testing -name controlsock -delete - name: Artifact receptor data for ${{ matrix.go-version }} uses: actions/upload-artifact@v4.4.3 if: ${{ failure() }} with: name: test-logs-${{ matrix.go-version }} path: /tmp/receptor-testing - name: Archive receptor binary for ${{ matrix.go-version }} uses: actions/upload-artifact@v4.4.3 with: name: receptor-${{ matrix.go-version }} path: /usr/local/bin/receptor receptorctl: name: Run receptorctl tests${{ '' }} # Nest jobs under the same sidebar category needs: receptor strategy: fail-fast: false matrix: python-version: # NOTE: The highest and the 
lowest versions come # NOTE: first as their statuses are most likely to # NOTE: signal problems early: - 3.12 - 3.8 - 3.11 - "3.10" - 3.9 uses: ./.github/workflows/reusable-nox.yml with: python-version: ${{ matrix.python-version }} session: tests-${{ matrix.python-version }} download-receptor: true go-version: '1.22' lint-receptorctl: name: Lint receptorctl${{ '' }} # Nest jobs under the same sidebar category strategy: fail-fast: false matrix: session: - check_style - check_format uses: ./.github/workflows/reusable-nox.yml with: python-version: '3.12' session: ${{ matrix.session }} container: name: container runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v5 with: python-version: ${{ env.DESIRED_PYTHON_VERSION }} - name: Install python dependencies run: pip install build - name: Build container run: make container REPO=receptor LATEST=yes - name: Write out basic config run: | cat << EOF > test.cfg --- - local-only: - control-service: service: control filename: /tmp/receptor.sock - work-command: worktype: cat command: cat EOF - name: Run receptor (and wait a few seconds for it to boot) run: | podman run --name receptor -d -v $PWD/test.cfg:/etc/receptor/receptor.conf:Z localhost/receptor sleep 3 podman logs receptor - name: Submit work and assert the output we expect run: | output=$(podman exec -i receptor receptorctl work submit cat -l 'hello world' -f) echo $output if [[ "$output" != "hello world" ]]; then echo "Output did not contain expected value" exit 1 fi receptorctl-test-coverage: name: Receptorctl test coverage needs: receptor runs-on: ubuntu-latest strategy: fail-fast: false matrix: session: - coverage steps: - name: Download the `receptor` binary uses: actions/download-artifact@v4 with: name: receptor-${{ env.DESIRED_GO_VERSION }} path: /usr/local/bin/ - name: Set executable bit on the `receptor` binary run: sudo chmod a+x /usr/local/bin/receptor - name: Set up nox uses: wntrblm/nox@2025.02.09 with: python-versions: ${{ env.DESIRED_PYTHON_VERSION }} - name: Check out the source code from Git uses: actions/checkout@v4 with: fetch-depth: 0 # Needed for the automation in Nox to find the last tag - name: Provision nox environment for ${{ matrix.session }} run: nox --install-only --session ${{ matrix.session }} working-directory: ./receptorctl - name: Run `receptorctl` nox ${{ matrix.session }} session run: nox --no-install --session ${{ matrix.session }} working-directory: ./receptorctl - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 with: directory: receptorctl files: receptorctl_coverage.xml fail_ci_if_error: true token: ${{ secrets.CODECOV_TOKEN }} verbose: true receptor-1.5.3/.github/workflows/reusable-nox.yml000066400000000000000000000034331475465740700221150ustar00rootroot00000000000000--- name: Receptorctl nox sessions on: # yamllint disable-line rule:truthy workflow_call: inputs: python-version: type: string description: The Python version to use. required: true session: type: string description: The nox session to run. required: true download-receptor: type: boolean description: Whether to perform go binary download. required: false default: false go-version: type: string description: The Go version to use. 
required: false env: FORCE_COLOR: 1 NOXSESSION: ${{ inputs.session }} jobs: nox: runs-on: ubuntu-latest name: >- # can't use `env` in this context: Run `receptorctl` ${{ inputs.session }} session steps: - name: Download the `receptor` binary if: fromJSON(inputs.download-receptor) uses: actions/download-artifact@v4 with: name: receptor-${{ inputs.go-version }} path: /usr/local/bin/ - name: Set executable bit on the `receptor` binary if: fromJSON(inputs.download-receptor) run: sudo chmod a+x /usr/local/bin/receptor - name: Set up nox uses: wntrblm/nox@2025.02.09 with: python-versions: ${{ inputs.python-version }} - name: Check out the source code from Git uses: actions/checkout@v4 with: fetch-depth: 0 # Needed for the automation in Nox to find the last tag sparse-checkout: receptorctl - name: Provision nox environment for ${{ env.NOXSESSION }} run: nox --install-only working-directory: ./receptorctl - name: Run `receptorctl` nox ${{ env.NOXSESSION }} session run: nox --no-install working-directory: ./receptorctl receptor-1.5.3/.github/workflows/scorecard.yml000066400000000000000000000060061475465740700214550ustar00rootroot00000000000000# This workflow uses actions that are not certified by GitHub. They are provided # by a third-party and are governed by separate terms of service, privacy # policy, and support documentation. name: Scorecard supply-chain security on: # For Branch-Protection check. Only the default branch is supported. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection branch_protection_rule: # To guarantee Maintained check is occasionally updated. See # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained schedule: - cron: '41 15 * * 4' push: branches: ["devel"] # Declare default permissions as read only. permissions: read-all jobs: analysis: name: Scorecard analysis runs-on: ubuntu-latest permissions: # Needed to upload the results to code-scanning dashboard. security-events: write # Needed to publish results and get a badge (see publish_results below). id-token: write # Uncomment the permissions below if installing in a private repository. # contents: read # actions: read steps: - name: "Checkout code" uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: persist-credentials: false - name: "Run analysis" uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif # (Optional) "write" PAT token. Uncomment the `repo_token` line below if: # - you want to enable the Branch-Protection check on a *public* repository, or # - you are installing Scorecard on a *private* repository # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional. # repo_token: ${{ secrets.SCORECARD_TOKEN }} # Public repositories: # - Publish results to OpenSSF REST API for easy access by consumers # - Allows the repository to include the Scorecard badge. # - See https://github.com/ossf/scorecard-action#publishing-results. # For private repositories: # - `publish_results` will always be set to `false`, regardless # of the value entered here. publish_results: true # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF # format to the repository Actions tab. 
- name: "Upload artifact" uses: actions/upload-artifact@184d73b71b93c222403b2e7f1ffebe4508014249 # v4.4.2 with: name: SARIF file path: results.sarif retention-days: 5 # Upload the results to GitHub's code scanning dashboard (optional). # Commenting out will disable upload of results to your repo's Code Scanning dashboard - name: "Upload to code-scanning" uses: github/codeql-action/upload-sarif@v3 with: sarif_file: results.sarif receptor-1.5.3/.github/workflows/stage.yml000066400000000000000000000060341475465740700206140ustar00rootroot00000000000000--- name: Stage Release on: # yamllint disable-line rule:truthy workflow_dispatch: inputs: version: description: 'Version to release. (x.y.z) Will create a tag / draft release.' required: true default: '' ref: description: 'The ref to tag. Can only be the 40 character SHA.' required: true default: '' confirm: description: 'Are you sure? Set this to yes.' required: true default: 'no' name: description: 'Name of the person in single quotes who will create a tag / draft release.' required: true default: '' type: string email: description: 'Email of the person who will create a tag / draft release.' required: true default: '' env: DESIRED_PYTHON_VERSION: '3.12' jobs: stage: runs-on: ubuntu-latest permissions: packages: write contents: write steps: - name: Verify inputs run: | set -e if [[ ${{ github.event.inputs.confirm }} != "yes" ]]; then >&2 echo "Confirm must be 'yes'" exit 1 fi if [[ ${{ github.event.inputs.version }} == "" ]]; then >&2 echo "Set version to continue." exit 1 fi exit 0 - name: Checkout receptor uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.ref }} - name: Install python uses: actions/setup-python@v5 with: python-version: ${{ env.DESIRED_PYTHON_VERSION }} - name: Install dependencies run: | python3 -m pip install build # setup qemu and buildx - name: Set up QEMU uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Log in to registry run: | echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - name: Lowercase github owner run: | echo "OWNER=$(echo 'console.log("${{ github.repository_owner }}".toLowerCase())' | node -)" >> $GITHUB_ENV - name: Build container image run: | make container CONTAINERCMD="docker buildx" EXTRA_OPTS="--platform linux/amd64,linux/ppc64le,linux/arm64 --push" REPO=ghcr.io/${{ env.OWNER }}/receptor VERSION=v${{ github.event.inputs.version }} LATEST=yes - name: Get current time uses: josStorer/get-current-time@v2 id: current-time - name: Create draft release run: | ansible-playbook tools/ansible/stage.yml \ -e version="${{ github.event.inputs.version }}" \ -e repo=${{ env.OWNER }}/receptor \ -e github_token=${{ secrets.GITHUB_TOKEN }} \ -e target_commitish="${{ github.event.inputs.ref }}" \ -e tagger_name="${{ github.event.inputs.name }}" \ -e tagger_email="${{ github.event.inputs.email }}" \ -e time="${{ steps.current-time.outputs.time }}" \ -v receptor-1.5.3/.github/workflows/test-reporting.yml000066400000000000000000000047361475465740700225060ustar00rootroot00000000000000--- name: Generate junit test report on: # yamllint disable-line rule:truthy pull_request: # yamllint disable-line rule:empty-values push: branches: [devel] env: DESIRED_GO_VERSION: '1.22' jobs: generate_junit_test_report: name: go test coverage runs-on: ubuntu-latest strategy: fail-fast: false steps: - name: Checkout uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Go uses: actions/setup-go@v5 with: go-version: ${{ 
env.DESIRED_GO_VERSION }} - name: build and install receptor run: | make build-all sudo cp ./receptor /usr/local/bin/receptor - name: Download kind binary run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind - name: Create k8s cluster run: ./kind create cluster - name: Interact with the cluster run: kubectl get nodes - name: Install go junit reporting run: go install github.com/jstemmer/go-junit-report/v2@latest - name: Run receptor tests run: go test -v 2>&1 $(go list ./... | grep -vE '/tests/|mock_|example') | go-junit-report > report.xml - name: Upload test results to dashboard if: >- !cancelled() && github.event_name == 'push' && github.repository == 'ansible/receptor' && github.ref_name == github.event.repository.default_branch run: >- curl -v --user "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}:${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}" --form "xunit_xml=@report.xml" --form "component_name=receptor" --form "git_commit_sha=${{ github.sha }}" --form "git_repository_url=https://github.com/${{ github.repository }}" "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}/api/results/upload/" - name: get k8s logs if: ${{ failure() }} run: .github/workflows/artifact-k8s-logs.sh - name: remove sockets before archiving logs if: ${{ failure() }} run: find /tmp/receptor-testing -name controlsock -delete - name: Artifact receptor data uses: actions/upload-artifact@v4 if: ${{ failure() }} with: name: test-logs path: /tmp/receptor-testing - name: Archive receptor binary uses: actions/upload-artifact@v4 with: name: receptor path: /usr/local/bin/receptor receptor-1.5.3/.github/workflows/triage_new.yml000066400000000000000000000007351475465740700216370ustar00rootroot00000000000000--- name: Triage on: # yamllint disable-line rule:truthy issues: types: - opened jobs: triage: runs-on: ubuntu-latest name: Label steps: - name: Label issues uses: github/issue-labeler@v3.4 with: repo-token: "${{ secrets.GITHUB_TOKEN }}" not-before: 2021-12-07T07:00:00Z configuration-path: .github/issue_labeler.yml enable-versioned-regex: 0 if: github.event_name == 'issues' receptor-1.5.3/.gitignore000066400000000000000000000006261475465740700153620ustar00rootroot00000000000000.DS_Store .idea .vscode kind receptor receptor.exe receptor.app recepcert /net .container-flag* .VERSION kubectl /receptorctl/.nox /receptorctl/.VERSION /receptorctl/AUTHORS /receptorctl/ChangeLog /receptor-python-worker/.VERSION /receptor-python-worker/ChangeLog /receptor-python-worker/AUTHORS /receptorctl/venv/ receptorctl-test-venv/ .vagrant/ /docs/build /dist /test-configs coverage.txt venv /vendor receptor-1.5.3/.gitleaks.toml000066400000000000000000000002211475465740700161370ustar00rootroot00000000000000[allowlist] description = "Global Allowlist" paths = [ '''pkg/certificates/ca_test.go''', '''pkg/netceptor/tlsconfig_test.go''', ] receptor-1.5.3/.golangci.yml000066400000000000000000000063271475465740700157620ustar00rootroot00000000000000--- run: timeout: 10m linters: disable-all: true enable: - asciicheck - bodyclose - depguard - dogsled - durationcheck - exportloopref - gci - gocritic - godot - gofmt - gofumpt - goheader - goimports - gomodguard - gosec - gosimple - govet - importas - ineffassign - makezero - misspell - nakedret - nilerr - nlreturn - noctx - nolintlint - prealloc - predeclared - rowserrcheck - sqlclosecheck - staticcheck - stylecheck - tparallel - typecheck - unconvert - unused - wastedassign - whitespace linters-settings: depguard: rules: main: files: - "$all" - "!$test" - 
"!**/functional/**/*.go" allow: - "$gostd" - "github.com/ansible/receptor/internal/version" - "github.com/ansible/receptor/cmd" - "github.com/ansible/receptor/pkg" - "github.com/creack/pty" - "github.com/fsnotify/fsnotify" - "github.com/ghjm/cmdline" - "github.com/golang-jwt/jwt/v4" - "github.com/google/shlex" - "github.com/gorilla/websocket" - "github.com/jupp0r/go-priority-queue" - "github.com/minio/highwayhash" - "github.com/pbnjay/memory" - "github.com/quic-go/quic-go" - "github.com/rogpeppe/go-internal/lockedfile" - "github.com/songgao/water" - "github.com/vishvananda/netlink" - "github.com/spf13/viper" - "github.com/spf13/cobra" - "k8s.io/api/core" - "k8s.io/apimachinery/pkg" - "k8s.io/client-go" - "github.com/grafana/pyroscope-go" - "github.com/sirupsen/logrus" tests: files: - "$test" - "**/functional/**/*.go" allow: - "$gostd" - "github.com/ansible/receptor/pkg" - "github.com/ansible/receptor/tests/utils" - "github.com/fortytw2/leaktest" - "github.com/fsnotify/fsnotify" - "github.com/gorilla/websocket" - "github.com/prep/socketpair" - "github.com/google/go-cmp/cmp" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/remotecommand" - "github.com/quic-go/quic-go" - "github.com/quic-go/quic-go/logging" - "github.com/AaronH88/quic-go" issues: # Dont commit the following line. # It will make CI pass without telling you about errors. # fix: true exclude: - "lostcancel" # TODO: Context is not canceled on multiple occasions. Needs more detailed work to be fixed. - "SA2002|thelper|testinggoroutine" # TODO: Test interface used outside of its routine, tests need to be rewritten. - "G306" # TODO: Restrict perms of touched files. - "G402|G404" # TODO: Make TLS more secure. - "G204" # gosec is throwing a fit, ignore. ... receptor-1.5.3/.readthedocs.yaml000066400000000000000000000020351475465740700166150ustar00rootroot00000000000000--- # Read the Docs configuration file for Sphinx projects # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-lts-latest tools: golang: "1.22" python: "3.12" # You can also specify other tool versions: # nodejs: "20" # rust: "1.70" # Build documentation in the "docs/" directory with Sphinx sphinx: configuration: docs/source/conf.py # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs # builder: "dirhtml" # Fail on all warnings to avoid broken references fail_on_warning: true # Optionally build your docs in additional formats such as PDF and ePub # formats: # - pdf # - epub # Optional but recommended, declare the Python requirements required # to build your documentation # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - requirements: docs/source/requirements.txt ... receptor-1.5.3/.sonarcloud.properties000066400000000000000000000001771475465740700177400ustar00rootroot00000000000000sonar.python.version=3.8, 3.9, 3.11, 3.12 sonar.sources=. sonar.test.inclusions=**/*_test.go, receptorctl/tests/ sonar.tests=. 
receptor-1.5.3/LICENSE.md000066400000000000000000000221301475465740700147700ustar00rootroot00000000000000Apache License ============== _Version 2.0, January 2004_ _<>_ ### Terms and Conditions for use, reproduction, and distribution #### 1. Definitions “License” shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. “Licensor” shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. “Legal Entity” shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, “control” means **(i)** the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the outstanding shares, or **(iii)** beneficial ownership of such entity. “You” (or “Your”) shall mean an individual or Legal Entity exercising permissions granted by this License. “Source” form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. “Object” form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. “Work” shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). “Derivative Works” shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. “Contribution” shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as “Not a Contribution.” “Contributor” shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. #### 2. 
Grant of Copyright License Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. #### 3. Grant of Patent License Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. #### 4. Redistribution You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: * **(a)** You must give any other recipients of the Work or Derivative Works a copy of this License; and * **(b)** You must cause any modified files to carry prominent notices stating that You changed the files; and * **(c)** You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and * **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. #### 5. 
Submission of Contributions Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. #### 6. Trademarks This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. #### 7. Disclaimer of Warranty Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. #### 8. Limitation of Liability In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. #### 9. Accepting Warranty or Additional Liability While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. receptor-1.5.3/Makefile000066400000000000000000000161761475465740700150410ustar00rootroot00000000000000# If the current git commit has been tagged, use as the version. (e.g. v1.4.5) # Otherwise the version is +git (e.g. 1.4.5+f031d2) OFFICIAL_VERSION := $(shell if VER=`git describe --exact-match --tags 2>/dev/null`; then echo $$VER; else echo ""; fi) ifneq ($(OFFICIAL_VERSION),) VERSION := $(OFFICIAL_VERSION) else ifneq ($(shell git tag --list),) VERSION := $(shell git describe --tags | cut -d - -f -1)+git$(shell git rev-parse --short HEAD) else ifeq ($(VERSION),) VERSION := $(error No tags found in git repository) # else VERSION was passed as a command-line argument to make endif # When building Receptor, tags can be used to remove undesired # features. 
This is primarily used for deploying Receptor in a # security sensitive role, where it is desired to have no possibility # of a service being accidentally enabled. Features are controlled # using the TAGS environment variable, which is a comma delimeted # list of zero or more of the following: # # no_controlsvc: Disable the control service # # no_backends: Disable all backends (except external via the API) # no_tcp_backend: Disable the TCP backend # no_udp_backend: Disable the UDP backend # no_websocket_backend: Disable the websocket backent # # no_services: Disable all services # no_proxies: Disable the TCP, UDP and Unix proxy services # no_ip_router: Disable the IP router service # # no_tls_config: Disable the ability to configure TLS server/client configs # # no_workceptor: Disable the unit-of-work subsystem (be network only) # # no_cert_auth: Disable commands related to CA and certificate generation TAGS ?= ifeq ($(TAGS),) TAGPARAM= else TAGPARAM=--tags $(TAGS) endif DEBUG ?= ifeq ($(DEBUG),1) DEBUGFLAGS=-gcflags=all="-N -l" else DEBUGFLAGS= endif GO ?= go receptor: $(shell find pkg -type f -name '*.go') ./cmd/receptor-cl/receptor.go CGO_ENABLED=0 GOFLAGS="-buildvcs=false" $(GO) build \ -o receptor \ $(DEBUGFLAGS) \ -ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \ $(TAGPARAM) \ ./cmd/receptor-cl clean: @rm -fv .container-flag* @rm -fv .VERSION @rm -rfv dist/ @rm -fv $(KUBECTL_BINARY) @rm -fv packaging/container/receptor @rm -rfv packaging/container/RPMS/ @rm -fv packaging/container/*.whl @rm -fv receptor receptor.exe receptor.app net @rm -fv receptorctl/dist/* @rm -fv receptor-python-worker/dist/* @rm -rfv receptorctl-test-venv/ ARCH=amd64 OS=linux KUBECTL_BINARY=./kubectl STABLE_KUBECTL_VERSION=$(shell curl --silent https://storage.googleapis.com/kubernetes-release/release/stable.txt) kubectl: if [ "$(wildcard $(KUBECTL_BINARY))" != "" ]; \ then \ FOUND_KUBECTL_VERSION=$$(./kubectl version --client=true | head --lines=1 | cut --delimiter=' ' --field=3); \ else \ FOUND_KUBECTL_VERSION=; \ fi if [ "${FOUND_KUBECTL_VERSION}" != "$(STABLE_KUBECTL_VERSION)" ]; \ then \ curl \ --location \ --output $(KUBECTL_BINARY) \ https://storage.googleapis.com/kubernetes-release/release/$(STABLE_KUBECTL_VERSION)/bin/$(OS)/$(ARCH)/kubectl; \ chmod 0700 $(KUBECTL_BINARY); \ fi lint: @golint cmd/... pkg/... example/... receptorctl-lint: receptor receptorctl/.VERSION @cd receptorctl && nox -s lint format: @find cmd/ pkg/ -type f -name '*.go' -exec $(GO) fmt {} \; fmt: format pre-commit: @pre-commit run --all-files build-all: @echo "Running Go builds..." 
&& \ GOOS=windows $(GO) build \ -o receptor.exe \ ./cmd/receptor-cl && \ GOOS=darwin $(GO) build \ -o receptor.app \ ./cmd/receptor-cl && \ $(GO) build \ example/*.go && \ $(GO) build \ -o receptor \ -ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \ ./cmd/receptor-cl BINNAME='receptor' CHECKSUM_PROGRAM='sha256sum' GOARCH=$(ARCH) GOOS=$(OS) DIST := receptor_$(shell echo '$(VERSION)' | sed 's/^v//')_$(GOOS)_$(GOARCH) build-package: @echo "Building and packaging binary for $(GOOS)/$(GOARCH) as dist/$(DIST).tar.gz" && \ mkdir -p dist/$(DIST) && \ GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 $(GO) build \ -o dist/$(DIST)/$(BINNAME) \ $(DEBUGFLAGS) \ -ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \ $(TAGPARAM) \ ./cmd/receptor-cl && \ tar -C dist/$(DIST) -zcf dist/$(DIST).tar.gz $(BINNAME) && \ cd dist/ && \ $(CHECKSUM_PROGRAM) $(DIST).tar.gz >> checksums.txt RUNTEST ?= ifeq ($(RUNTEST),) TESTCMD = else TESTCMD = -run $(RUNTEST) endif BLOCKLIST='/tests/|mock_|example' COVERAGE_FILE='coverage.txt' coverage: build-all PATH="${PWD}:${PATH}" \ $(GO) test $$($(GO) list ./... | grep -vE $(BLOCKLIST)) \ $(TESTCMD) \ -count=1 \ -cover \ -covermode=atomic \ -coverprofile=$(COVERAGE_FILE) \ -race \ -timeout 5m test: receptor PATH="${PWD}:${PATH}" \ $(GO) test $$($(GO) list ./...) \ $(TESTCMD) \ -count=1 \ -race \ -timeout 5m receptorctl-test: receptorctl/.VERSION receptor @cd receptorctl && nox -s tests testloop: receptor @i=1; while echo "------ $$i" && \ make test; do \ i=$$((i+1)); done kubetest: kubectl ./kubectl get nodes version: @echo $(VERSION) > .VERSION @echo ".VERSION created for $(VERSION)" receptorctl/.VERSION: echo $(VERSION) > $@ RECEPTORCTL_WHEEL = receptorctl/dist/receptorctl-$(VERSION:v%=%)-py3-none-any.whl $(RECEPTORCTL_WHEEL): receptorctl/README.md receptorctl/.VERSION $(shell find receptorctl/receptorctl -type f -name '*.py') @cd receptorctl && python3 -m build --wheel receptorctl_wheel: $(RECEPTORCTL_WHEEL) RECEPTORCTL_SDIST = receptorctl/dist/receptorctl-$(VERSION:v%=%).tar.gz $(RECEPTORCTL_SDIST): receptorctl/README.md receptorctl/.VERSION $(shell find receptorctl/receptorctl -type f -name '*.py') @cd receptorctl && python3 -m build --sdist receptorctl_sdist: $(RECEPTORCTL_SDIST) receptor-python-worker/.VERSION: echo $(VERSION) > $@ RECEPTOR_PYTHON_WORKER_WHEEL = receptor-python-worker/dist/receptor_python_worker-$(VERSION:v%=%)-py3-none-any.whl $(RECEPTOR_PYTHON_WORKER_WHEEL): receptor-python-worker/README.md receptor-python-worker/.VERSION $(shell find receptor-python-worker/receptor_python_worker -type f -name '*.py') @cd receptor-python-worker && python3 -m build --wheel # Container command can be docker or podman CONTAINERCMD ?= podman # Repo without tag REPO := quay.io/ansible/receptor # TAG is VERSION with a '-' instead of a '+', to avoid invalid image reference error. TAG := $(subst +,-,$(VERSION)) # Set this to tag image as :latest in addition to :$(VERSION) LATEST := EXTRA_OPTS ?= space := $(subst ,, ) CONTAINER_FLAG_FILE = .container-flag-$(VERSION)$(subst $(space),,$(subst /,,$(EXTRA_OPTS))) container: $(CONTAINER_FLAG_FILE) $(CONTAINER_FLAG_FILE): $(RECEPTORCTL_WHEEL) $(RECEPTOR_PYTHON_WORKER_WHEEL) @tar --exclude-vcs-ignores -czf packaging/container/source.tar.gz . 
@cp $(RECEPTORCTL_WHEEL) packaging/container @cp $(RECEPTOR_PYTHON_WORKER_WHEEL) packaging/container $(CONTAINERCMD) build $(EXTRA_OPTS) packaging/container --build-arg VERSION=$(VERSION:v%=%) -t $(REPO):$(TAG) $(if $(LATEST),-t $(REPO):latest,) touch $@ tc-image: container @cp receptor packaging/tc-image/ @$(CONTAINERCMD) build packaging/tc-image -t receptor-tc .PHONY: lint format fmt pre-commit build-all test clean testloop container version receptorctl-tests kubetest receptor-1.5.3/README.md000066400000000000000000000071241475465740700146510ustar00rootroot00000000000000# Receptor [![codecov](https://codecov.io/gh/ansible/receptor/branch/devel/graph/badge.svg?token=RAW5Bvh3hM)](https://codecov.io/gh/ansible/receptor)[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/ansible/receptor/badge)](https://api.securityscorecards.dev/projects/github.com/ansible/receptor) Receptor is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. Receptor nodes establish peer-to-peer connections with each other via existing networks. Once connected, the Receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures. See the readthedocs page for Receptor at: ## Terminology and Concepts * _Receptor_: The Receptor application taken as a whole, that typically runs as a daemon. * _Receptorctl_: A user-facing command line used to interact with Receptor, typically over a Unix domain socket. * _Netceptor_: The networking part of Receptor. Usable as a Go library. * _Workceptor_: The unit-of-work handling of Receptor, which makes use of Netceptor. Also usable as a Go library. * _Node_: A single running instance of Receptor. * _Node ID_: An arbitrary string identifying a single node, analogous to an IP address. * _Service_: An up-to-8-character string identifying an endpoint on a Receptor node that can receive messages. Analogous to a port number in TCP or UDP. * _Backend_: A type of connection that Receptor nodes can pass traffic over. Current backends include TCP, UDP and websockets. * _Control Service_: A built-in service that usually runs under the name `control`. Used to report status and to launch and monitor work. ## How to Get It The easiest way to check out Receptor is to run it as a container. Images are kept on the Quay registry. To use this, run: ```bash [docker|podman] pull quay.io/ansible/receptor [docker|podman] run -d -v /path/to/receptor.conf:/etc/receptor/receptor.conf:Z receptor ``` ## Use as a Go library This code can be imported and used from Go programs. The main libraries are: * _Netceptor_: * _Workceptor_: See the `example/` directory for examples of using these libraries from Go. ## Use as a command-line tool The `receptor` command runs a Receptor node with access to all included backends and services. See `receptor --help` for details. The command line is organized into entities which take parameters, like: `receptor --entity1 param1=value1 param2=value1 --entity2 param1=value2 param2=value2`. In this case we are configuring two things, `entity1` and `entity2`, each of which takes two parameters. Distinct entities are marked with a double dash, and bare parameters attach to the immediately preceding entity. Receptor can also take its configuration from a file in YAML format. 
The allowed directives are the same as on the command line, with a top-level list of entities and each entity receiving zero or more parameters as a dict. The above command in YAML format would look like this: ```bash --- - entity1: param1: value1 param2: value1 - entity2: param1: value2 param2: value2 ``` ## Python Receptor and the 0.6 versions As of June 25th, this repo is the Go implementation of Receptor. If you are looking for the older Python version of Receptor, including any 0.6.x version, it is now located at . receptor-1.5.3/cmd/000077500000000000000000000000001475465740700141315ustar00rootroot00000000000000receptor-1.5.3/cmd/config.go000066400000000000000000000170401475465740700157270ustar00rootroot00000000000000package cmd import ( "fmt" "os" "reflect" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/certificates" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/services" "github.com/ansible/receptor/pkg/types" "github.com/ansible/receptor/pkg/workceptor" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) type Initer interface { Init() error } type Preparer interface { Prepare() error } type Runer interface { Run() error } type Reloader interface { Reload() error } type ReceptorConfig struct { // Used pointer structs to apply defaults to config Node *types.NodeCfg Trace logger.TraceCfg LogLevel *logger.LoglevelCfg `mapstructure:"log-level"` ControlServices []*controlsvc.CmdlineConfigUnix `mapstructure:"control-services"` TLSClients []netceptor.TLSClientConfig `mapstructure:"tls-clients"` TLSServer []netceptor.TLSServerConfig `mapstructure:"tls-servers"` WorkCommands []workceptor.CommandWorkerCfg `mapstructure:"work-commands"` WorkKubernetes []*workceptor.KubeWorkerCfg `mapstructure:"work-kubernetes"` WorkSigning workceptor.SigningKeyPrivateCfg `mapstructure:"work-signing"` WorkVerification workceptor.VerifyingKeyPublicCfg `mapstructure:"work-verification"` IPRouters []services.IPRouterCfg TCPClients []services.TCPProxyOutboundCfg `mapstructure:"tcp-clients"` TCPServers []services.TCPProxyInboundCfg `mapstructure:"tcp-servers"` UDPClients []services.TCPProxyInboundCfg `mapstructure:"udp-clients"` UDPServers []services.TCPProxyInboundCfg `mapstructure:"udp-servers"` UnixSocketClients []services.UnixProxyOutboundCfg `mapstructure:"unix-socket-clients"` UnixSocketServers []services.UnixProxyInboundCfg `mapstructure:"unix-socket-servers"` } type CertificatesConfig struct { InitCA certificates.InitCAConfig `mapstructure:"cert-init"` MakeReq []certificates.MakeReqConfig `mapstructure:"cert-makereqs"` SignReq []certificates.SignReqConfig `mapstructure:"cert-signreqs"` } type BackendConfig struct { TCPListeners []*backends.TCPListenerCfg `mapstructure:"tcp-listeners"` UDPListeners []*backends.UDPListenerCfg `mapstructure:"udp-listeners"` WSListeners []*backends.WebsocketListenerCfg `mapstructure:"ws-listeners"` TCPPeers []*backends.TCPDialerCfg `mapstructure:"tcp-peers"` UDPPeers []*backends.UDPDialerCfg `mapstructure:"udp-peers"` WSPeers []*backends.WebsocketDialerCfg `mapstructure:"ws-peers"` LocalOnly backends.NullBackendCfg `mapstructure:"local-only"` } func PrintPhaseErrorMessage(configName string, phase string, err error) { fmt.Printf("ERROR: %s for %s on %s phase\n", err, configName, phase) } func ParseReceptorConfig(configFile string) (*ReceptorConfig, error) { var receptorConfig ReceptorConfig err := viper.Unmarshal(&receptorConfig) if 
err != nil { return nil, err } return &receptorConfig, nil } func ParseCertificatesConfig(configFile string) (*CertificatesConfig, error) { var certifcatesConfig CertificatesConfig err := viper.Unmarshal(&certifcatesConfig) if err != nil { return nil, err } return &certifcatesConfig, nil } func ParseBackendConfig(configFile string) (*BackendConfig, error) { var backendConfig BackendConfig err := viper.Unmarshal(&backendConfig) if err != nil { return nil, err } return &backendConfig, nil } func isConfigEmpty(v reflect.Value) bool { isEmpty := true for i := 0; i < v.NumField(); i++ { if reflect.Value.IsZero(v.Field(i)) { continue } isEmpty = false } return isEmpty } func RunConfigV2(v reflect.Value) { phases := []string{"Init", "Prepare", "Run"} for _, phase := range phases { for i := 0; i < v.NumField(); i++ { if reflect.Value.IsZero(v.Field(i)) { continue } switch v.Field(i).Kind() { case reflect.Slice: for j := 0; j < v.Field(i).Len(); j++ { RunPhases(phase, v.Field(i).Index(j)) } default: RunPhases(phase, v.Field(i)) } } } } // RunPhases runs the appropriate function (Init, Prepare, Run) on a command. func RunPhases(phase string, v reflect.Value) { cmd := v.Interface() var err error if phase == "Init" { switch c := cmd.(type) { case Initer: err = c.Init() if err != nil { PrintPhaseErrorMessage(v.Type().Name(), phase, err) } default: } } if phase == "Prepare" { switch c := cmd.(type) { case Preparer: err = c.Prepare() if err != nil { PrintPhaseErrorMessage(v.Type().Name(), phase, err) } default: } } if phase == "Run" { switch c := cmd.(type) { case Runer: err = c.Run() if err != nil { PrintPhaseErrorMessage(v.Type().Name(), phase, err) } default: } } } // ReloadServices iterates through key/values calling reload on applicable services. func ReloadServices(v reflect.Value) { for i := 0; i < v.NumField(); i++ { // if the services is not initialised, skip if reflect.Value.IsZero(v.Field(i)) { continue } var err error switch v.Field(i).Kind() { case reflect.Slice: // iterate over all the type fields for j := 0; j < v.Field(i).Len(); j++ { serviceItem := v.Field(i).Index(j).Interface() switch c := serviceItem.(type) { // check to see if the selected type field satisfies reload // call reload on cfg object case Reloader: err = c.Reload() if err != nil { PrintPhaseErrorMessage(v.Type().Name(), "reload", err) } // if cfg object does not satisfy, do nothing default: } } // runs for non slice fields default: switch c := v.Field(i).Interface().(type) { case Reloader: err = c.Reload() if err != nil { PrintPhaseErrorMessage(v.Type().Name(), "reload", err) } default: } } } } func RunConfigV1() { cl := cmdline.NewCmdline() cl.AddConfigType("node", "Specifies the node configuration of this instance", types.NodeCfg{}, cmdline.Required, cmdline.Singleton) cl.AddConfigType("local-only", "Runs a self-contained node with no backend", backends.NullBackendCfg{}, cmdline.Singleton) cl.AddConfigType("pyroscope-client", "Profile Receptor using Pyroscope, client ", types.ReceptorPyroscopeCfg{}, cmdline.Singleton) // Add registered config types from imported modules for _, appName := range []string{ "receptor-version", "receptor-logging", "receptor-tls", "receptor-certificates", "receptor-control-service", "receptor-command-service", "receptor-ip-router", "receptor-proxies", "receptor-backends", "receptor-workers", } { cl.AddRegisteredConfigTypes(appName) } osArgs := os.Args[1:] err := cl.ParseAndRun(osArgs, []string{"Init", "Prepare", "Run"}, cmdline.ShowHelpIfNoArgs) if err != nil { fmt.Printf("Error: %s\n", err) 
os.Exit(1) } if cl.WhatRan() != "" { // We ran an exclusive command, so we aren't starting any back-ends os.Exit(0) } configPath := "" for i, arg := range osArgs { if arg == "--config" || arg == "-c" { if len(osArgs) > i+1 { configPath = osArgs[i+1] } break } } // only allow reloading if a configuration file was provided. If ReloadCL is // not set, then the control service reload command will fail if configPath != "" { // create closure with the passed in args to be ran during a reload reloadParseAndRun := func(toRun []string) error { return cl.ParseAndRun(osArgs, toRun) } err = controlsvc.InitReload(configPath, reloadParseAndRun) if err != nil { fmt.Printf("Error: %s\n", err) os.Exit(1) } } } receptor-1.5.3/cmd/defaults.go000066400000000000000000000050671475465740700162770ustar00rootroot00000000000000package cmd import "github.com/ansible/receptor/pkg/types" func SetTCPListenerDefaults(config *BackendConfig) { for _, listener := range config.TCPListeners { if listener.Cost == 0 { listener.Cost = 1.0 } if listener.BindAddr == "" { listener.BindAddr = "0.0.0.0" } } } func SetUDPListenerDefaults(config *BackendConfig) { for _, listener := range config.UDPListeners { if listener.Cost == 0 { listener.Cost = 1.0 } if listener.BindAddr == "" { listener.BindAddr = "0.0.0.0" } } } func SetWSListenerDefaults(config *BackendConfig) { for _, listener := range config.WSListeners { if listener.Cost == 0 { listener.Cost = 1.0 } if listener.BindAddr == "" { listener.BindAddr = "0.0.0.0" } if listener.Path == "" { listener.BindAddr = "/" } } } func SetUDPPeerDefaults(config *BackendConfig) { for _, peer := range config.UDPPeers { if peer.Cost == 0 { peer.Cost = 1.0 } if !peer.Redial { peer.Redial = true } } } func SetTCPPeerDefaults(config *BackendConfig) { for _, peer := range config.TCPPeers { if peer.Cost == 0 { peer.Cost = 1.0 } if !peer.Redial { peer.Redial = true } } } func SetWSPeerDefaults(config *BackendConfig) { for _, peer := range config.WSPeers { if peer.Cost == 0 { peer.Cost = 1.0 } if !peer.Redial { peer.Redial = true } } } func SetCmdlineUnixDefaults(config *ReceptorConfig) { for _, service := range config.ControlServices { if service.Permissions == 0 { service.Permissions = 0o600 } if service.Service == "" { service.Service = "control" } } } func SetLogLevelDefaults(config *ReceptorConfig) { if config.LogLevel == nil { return } if config.LogLevel.Level == "" { config.LogLevel.Level = "error" } } func SetNodeDefaults(config *ReceptorConfig) { if config.Node == nil { config.Node = &types.NodeCfg{} } if config.Node.DataDir == "" { config.Node.DataDir = "/tmp/receptor" } } func SetKubeWorkerDefaults(config *ReceptorConfig) { for _, worker := range config.WorkKubernetes { if worker.AuthMethod == "" { worker.AuthMethod = "incluster" } if worker.StreamMethod == "" { worker.StreamMethod = "logger" } } } func SetReceptorConfigDefaults(config *ReceptorConfig) { SetCmdlineUnixDefaults(config) SetLogLevelDefaults(config) SetNodeDefaults(config) SetKubeWorkerDefaults(config) } func SetBackendConfigDefaults(config *BackendConfig) { SetTCPListenerDefaults(config) SetUDPListenerDefaults(config) SetWSListenerDefaults(config) SetTCPPeerDefaults(config) SetUDPPeerDefaults(config) SetWSPeerDefaults(config) } receptor-1.5.3/cmd/receptor-cl/000077500000000000000000000000001475465740700163505ustar00rootroot00000000000000receptor-1.5.3/cmd/receptor-cl/receptor.go000066400000000000000000000015611475465740700205250ustar00rootroot00000000000000package main import ( "os" "github.com/ansible/receptor/cmd" 
"github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" ) func main() { logger := logger.NewReceptorLogger("") var isV2 bool newArgs := []string{} for _, arg := range os.Args { if arg == "--config-v2" { isV2 = true continue } newArgs = append(newArgs, arg) } os.Args = newArgs if isV2 { logger.Info("Running v2 cli/config") cmd.Execute() } else { cmd.RunConfigV1() } for _, arg := range os.Args { if arg == "--help" || arg == "-h" { os.Exit(0) } } if netceptor.MainInstance.BackendCount() == 0 { logger.Warning("Nothing to do - no backends are running.\n") logger.Warning("Run %s --help for command line instructions.\n", os.Args[0]) os.Exit(1) } logger.Info("Initialization complete\n") <-netceptor.MainInstance.NetceptorDone() } receptor-1.5.3/cmd/root.go000066400000000000000000000101011475465740700154340ustar00rootroot00000000000000package cmd import ( "fmt" "os" "reflect" receptorVersion "github.com/ansible/receptor/internal/version" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/fsnotify/fsnotify" "github.com/spf13/cobra" "github.com/spf13/viper" ) var ( cfgFile string version bool backendConfig *BackendConfig ) // rootCmd represents the base command when called without any subcommands. var rootCmd = &cobra.Command{ Use: "receptor", Short: "Run a receptor instance.", Long: ` Receptor is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. Receptor nodes establish peer-to-peer connections with each other via existing networks. Once connected, the receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures.`, Run: handleRootCommand, } func Execute() { err := rootCmd.Execute() if err != nil { os.Exit(1) } } func init() { cobra.OnInitialize(initConfig) rootCmd.Flags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/receptor.yaml)") rootCmd.Flags().BoolVar(&version, "version", false, "Show the Receptor version") } // initConfig reads in config file and ENV variables if set. 
func initConfig() { l := logger.NewReceptorLogger("") if cfgFile != "" { viper.SetConfigFile(cfgFile) } else { home, err := os.UserHomeDir() cobra.CheckErr(err) viper.AddConfigPath(home) viper.SetConfigType("yaml") viper.SetConfigName("receptor") } viper.AutomaticEnv() viper.OnConfigChange(func(e fsnotify.Event) { l.Info("Config file changed: %s\n", e.Name) var newConfig *BackendConfig viper.Unmarshal(&newConfig) // used because OnConfigChange runs twice for some reason // allows to skip empty first config isEmpty := isConfigEmpty(reflect.ValueOf(*newConfig)) if isEmpty { return } SetBackendConfigDefaults(newConfig) isEqual := reflect.DeepEqual(*backendConfig, *newConfig) if !isEqual { // fmt.Println("reloading backends") // this will do a reload of all reloadable services // TODO: Optimize to only reload services that have config change // NOTE: Make sure to account for two things // if current config had two services then new config has zero cancel those backends // if services has two items in a slice and one of them has changed iterate and reload on changed service netceptor.MainInstance.CancelBackends() l.Info("Reloading backends") ReloadServices(reflect.ValueOf(*newConfig)) backendConfig = newConfig return } l.Info("No reloadable backends were found.") }) // TODO: use env to turn off watch config viper.WatchConfig() err := viper.ReadInConfig() if err == nil { fmt.Fprintln(os.Stdout, "Using config file:", viper.ConfigFileUsed()) } } func handleRootCommand(cmd *cobra.Command, args []string) { if version { fmt.Println(receptorVersion.Version) os.Exit(0) } if cfgFile == "" && viper.ConfigFileUsed() == "" { fmt.Fprintln(os.Stderr, "Could not locate config file (default is $HOME/receptor.yaml)") os.Exit(1) } receptorConfig, err := ParseReceptorConfig(cfgFile) if err != nil { fmt.Printf("unable to decode into struct, %v", err) os.Exit(1) } certifcatesConfig, err := ParseCertificatesConfig(cfgFile) if err != nil { fmt.Printf("unable to decode into struct, %v", err) os.Exit(1) } backendConfig, err = ParseBackendConfig(cfgFile) if err != nil { fmt.Printf("unable to decode into struct, %v", err) os.Exit(1) } isEmptyReceptorConfig := isConfigEmpty(reflect.ValueOf(*receptorConfig)) isEmptyReloadableServicesConfig := isConfigEmpty(reflect.ValueOf(*backendConfig)) RunConfigV2(reflect.ValueOf(*certifcatesConfig)) if isEmptyReceptorConfig && isEmptyReloadableServicesConfig { fmt.Println("empty receptor config, skipping...") os.Exit(0) } SetReceptorConfigDefaults(receptorConfig) SetBackendConfigDefaults(backendConfig) RunConfigV2(reflect.ValueOf(*receptorConfig)) RunConfigV2(reflect.ValueOf(*backendConfig)) } receptor-1.5.3/docs/000077500000000000000000000000001475465740700143165ustar00rootroot00000000000000receptor-1.5.3/docs/Makefile000066400000000000000000000017711475465740700157640ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) @echo -e " doc8 doc8 linter" @echo -e " lint All documentation linters (including linkcheck)" @echo -e " rstcheck rstcheck linter" .PHONY: doc8 help lint Makefile rstcheck server doc8: doc8 --ignore D001 . # Documentation linters lint: doc8 linkcheck rstcheck rstcheck: -rstcheck --recursive --warn-unknown-settings . 
# Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). # Includes linkcheck %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) server: python3 -m http.server receptor-1.5.3/docs/diagrams/000077500000000000000000000000001475465740700161055ustar00rootroot00000000000000receptor-1.5.3/docs/diagrams/AddListenerBackend.md000066400000000000000000000071371475465740700221050ustar00rootroot00000000000000```mermaid sequenceDiagram participant Application participant Netceptor participant Netceptor closures participant Netceptor connectInfo ReadChan participant Netceptor connectInfo WriteChan Application->>+Netceptor: New Netceptor-->>-Application: netceptor instance n1 Application->>+Backend tcp.go: NewTCPListener Backend tcp.go-->>-Application: backendsTCPListener b1 Application->>+Netceptor: AddBackend(b1) Netceptor->>Netceptor: backendCancel(context) Netceptor->>+Backend tcp.go:(Backend Interface) b1.Start(context, waitgroup) Backend tcp.go->>+Backend utils.go:listenerSession(context, waitgroup, logger, ListenFunc, AcceptFunc, CancelFunc) Backend utils.go->>+Backend tcp.go:ListenFunc() Backend tcp.go-->>-Backend utils.go:error Backend utils.go->>Backend utils.go:create chan BackendSession Backend utils.go-)+Backend sessChan Closure:go closure loop Every time AcceptFunc is called Backend sessChan Closure->>Backend sessChan Closure:AcceptFunc end Backend utils.go-->>-Backend tcp.go:chan (Backend Interface) BackendsSession Backend tcp.go-->>-Netceptor: chan (Backend Interface) BackendsSession Netceptor->>+Netceptor closures: go closure Netceptor closures->>+Netceptor closures: go closure Netceptor closures->>Netceptor closures: runProtocol(context, backend session, backendInfo) Netceptor closures->>+Netceptor connectInfo ReadChan: make (chan []byte) Netceptor closures->>+Netceptor connectInfo WriteChan: make (chan []byte) Netceptor closures-)Netceptor closures: protoReader(backend session) loop Backend sessChan Closure->>Netceptor closures: sess.Recv(1 * time.Second) Netceptor closures->>Netceptor connectInfo ReadChan: ci.ReadChan <- buf end loop Netceptor connectInfo WriteChan->>Netceptor closures: message, more = <-ci.WriteChan Netceptor closures->>Backend sessChan Closure: sess.Send(message) end Netceptor closures-)Netceptor closures: protoWriter(backend session) Netceptor closures-)+Netceptor closures: s.sendInitialConnectMessage(ci, initDoneChan) Netceptor closures-)Netceptor closures: s.translateStructToNetwork(MsgTypeRoute, s.makeRoutingUpdate(0)) Netceptor closures-)-Netceptor closures: context.Done() Netceptor connectInfo ReadChan->>Netceptor closures: data = <-ci.ReadChan alt established alt MsgTypeData Netceptor closures->>Netceptor closures: translateDataToMessage(data []byte) else MsgTypeRoute Netceptor closures->>Netceptor closures: s.handleRoutingUpdate(ri, remoteNodeID) else MsgTypeServiceAdvertisement Netceptor closures->>Netceptor closures: s.handleServiceAdvertisement(data, remoteNodeID) else MsgTypeReject Netceptor closures->>Netceptor closures: return error end else !established alt msgType == MsgTypeRoute Netceptor closures->>Netceptor closures: add connection else msgType == MsgTypeReject Netceptor closures->>Netceptor closures: return error end end Netceptor closures->>Netceptor connectInfo WriteChan: ci.WriteChan <- ConnectMessage Netceptor closures->>-Netceptor closures: wg Done() Backend sessChan Closure->>Backend sessChan Closure:CancelFunc Backend sessChan 
Closure-->>-Application:context.Done() Netceptor->>Netceptor closures: wg Done() Netceptor closures-->>-Netceptor: Netceptor connectInfo ReadChan-->>-Netceptor closures: context.Done() Netceptor connectInfo WriteChan-->>-Netceptor closures: context.Done() ``` receptor-1.5.3/docs/make.bat000066400000000000000000000014371475465740700157300ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd receptor-1.5.3/docs/requirements.txt000066400000000000000000000000671475465740700176050ustar00rootroot00000000000000doc8 pbr rstcheck >= 6 six sphinx sphinx_ansible_theme receptor-1.5.3/docs/source/000077500000000000000000000000001475465740700156165ustar00rootroot00000000000000receptor-1.5.3/docs/source/conf.py000066400000000000000000000132071475465740700171200ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = "receptor" copyright = "Red Hat Ansible" author = "Red Hat Ansible" # The full version, including alpha/beta/rc tags # release = '0.0.0' # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autosectionlabel", "pbr.sphinxext", ] autosectionlabel_prefix_document = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ["Thumbs.db", ".DS_Store"] pygments_style = "ansible" language = "en" master_doc = "index" source_suffix = ".rst" # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_ansible_theme" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
# html_static_path = ['_static'] sidebar_collapse = False # -- Options for HTML output ------------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = "receptorrdoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } latex_documents = [ (master_doc, "receptor.tex", "receptor Documentation", "Red Hat Ansible", "manual"), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ("receptorctl/receptorctl_index", "receptorctl", "receptor client", [author], 1), ( "receptorctl/receptorctl_connect", "receptorctl-connect", "Establishes a connection between local client and a Receptor node.", [author], 1, ), ("receptorctl/receptorctl_ping", "receptorctl-ping", "Tests the network reachability of Receptor nodes.", [author], 1), ("receptorctl/receptorctl_reload", "receptorctl-reload", "Reloads the Receptor configuration for the connected node.", [author], 1), ("receptorctl/receptorctl_status", "receptorctl-status", "Displays the status of the Receptor network.", [author], 1), ( "receptorctl/receptorctl_traceroute", "receptorctl-traceroute", "Displays the network route that packets follow to Receptor nodes.", [author], 1, ), ( "receptorctl/receptorctl_version", "receptorctl-version", "Displays version information for receptorctl and\ the Receptor node to which it is connected.", [author], 1, ), ("receptorctl/receptorctl_work_cancel", "receptorctl-work-cancel", "Terminates one or more units of work.", [author], 1), ("receptorctl/receptorctl_work_list", "receptorctl-work-list", "Displays known units of work.", [author], 1), ("receptorctl/receptorctl_work_release", "receptorctl-work-release", "Deletes one or more units of work.", [author], 1), ("receptorctl/receptorctl_work_results", "receptorctl-work-results", "Gets results for units of work.", [author], 1), ("receptorctl/receptorctl_work_submit", "receptorctl-work-submit", "Requests a Receptor node to run a unit of work.", [author], 1), ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "receptor", "receptor Documentation", author, "receptor", "Overlay network to establish a persistent mesh.", "Miscellaneous", ), ] # -- Options for QtHelp output ------------------------------------------- # -- Options for linkcheck builder --------------------------------------- linkcheck_report_timeouts_as_broken = False linkcheck_timeout = 30 # -- Options for xml builder --------------------------------------------- xml_pretty = True # -- Options for C domain ------------------------------------------------ # -- Options for C++ domain ---------------------------------------------- # -- Options for Python domain ------------------------------------------- # -- Options for Javascript domain --------------------------------------- receptor-1.5.3/docs/source/contributing.rst000066400000000000000000000044601475465740700210630ustar00rootroot00000000000000****************** Contributor guide ****************** Receptor is an open source project that lives at https://github.com/ansible/receptor .. contents:: :local: =============== Code of conduct =============== All project contributors must abide by the `Ansible Code of Conduct `_. ============ Contributing ============ Receptor welcomes community contributions! See the :ref:`dev_guide` for information about receptor development. ------------- Pull requests ------------- Contributions to Receptor go through the Github pull request process. An initial checklist for your change to increase the likelihood of acceptance: - No issues when running linters/code checkers - No issues from unit/functional tests - Write descriptive and meaningful commit messages. See `How to write a Git commit message `_ and `Learn to write good commit message and description `_. =============== Release process =============== Before starting the release process verify that `make test` and `go test tests/goroutines/simple_config.go` tests pass. Maintainers have the ability to run the `Stage Release`_ workflow. Running this workflow will: - Build and push the container image to ghcr.io. This serves as a staging environment where the image can be tested. - Create a draft release at ``_ After the draft release has been created, edit it and populate the description. Once you are done, click "Publish release". After the release is published, the `Promote Release `_ workflow will run automatically. This workflow will: - Publish ``receptorctl`` to `PyPI `_. - Pull the container image from ghcr.io, re-tag, and push to `Quay.io `_. - Build binaries for various OSes/platforms, and attach them to the `release `_. .. note:: If you need to re-run `Stage Release`_ more than once, delete the tag beforehand to prevent the workflow from failing. .. _Stage Release: https://github.com/ansible/receptor/actions/workflows/stage.yml receptor-1.5.3/docs/source/developer_guide.rst000066400000000000000000000311211475465740700215100ustar00rootroot00000000000000.. _dev_guide: =============== Developer guide =============== Receptor is an open source project that lives at `ansible/receptor repository ` .. contents:: :local: See the :ref:`contributing:contributing` for more general details. --------- Debugging --------- ^^^^^^^^^^^ Unix Socket ^^^^^^^^^^^ If you don't want to use receptorctl to control nodes, `socat ` can be used to interact with unix sockets directly. Example: .. 
code-block:: bash echo -e '{"command": "work", "subcommand": "submit", "node": "execution", "worktype": "cat", }\n"Hi"' | socat - UNIX-CONNECT:/tmp/control.sock ------- Linters ------- All code must pass a suite of Go linters. There is a pre-commit yaml file in the Receptor repository that points to the linter suite. It is strongly recommmended to install the pre-commit yaml so that the linters run locally on each commit. .. code-block:: bash cd $HOME go get github.com/golangci/golangci-lint/cmd/golangci-lint pip install pre-commit cd receptor pre-commit install See `Pre commit `_ and `Golangci-lint `_ for more details on installing and using these tools. ------- Testing ------- ^^^^^^^^^^^ Development ^^^^^^^^^^^ Write unit tests for new features or functionality. Add/Update tests for bug fixes. ^^^^^^^ Mocking ^^^^^^^ We are using gomock to generate mocks for our unit tests. The mocks are living inside of a package under the real implementation, prefixed by ``mock_``. An example is the package mock_workceptor under pkg/workceptor. In order to genenerate a mock for a particular file, run: .. code-block:: bash mockgen -source=pkg/filename.go -destination=pkg/mock_pkg/mock_filename.go For example, to create/update mocks for Workceptor, we can run: .. code-block:: bash mockgen -source=pkg/workceptor/workceptor.go -destination=pkg/workceptor/mock_workceptor/workceptor.go ^^^^^^^^^^ Kubernetes ^^^^^^^^^^ Some of the tests require access to a Kubernetes cluster; these tests will load in the kubeconfig file located at ``$HOME/.kube/config``. One simple way to make these tests work is to start minikube locally before running ``make test``. See https://minikube.sigs.k8s.io/docs/start/ for more information about minikube. To skip tests that depend on Kubernetes, set environment variable ``export SKIP_KUBE=1``. ^^^^^^^^^ Execution ^^^^^^^^^ Pull requests must pass a suite of unit and integration tests before being merged into ``devel``. ``make test`` will run the full test suite locally. ----------- Source code ----------- The following sections help orient developers to the Receptor code base and provide a starting point for understanding how Receptor works. ^^^^^^^^^^^^^^^^^^^^^ Parsing receptor.conf ^^^^^^^^^^^^^^^^^^^^^ Let's see how items in the config file are mapped to Golang internals. As an example, in tcp.go .. code-block:: go cmdline.RegisterConfigTypeForApp("receptor-backends", "tcp-peer", "Make an outbound backend connection to a TCP peer", TCPDialerCfg{}, cmdline.Section(backendSection)) "tcp-peer" is a top-level key (action item) in receptor.conf .. code-block:: yaml tcp-peers: - address: localhost:2222 ``RegisterConfigTypeForApp`` tells the cmdline parser that "tcp-peer" is mapped to the ``TCPDialerCfg{}`` structure. ``main()`` in ``receptor.go`` is the entry point for a running Receptor process. In ``receptor.go`` (modified for clarity): .. code-block:: go cl.ParseAndRun("receptor.conf", []string{"Init", "Prepare", "Run"}) A Receptor config file has many action items, such as ```node``, ``work-command``, and ``tcp-peer``. ``ParseAndRun`` is how each of these items are instantiated when Receptor starts. Specifically, ParseAndRun will run the Init, Prepare, and Run methods associated with each action item. Here is the Prepare method for ``TCPDialerCfg``. By the time this code executes, the cfg structure has already been populated with the data provided in the config file. .. code-block:: go // Prepare verifies the parameters are correct. 
func (cfg TCPDialerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } return nil } This simply does a check to make sure the provided Cost is valid. The Run method for the ``TCPDialerCfg`` object: .. code-block:: go // Run runs the action. func (cfg TCPDialerCfg) Run() error { logger.Debug("Running TCP peer connection %s\n", cfg.Address) host, _, err := net.SplitHostPort(cfg.Address) if err != nil { return err } tlscfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLS, host, "dns") if err != nil { return err } b, err := NewTCPDialer(cfg.Address, cfg.Redial, tlscfg) if err != nil { logger.Error("Error creating peer %s: %s\n", cfg.Address, err) return err } err = netceptor.MainInstance.AddBackend(b, cfg.Cost, nil) if err != nil { return err } return nil } This gets a new TCP dialer object and passes it to the netceptor ``AddBackend`` method, so that it can be processed further. ``AddBackend`` will start proper Go routines that periodically dial the address defined in the TCP dialer structure, which will lead to a proper TCP connection to another Receptor node. In general, when studying how the start up process works in Receptor, take a look at the ``Init``, ``Prepare``, and ``Run`` methods throughout the code, as these are the entry points to running those specific components of Receptor. ^^^^ Ping ^^^^ Studying how pings work in Receptor will provide a useful glimpse into the internal workings of netceptor -- the main component of Receptor that handles connections and data traffic over the mesh. ``receptorctl --socket /tmp/foo.sock ping bar`` The control-service on `foo` will receive this command and subsequently call the following, **ping.go::ping** .. code-block:: go func ping(nc *netceptor.Netceptor, target string, hopsToLive byte) (time.Duration, string, error) { pc, err := nc.ListenPacket("") ``target`` is the target node, "bar" in this case. ``nc.ListenPacket("")`` starts a new ephemeral service and returns a ``PacketConn`` object. This is a datagram connection that has a WriteTo() and ReadFrom() method for sending and receiving data to other nodes on the mesh. **packetconn.go::ListenPacket** .. code-block:: go pc := &PacketConn{ s: s, localService: service, recvChan: make(chan *messageData), advertise: false, adTags: nil, connType: ConnTypeDatagram, hopsToLive: s.maxForwardingHops, } s.listenerRegistry[service] = pc return pc, nil ``s`` is the main netceptor object, and a reference to the PacketConn object is stored in netceptor's ``listenerRegistry`` map. **ping.go::ping** .. code-block:: go _, err = pc.WriteTo([]byte{}, nc.NewAddr(target, "ping")) Sends an empty message to the address "bar:ping" on the mesh. Recall that nodes are analogous to DNS names, and services are like port numbers. ``WriteTo`` calls ``sendMessageWithHopsToLive`` **netceptor.go::sendMessageWithHopsToLive** .. code-block:: go md := &messageData{ FromNode: s.nodeID, FromService: fromService, ToNode: toNode, ToService: toService, HopsToLive: hopsToLive, Data: data, } return s.handleMessageData(md) Here the message is constructed with essential information such as the source node and service, and the destination node and service. The Data field contains the actual message, which is empty in this case. ``handleMessageData`` calls ``forwardMessage`` with the ``md`` object. **netceptor.go::forwardMessage** .. 
code-block:: go nextHop, ok := s.routingTable[md.ToNode] The current node might not be directly connected to the target node, and thus netceptor needs to determine what is the next hop to pass the data to. ``s.routingTable`` is a map where the key is a destination ("bar"), and the value is the next hop along the path to that node. In a simple two-node setup with `foo` and `bar`, ``s.routingTable["bar"] == "bar"``. **netceptor.go::forwardMessage** .. code-block:: go c, ok := s.connections[nextHop] c.WriteChan <- message ``c`` here is a ``ConnInfo`` object, which interacts with the various backend connections (UDP, TCP, websockets). ``WriteChan`` is a golang channel. Channels allows communication between separate threads (Go routines) running in the application. When `foo` and `bar` had first started, they established a backend connection. Each node runs the netceptor runProtocol go routine, which in turn starts a protoWriter go routine. **netceptor.go::protoWriter** .. code-block:: go case message, more := <-ci.WriteChan: err := sess.Send(message) So before the "ping" command was issued, this protoWriter Go routine was already running and waiting to read messages from WriteChan. ``sess`` is a BackendSession object. BackendSession is an abstraction over the various available backends. If `foo` and `bar` are connected via TCP, then ``sess.Send(message)`` will pass along data to the already established TCP session. **tcp.go::Send** .. code-block:: go func (ns *TCPSession) Send(data []byte) error { buf := ns.framer.SendData(data) n, err := ns.conn.Write(buf) ``ns.conn`` is net.Conn object, which is part of the Golang standard library. At this point the message has left the node via a backend connection, where it will be received by `bar`. Let's review the code from `bar`'s perspective and how it handles the incoming message that is targeting its "ping" service. On the receiving side, the data will first be read here **tcp.go::Recv** .. code-block:: go n, err := ns.conn.Read(buf) ns.framer.RecvData(buf[:n]) Recv was called in protoReader Go routine, similar to the protoWriter when the message sent from `foo`. Note that ``ns.conn.Read(buf)`` might not contain the full message, so the data is buffered until the ``messageReady()`` returns true. The size of the message is tagged in the message itself, so when Recv has received N bytes, and the message is N bytes, Recv will return. **netceptor.go::protoReader** .. code-block:: go buf, err := sess.Recv(1 * time.Second) ci.ReadChan <- buf The data is passed to a ReadChan channel. **netceptor.go::runProtocol** .. code-block:: go case data := <-ci.ReadChan: message, err := s.translateDataToMessage(data) err = s.handleMessageData(message) The data is read from the channel, and deserialized into an actual message format in ``translateDataToMessage``. **netceptor.go::handleMessageData** .. code-block:: go if md.ToNode == s.nodeID { handled, err := s.dispatchReservedService(md) This checks whether the destination node indicated in the message is the current node. If so, the message can be dispatched to the service. "ping" is a reserved service in the netceptor instance. .. code-block:: go s.reservedServices = map[string]func(*messageData) error{ "ping": s.handlePing, } **netceptor.go::handlePing** .. code-block:: go func (s *Netceptor) handlePing(md *messageData) error { return s.sendMessage("ping", md.FromNode, md.FromService, []byte{}) } This is the ping reply handler. It sends an empty message to the FromNode (`foo`). 
The FromService here is not "ping", but rather the ephemeral service that was created from ``ListenPacket("")`` in ping.go on `foo`. With ``trace`` enabled in the Receptor configuration, the following log statements show the reply from ``bar``, .. code-block:: bash TRACE --- Received data length 0 from foo:h73opPEh to bar:ping via foo TRACE --- Sending data length 0 from bar:ping to foo:h73opPEh So the ephemeral service on `foo` is called h73opPEh (randomly generated string). From here, the message from `bar` will passed along in a very similar fashion as the original ping message sent from `foo`. Back on node `foo`, the message is received receive the message where it is finally handled in ping.go **ping.go::ping** .. code-block:: go _, addr, err := pc.ReadFrom(buf) .. code-block:: go case replyChan <- fromNode: .. code-block:: go case remote := <-replyChan: return time.Since(startTime), remote, nil The data is read from the PacketConn object, written to a channel, where it is read later by the ping() function, and ping() returns with the roundtrip delay, ``time.Since(startTime)``. receptor-1.5.3/docs/source/getting_started_guide/000077500000000000000000000000001475465740700221625ustar00rootroot00000000000000receptor-1.5.3/docs/source/getting_started_guide/creating_a_basic_network.rst000066400000000000000000000033041475465740700277220ustar00rootroot00000000000000 .. _creating_a_basic_network: ############################### Creating a basic 3-node network ############################### In this section, we will create a three-node network. The three nodes are: foo, bar, and baz. `foo -> bar <- baz` foo and baz are directly connected to bar with TCP connections. foo can reach baz by sending network packets through bar. *********************** Receptor configurations *********************** 1. Create three configuration files, one for each node. ``foo.yml`` .. code-block:: yaml - node: id: foo - control-service: service: control filename: /tmp/foo.sock - tcp-peer: address: localhost:2222 - log-level: level: debug ... ``bar.yml`` .. code-block:: yaml --- - node: id: bar - control-service: service: control filename: /tmp/bar.sock - tcp-listener: port: 2222 - log-level: level: debug ... ``baz.yml`` .. code-block:: yaml --- - node: id: baz - control-service: service: control filename: /tmp/baz.sock - tcp-peer: address: localhost:2222 - log-level: level: debug - work-command: workType: echo command: bash params: "-c \"while read -r line; do echo $line; sleep 1; done\"" allowruntimeparams: true ... 2. Run the services in separate terminals. .. code-block:: bash ./receptor --config foo.yml .. code-block:: bash ./receptor --config bar.yml .. code-block:: bash ./receptor --config baz.yml .. seealso:: :ref:`configuring_receptor_with_a_config_file` Configuring Receptor with a configuration file :ref:`connecting_nodes` Detail on connecting receptor nodes receptor-1.5.3/docs/source/getting_started_guide/index.rst000066400000000000000000000014721475465740700240270ustar00rootroot00000000000000############################# Getting started with Receptor ############################# Receptor is an overlay network that distributes work across large and dispersed collections of worker nodes. Receptor nodes establish peer-to-peer connections through existing networks. Once connected, the Receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures. .. image:: mesh.png .. 
toctree:: :maxdepth: 1 :caption: Contents: introduction installing_receptor creating_a_basic_network trying_sample_commands .. seealso:: :ref:`interacting_with_nodes` Further examples of working with nodes :ref:`connecting_nodes` Detail on connecting receptor nodes receptor-1.5.3/docs/source/getting_started_guide/installing_receptor.rst000066400000000000000000000006661475465740700267730ustar00rootroot00000000000000 .. _installing_receptor: ################### Installing Receptor ################### 1. `Download receptor `_ 2. Install receptor (per installation guide below) 3. Install receptorctl .. code-block:: bash pip install receptorctl .. seealso:: :ref:`installing` Detailed installation instructions :ref:`using_receptor_containers` Using receptor in containers receptor-1.5.3/docs/source/getting_started_guide/introduction.rst000066400000000000000000000007411475465740700254370ustar00rootroot00000000000000######################## Introduction to receptor ######################## Receptor is an overlay network. It eases the work distribution across a large and dispersed collection of workers Receptor nodes establish peer-to-peer connections with each other through existing networks Once connected, the receptor mesh provides: * Datagram (UDP-like) and stream (TCP-like) capabilities to applications * Robust unit-of-work handling * Resiliency against transient network failures receptor-1.5.3/docs/source/getting_started_guide/mesh.png000066400000000000000000000563501475465740700236350ustar00rootroot00000000000000PNG  IHDREaoiCCPicc(u;KA?%D"jbB"BPKM|5%le7A`c!X6 (?W#a$2{?̹̜GXײVk W7Ifs 3Qz?z$-aCp%PF**NWSLC8Z fV;7[\/s::F$`7O^< JHo@ԢtMJMOrvŶ?]9)8*W%/w⺮rzHNT ϡcZͪ#D7neoֶgI pHYs  ~ IDATx^Sֆ ĮHQTT Q/" ^^"Bbǎ(4EE *w?$'9)z<''{;Y{ZS/b$" " " " " "P@Kl-" " " " " MQpĪ@D@D@D@D@D@@ H(8bU " " " " " Cs@D@D@D@D@Dx*9 " " " " "PpR< XJ`" uL@Gz%:vhgۛE @X)Ѷ" " " B` .0|} @Qn#@;wYkW_}eXc2&G@>7-ƍ]'m1(WR-A."PڬNu䁀<@T" " " " " /@"PnzO?c=֬Jfȑ'4?Ymn5h AI&|̚5࿱zr!^zaoE-2vy%~3/G9uQfu-[R?(+6IԶCɯ-Zv_vޱy(K.]L^}ՔiIkƦ]r%p B76Uh "o„ .[l >]p'*=)Ze(?~B{x iӦ}ݗ*?]W;jҟhV >-\0lذ*OU`*F)|M|VQ 6lsuYE3eV ƌSV:#"%LȪ\()c눀W<`e >}zлwpwI'%E}2/_曁}*]PqO L]?/ا\7k-,H[o w؞G}4l'|aAѱNBPgK}~C{S`- gws\}vGǵȳZk}_]> 'Xc[o l cko}2VX-ě7oܝ~igHM }8Ү6m;?C߾}Cv"nR$ ţGMmW}}m֥tbMS۹s'g^x!LgǶihhv$>@ N,]#;2Aq؝*6huU ¸UW lXIσf֧#!݉JۗT g"Q]}}!!Dѽk8ymJd/"h1W+=أH_5" " "Px=qEuM7V7Ub#Ui$o OSD NUmԨQʺh7lPlg: K}>mt(%+Oqmcp;vG1)(wr{5qDcijJyD@D@Dh H(ڡQj@th}_. Z̺8W$<,h1FeWS E?fG(R: _|E9RSgo,D$?^gƍ?΍Ϝg~DD@D@ʉ|+ X.W78? {6GhoٰnqǿdžP6s/vա" " "PxmF6Q.؈DF' IO(h<ݎLo0AOW̟ y OĆ+pۨX >*nPؐƆu8=3l7p:=mH*(f׿;v3F\>N;ڠA=L+09|HiDž-o(@\jTOӽbFCFϤn]_~e#F~BBgo* ds0N(O>)3˴AXgEDCr:]og9۽О?>sOX~ PI{0hwEkQքVr^Ԁg4DU:y{ ?m(C8  ]tSAgp 'GtHgdMEpHD@J/65U<(3.c.ݻwT`p?Q1u. <0~,@)۟v%(Qq ƍe92х:g>8Y=z"g$ t~30PZ8_=(?vg!3fT)6:qU7ߑ'U1v8pS+՘1cRˎGDǔ|(o H@G{nE @/Nis:N؝BzߊTȸE9>-w];w|Pk[S!c\[hM'^$2iO!$]p /f_o`z7<Ȅq|g\qjoٲeF{3)[iDݤ%L%XNxh/O n^Pʭ?<j)6"j'V)@xPE@D@D@D@D@⑞RYt㑎@ ѱSmjy|S˭?%?  3ذF ,"  %(*D矍 ]TRcD@Rݪ!" " "Pr:.4DDtH(RKE@D@D@@&MYK! ţtJ-oy7CDH(RSE@D@D@K`뭷6SN" ţKM/Ν;̝;(̛7p[dS~\ ̞{i.=J_-~핈&}w*PD@D@DXc ӷo_ӯ_BWQ 64ӦM3͛7(}4Q֭k2뮻޽y뭷dO!Æ sQ>lsGN8|׮.6S~|yiȑ#M6m\YwmN=T祎J.DG%." " %LK/53g4{6gϮӞԯ_.A5jo+ן[nŰjO<˥? 4\p.0J _ޕ|Jm/Tk`'ӹvVZbXt><"_}U{9݈v ~Wv:$dg|6]vp̹K.1{#8tᄀ:;gukQnLǎ:&S$?aeA 剝 8!M-/r}9"O?th~ѝ%SIuL^׾h1¼˛aKTm Qӱ:e`&9Z՜rmO o-IY4S \:~ƗG`N_|̜R 齙Uum”(*2va)ԇ“,q R4MuL3i䶤+?1_̍r0}ɧ`Ģ x4qҢE wȏ?^ lHTAT؅O?9z#?gr RxaGsBPv՗-v'IGtL6iU>mԳNj>9|T9" " " FE_R.@8G5"g8a'rßy݉Y 7 Boȡjƌcn*,~ى쉝0&Md.\(U7!KC;IDq]6BaׂW]us**GBs=9ve (qAr-cΆ2HS׫Y!" 
" "PRX`PO6 ʵDc=N;d6d p^Ţxs8?/Jucs] Bgi( Qaw=~pċS%YXh\#\q㜓7QtW8C:>v3]aqi݄<+0s>_"[ѷTG:&ϜSnTECWv4{R>:*;Ք%I|K]vr ^E!F 0>};a<7p]hBMA`ϔsUJv`#6o(r&[,tQAe^8q;'w\“qJ'IDg ~ h# MNmaJ>&E#pJ}7%*>H#?*KD@JvHH/|L+QtȪ_ #3/C3g"P,djU,#v@ 0%@<" "H@GҌpD};4yD p`VT8J""_ɿ?43D@D Lmthv+WjN@;5g" " "@˖-EGD@D B@GtH6J:J&@T٠'Hc*i6hV&CCtQ!ЎG欔RD hJ@j?4;D@D VED 5EJCD@' S43D[D 5"怾ꊼЎG)$V" "H@GҌPT+""Ziv@jj!" Umthvb>*P;D sȜR$PM HM@f$4#J&V"Ziv@djfh+W&CCtO]W"Ps9;ժ#yt|DQQ1TFPTrY+*3/c'VJ SamQK5H (ҁQŋF_~*UM" 9ЎGUT.EܱWQMSfUWiv#R<+UDU[ժ$I#jU{?ӤIګP5䅀L`y^ )S?t`խxf̙fذaD%䝀v`%r%PV7t ootbf̘aƏӧ6mڄ;tPs'Y0No5O?{]o߾az~d"gtAfWt+b%bݬY3sךǜtIf2۷wM/̎;h+E&Md `z!s9%˄ ~甙v>^:u;%SuE2"=ufz)ꪫ=,f֬YvS\~ nKۚgy|h@y_}F)hA^z饰i- vمRX}nI ]7n|V *f}ZXU[Jq|`*?_@Fr}^٧!y#GL(?\sMwx$ S|oahd_jKu_g_tuցH3fw*#Çwٝv$f:K,D0w!aR:DE T]?+J; `V[c9&ۘ,bR5"K{b-oT>`3eʔE?aÆ{ ".bz('xp>Al&^8s8,>S7+AH\䗈@x\:$R~9s>5~w15ȿ˥GOc=fqwDe]wuDv&taz%r$PG-bnvϞ=;G\Y{ϥKq3%"PS\l't|@typE~cM ΡNpiwgϞVyC2Bvb(W\;/pÙT|#\?H(i"/ozYU*׬χK.O,~yU=&NsB^c=֙&@灔lPD@#ՙ^""ZtBC8p`:.%O+^W8ya<>=ezPԁ%@d+^%:Wwu\x[nb٨" "%PV$yQ2oslҁgi@&V|Il_1H< {쁪C 9æ:%bਣ" "` TɂN<}mٲ췿LLMlr!Dk% `˜] +N8l Nwϣj۶;o!W[[r27gD' !/bHGMJFOS:u?;@v389٬q<@mLz34w^#!zdBSiD@*@Yx DtMݩ.۷~CBt8 ڃ\+N4ggPx[U~ OJ#c>x뭷I|N\ NV&L(KHZλol;3qDHD\p"YV{SD~7Pڇ ` 2@Z l',xb >.. awx sӧ&LpQKXptꫯdrSEJ'9yƎknv盱F9㎫B!(+3:]Qv2y Zptԩ@Nqv{&%`RaLS"#m@/7~Ş[_n=ƷGH!P>Ń[-$ ţ2]Z% ţVq2LR<*skURXwuZnm b9s)"PHR_W_}%ţ&;KZr-P)1$Zk-_/*Gʂ,(aU'H+M4c**@>}}'8" %D]<ȸ?fYfh"3w\*А"P,7o*Ԃg}f6mZJUyF9~?$'KQӅGD /#3o6/U:nv=J}T|\q'b-̻[{&HHy -eB ۑ%z 9x7\>@I6`aO""Px'-cԨQ1j@nNKСC]{<Ȓjw)7v5tV DW^,X~DqQ3C}x7MM K^m5%" O@ߌ)ƈ/21f̘IP D iUWu "(|[2809蠃t=sfڴiXȅjȑ#݁i4޷gϞ毿2 6tx|uzTP=~ۣGE;Sرc-֨D@r! ţzmi B)Vx.LyK1 OᡁsCi>:H/J#pw"Zun}WD@2&_; IJݺusOrQ<?p"G|?s P68ãqrrڊV4ہDD@D 5xz2 'Yg$M`ܸqPM T2.̌?펟p B}xA+ATW\q1bDZJ H4of5xuf9 )S~\sY~3ɦ4" "Pxd0;w6\pKٷo_ π[n){キ_sTB^w}|+ +" Y!2Z8 jw0@yЮGyzQ3]t19MkVr@ŀO4mϚ5KYpS'ڵQcdOO7?;dnȾ % #ˁ\aÆO>9J.G`رδ N"NW_}o]UD@D xduszs衇fY@c馛~pfX5,RDh[a~0gy9쳫mygN;4|䗈@9`3gMvڕCuRo7|cj+/-Zq4(nRSQ.KD }6|}c=f4hPWD@D Hz3pQ~0Qc-*JG={:'>qfʔ)9Gk#PR>PD|_7|.zR^}Us5׸'Ήx%uNSv!x)Tƍ >\pfܹT%-"u@Ȗ|WhlvСCE)}0`.U@xdK,ƏNѯ}:dY@n(+hw%B[(W~o9ҭ[BUrE ={=.;Қ4RQuD@GW"PCR쳄qJ;$*T R('G ޿{;{k׮TvHNO)@ H(0B"oW_4k,|XR; hkq)E($'|NŎ\ +"RJf&a.矇/h*}ݴ^#LLO9{c&)e62UF| UD6m*$9y͢D:bV0(~!_G٨݌,LήJ Z__`Al>.:/_X| ?%UIR23VDNJѠ@}LL{eN<0e]L{W^2#!q|'V'>+Μ9&>9Ouv >!;uM/,(۷o?C@YLu"iWF,x8NbꪫFxqW\qŒB鯿^{#Y`NXU0{E+KG/,,WBRh8T5C? IÆ E2{VJ2|SwEv2)Bk'fFqw[o5#F0w\N6l[l馛\P N8aQ"M曆(_5j"h;0sכ;Çפ )<%uРAUZzAI&@D:tA~Ÿ9"^{!*s=4is(n~gĐQMH΂!jd2d%X"AIarF W}եԿM.\XEEXxr u]7})c(^i|-YXdOޣp.Y9iРA0#DTQSo@JPc9FA&8WܑGwŃcơ.OPguVDIp g5~Pg7?Y/ bMw[Ar4băSTuy&TWΏcpP?xwKLg^fNFxdCV-8 O0b<sW)q9YPXRm䇘Wuw&+$s[/RFJ߲K/~TEݍY֌ #}IDAT+ ->(݂c=-nGfY(<5FQd/-r'(Se]ݻà 5B)]wVZvUfrx+‚cCis`F8v{otGd#<4=z9 uiGpb4*A:(v" uM13i<ӄYn۶ϓN:<Κ+$˙g-l O ⒦AM=P\L:)D,= YW05 |!6~:X]t1=XQ>}\~8ѳgO{K%u-PX_6R<*,WL6GT)yd". ݐT">:7GrQL'ygZkJ7I K QE"PʲeqwE"YpfP3H,Y;"kq8| N>ds!8d8?1&T,49D\T0͘PwT0b wP 0AH6cDP{aNaMw՜s9EfO= sμ e ]?'|R>X0т 0_}SjP<>D1s.Ts)?sX3;jT@Db0as+FygXv2;(IV2a`O]๲B0f+avY`́ A uW`wBba4%]IFvýg66zYMUrEj` i`PQ"ȵcq9u暦K.fʔ)Ֆc=4k,d;{Z֋-2Æ s}8̗8O?5wk+hk/,;j(7k+ݺu3cǎjkUD@D@DXņD*]  :uE{RK/{kf͚UO<ol>X0L?NO`˿;: ~*͙={v[':kF0q*6mX)}|;1w}r[nr 'dGD@]K""Px& ,o~sʂyV]uU@q6T`w8ܵ=#O,X.,O>dxm>c?.^A>NCB=O=T w}wаaCW x+B ~`Ի}NAƷz+!/W ̛7{W(>/c6w\W/cuI'݂<႞|~`0f̘P`=PHΝ;:Xo&.k6;DrciӦF3@xdL9D. HKV DK/Z/< 5c@ڵsiPl6m& = }o)¢t,Vf_ǚo3τ}{úA|p&(S.E]^sߦ>?櫩fUա|Һu밯0|C1Y1#CHPܬY(S첋KxkUZs>3Ψr6l2hܸqpM7꽠D\H(T?*|<@RY8i|O@nA0VaH`5Vp/eα@ ]z5ǟ_?Ӝy>QR#փqBb.Q 6{a*~X& ~by믿ۅLUpfUa`ͳgvgw$ 0N5Kcvw6Vy'$\o'E"" " N@UhԩS5} 5i51-vyygk&O(5q48Aꫯ6,9-80IN,X`Qly8#֏":q:}l/vĽE)YeUb\~5;v͋ȢrA`π A*(R |^To:y,o9ߣtlFgtI l J! 
ţRFZ K%,(F,8'L`;{JBϪ.%BTb}\ b7T)wtĚAUI(L|יEB[,')b~}: 8e]rHae` bs=͕W^iإ.T^l&ƚhgJW]b! ţXFB5Τ^%oD,vmg3Ŋ HHO/)\p=uh?&v JWuyC{ڶJn'ƈ:y@^B"ȘK7:UPo*9]x`̴0ѱ ;((PL[HD@D@D H(Ub = 9,uѣ3.Y<&/d1As)v] S_"ކu O~cóFיD /sy_ָ 0AqS@`ǐ3 ψ#bY5F730bw†q'~{QcCuü[@syQkR8ʵÞǛp @-( |(b2c)VBݞ)`~DuTS3uhJF:rfw] w[Os663mB<4n|m\砏Vw!c2m0 `(~~pE+à C 7b% ţXGF*qs=.=PW?;{8F<@Wc=fqB,>\a EʼnwǴ(gRBw^PhyUvp(~W8*6,/9JQJ`3̞?>smb[~8vi:h;GuhȐ! Ηg?6LKOZ!tfo*n-)5 iw|I{`_>r{{ vG$o0B_݅Hϟb#"rB={nx9܎Ӵ}=i͔ZSCqeEtBg!,>5=@C=s5ʕ,cabQd&o 8Az*{UNe><=0w28q7U}>-AE@j@^{@i1NjeاՁ=#Njk.Xeck}b')ל͂9ɛyEOYgSNݝpYТX*٧TY;⮳0[32w(.ҭW`M P's ]*IPXо}P1OaÂosך*}b6Wxr: m'In aoNHƞO=:,w (svdGD@fOEDDԣyBQa"PBˇWMtnN'<r7]>3)cKt;G<^ǔ qgZ6u"Wa3F0˅}ċq&p~:ҕ"Piw/"DD (nZX@kMwfDzˤ,1(TnB(95e bKL@.wJj 9H" " "Paжm 뵺+K@G鎝Z." " K^fX2PEԪFL2&)3HG̥tiu]D8ȹ8AȐ#:6`9LDhǣFAmHK>0F26,;vlJ "P\Q\ֈ@E0` cK.ƞd?ƞSќy(E2*QSE@D@Dp ä@QQãƉ@yUyz!" " " " EM@GQ'" " " " A@Gyz!" " " " EM@GQ'" " " " A@Gyz!" " " " EM@GQ'" " " " A@Gyz!" " " " EM@GQ'" " " " AbTqDIENDB`receptor-1.5.3/docs/source/getting_started_guide/trying_sample_commands.rst000066400000000000000000000022541475465740700274550ustar00rootroot00000000000000################### Try Sample Commands ################### .. note:: You must complete the prior steps of network setup and Receptor installation for these commands to work. 1. Show network status .. code-block:: bash receptorctl --socket /tmp/foo.sock status 2. Ping node baz from node foo .. code-block:: bash receptorctl --socket /tmp/foo.sock ping baz 3. Submit work from foo to baz and stream results back to foo .. code-block:: bash seq 10 | receptorctl --socket /tmp/foo.sock work submit --node baz echo --payload - -f 4. List work units .. code-block:: bash receptorctl --socket /tmp/foo.sock work list --node foo 5. Get work unit id using jq .. code-block:: bash receptorctl --socket /tmp/foo.sock work list --node foo | jq --raw-output '.|keys|first' 6. Re-stream the work results from work unit .. code-block:: bash receptorctl --socket /tmp/foo.sock work results work_unit_id Congratulations, Receptor is now ready to use! .. seealso:: :ref:`control_service_commands` Control service commands :ref:`creating_a_basic_network` Creating a Basic Network :ref:`installing_receptor` Installing Receptor receptor-1.5.3/docs/source/index.rst000066400000000000000000000016631475465740700174650ustar00rootroot00000000000000.. receptor documentation master file, created by sphinx-quickstart on Sat May 1 19:39:15 2021. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. ############################### Ansible Receptor documentation ############################### Receptor is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. Receptor nodes establish peer-to-peer connections with each other via existing networks. Once connected, the receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures. .. toctree:: :maxdepth: 2 installation getting_started_guide/index user_guide/index developer_guide receptorctl/index porting_guide/index roadmap/index upgrade/index contributing receptor-1.5.3/docs/source/installation.rst000066400000000000000000000012671475465740700210570ustar00rootroot00000000000000.. 
_installing: ******************* Installation guide ******************* Download and extract precompiled binary for your OS and platform from `the releases page on GitHub `_ Alternatively, you can compile Receptor from source code (Golang 1.22+ required) .. code-block:: bash make receptor Test the installation with the following commands: .. code-block:: bash receptor --help receptor --version The preferred way to interact with Receptor nodes is to use the ``receptorctl`` command line tool .. code-block:: bash pip install receptorctl ``receptorctl`` will be used in various places throughout this documentation. receptor-1.5.3/docs/source/porting_guide/000077500000000000000000000000001475465740700204555ustar00rootroot00000000000000receptor-1.5.3/docs/source/porting_guide/PORTING_2.rst000066400000000000000000000011651475465740700225550ustar00rootroot00000000000000-------------------------- Receptor 2.x Porting Guide -------------------------- .. contents:: :local: We suggest you read this page along with the Receptor 2 Changelog to understand what updates you may need to make ^^^^^^^^^^^^^^^^^ Software Versions ^^^^^^^^^^^^^^^^^ "" Go "" TBD """""" Python """""" TBD ^^^^^^^^ Features ^^^^^^^^ """""""""" Deprecated """""""""" TBD """"""" Removed """"""" TBD ^^^^^^^^^^^^ Command Line ^^^^^^^^^^^^ TBD ^^^^^^^^^^^^^ Configuration ^^^^^^^^^^^^^ TBD ^^^^^^^^^^^^^^^ Control Service ^^^^^^^^^^^^^^^ TBD ^^^^^^^^^ Netceptor ^^^^^^^^^ TBD ^^^^^^^^^^ Workceptor ^^^^^^^^^^ TBD receptor-1.5.3/docs/source/porting_guide/index.rst000066400000000000000000000001021475465740700223070ustar00rootroot00000000000000.. toctree:: :maxdepth: 1 :glob: receptor_porting_index receptor-1.5.3/docs/source/porting_guide/receptor_porting_index.rst000066400000000000000000000004331475465740700257630ustar00rootroot00000000000000======================= Receptor Porting Guides ======================= This section lists porting guides that can help in updating your environment from one version of Receptor to the next. .. toctree:: :maxdepth: 1 :glob: :caption: Receptor Porting Guides PORTING_2 receptor-1.5.3/docs/source/receptorctl/000077500000000000000000000000001475465740700201445ustar00rootroot00000000000000receptor-1.5.3/docs/source/receptorctl/index.rst000066400000000000000000000000751475465740700220070ustar00rootroot00000000000000.. toctree:: :maxdepth: 1 :glob: receptorctl_index receptor-1.5.3/docs/source/receptorctl/receptorctl_connect.rst000066400000000000000000000022051475465740700247340ustar00rootroot00000000000000------- connect ------- .. contents:: :local: ``receptorctl connect`` establishes a connection between local client and a Receptor node. Command syntax: ``receptorctl --socket= connect `` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``remote_node`` is the identifier of a Receptor node. ``remote_control_service`` is the service name of a Receptor node. .. seealso:: :ref:`connect_to_csv` Connect to any Receptor control service running on the mesh. 
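For illustration, a minimal ``connect`` invocation might look like the following sketch; the socket path, node name, and service name are placeholder examples (consistent with the sample three-node mesh used elsewhere in this documentation), not values from a real deployment.

.. code-block:: bash

    # Open an interactive connection from the local client to the "control"
    # service on the remote node "bar", via the local node's control socket.
    receptorctl --socket /tmp/foo.sock connect bar control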
receptor-1.5.3/docs/source/receptorctl/receptorctl_index.rst000066400000000000000000000013451475465740700244160ustar00rootroot00000000000000======================== Receptor client commands ======================== The Receptor client, ``receptorctl``, provides a command line interface for interacting with and managing Receptor nodes. .. toctree:: :maxdepth: 1 :glob: :caption: Receptorctl commands receptorctl_connect receptorctl_ping receptorctl_reload receptorctl_status receptorctl_traceroute receptorctl_version receptorctl_work_cancel receptorctl_work_list receptorctl_work_release receptorctl_work_results receptorctl_work_submit .. attention: Receptor has commands that are intended to provide internal functionality. These commands are not supported by ``receptorctl``: - ``work force-release``. - ``work status``. receptor-1.5.3/docs/source/receptorctl/receptorctl_ping.rst000066400000000000000000000021651475465740700242450ustar00rootroot00000000000000---- ping ---- .. contents:: :local: ``receptorctl ping`` tests the network reachability of Receptor nodes. Command syntax: ``receptorctl --socket= [--count ] [--delay ] ping `` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``count`` specifies the number of pings to send. The value must be a positive integer. The default is ``4``. ``delay`` specifies the time, in seconds, to wait between pings. The value must be a positive float. The default is ``1.0``. receptor-1.5.3/docs/source/receptorctl/receptorctl_reload.rst000066400000000000000000000015501475465740700245530ustar00rootroot00000000000000------ reload ------ .. contents:: :local: ``receptorctl reload`` reloads the Receptor configuration for the connected node. Command syntax: ``receptorctl --socket= reload`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` receptor-1.5.3/docs/source/receptorctl/receptorctl_status.rst000066400000000000000000000104411475465740700246270ustar00rootroot00000000000000------ status ------ .. contents:: :local: ``receptorctl status`` displays the status of the Receptor network. Command syntax: ``receptorctl --socket= status [--json]`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``--json`` option returns the output in JSON format. 
The default output format is human-readable. Using this argument allows output to be machine consumable. For example, piping into ``jq``. The output is divided into sections listed below. Field values may be listed in their own section. Columns are the actual JSON node values. ^^^^^^^^^^^^ Node section ^^^^^^^^^^^^ .. list-table:: Node section :header-rows: 1 :widths: auto * - Column - Description * - ``."NodeID"`` - Node identifier. * - ``."SystemCPUCount"`` - Number of logical CPU cores on the node. * - ``.SystemMemoryMiB"`` - Available memory (MiB) of the node. * - ``."Version"`` - Receptor version. ^^^^^^^^^^^^^^^^^^^ Connections section ^^^^^^^^^^^^^^^^^^^ .. list-table:: Connections section :header-rows: 1 :widths: auto * - Column - Description * - ``."Connections"`` - Connections. * - ``."Connections"."Cost"`` - Metric (route preference) to reach NodeID. * - ``."Connections"."NodeID"`` - Node ID. ^^^^^^^^^^^^^^^^^^^^^^^^^ Known connections section ^^^^^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Known connections section :header-rows: 1 :widths: auto * - Column - Description * - ``"KnownConnectionCosts"`` - Known Connections. * - ``"KnownConnectionCosts".""`` - Remote node ID. * - ``"KnownConnectionCosts"."".""`` - Cost to get to NodeID 1 through NodeID 2. ^^^^^^^^^^^^^^^^^^^^^ Routing Table Section ^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Routing Table section :header-rows: 1 :widths: auto * - Column - Description * - ``."RoutingTable"`` - Routing Table. * - ``."RoutingTable".""`` - List of NodeID(s) used to get to desired NodeID. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Service Advertisement Section ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Service Advertisement Section :header-rows: 1 :widths: auto * - Column - Description * - ``."Advertisements"`` - Advertisements. * - ``."Advertisements"."ConnType"`` - Connection type (see below for values). * - ``."Advertisements"."NodeID"`` - Node identifier issuing advertisement. * - ``."Advertisements"."Service"`` - Receptor services on node. * - ``."Advertisements"."Tags"`` - Tags associated with node. * - ``."Advertisements"."Time"`` - Timestamp when advertisement sent. ====================== Execution Node Section ====================== .. list-table:: Execution Node section :header-rows: 1 :widths: auto * - Column - Description * - ``."Advertisements"."WorkCommands"`` - Execution Node work commands. * - ``."Advertisements"."WorkCommands"."Secure"`` - Boolean indicating whether the work commands are signed. * - ``."Advertisements"."WorkCommands"."WorkType"`` - Work command(s) supported. =============== Connection Type =============== .. list-table:: Connection Types :header-rows: 1 :widths: auto * - ConnType Value - Description * - 0 - Datagram. * - 1 - Stream. * - 2 - StreamTLS. ==== Tags ==== .. list-table:: Tags :header-rows: 1 :widths: auto * - Tags - Description * - network - Network name. * - route_* - Route information for specified node. * - type - Node type. receptor-1.5.3/docs/source/receptorctl/receptorctl_traceroute.rst000066400000000000000000000016211475465740700254610ustar00rootroot00000000000000---------- traceroute ---------- .. contents:: :local: ``receptorctl traceroute`` Displays the network route that packets follow to Receptor nodes. Command syntax: ``receptorctl --socket= traceroute `` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. 
code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` receptor-1.5.3/docs/source/receptorctl/receptorctl_version.rst000066400000000000000000000016171475465740700247760ustar00rootroot00000000000000------- version ------- .. contents:: :local: ``receptorctl version`` displays version information for receptorctl and the Receptor node to which it is connected. Command syntax: ``receptorctl --socket= version`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` receptor-1.5.3/docs/source/receptorctl/receptorctl_work_cancel.rst000066400000000000000000000020711475465740700255730ustar00rootroot00000000000000----------- work cancel ----------- .. contents:: :local: ``receptorctl work cancel`` terminates one or more units of work. Command syntax: ``receptorctl --socket= work cancel <> [...]`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``Unit ID`` is a unique identifier for a work unit (job). When running the ``work cancel`` command, you should specify the ``Unit ID`` for the Receptor node to which you are connected. receptor-1.5.3/docs/source/receptorctl/receptorctl_work_list.rst000066400000000000000000000046511475465740700253270ustar00rootroot00000000000000--------- work list --------- .. contents:: :local: ``receptorctl work list`` displays known units of work Command syntax: ``receptorctl --socket= work list`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` The output is divided into work unit sections listed below. Field values might be listed separately. Columns are the actual JSON node values. ^^^^^^^^^^^^^^^^^ Work unit section ^^^^^^^^^^^^^^^^^ .. list-table:: Work unit section :header-rows: 1 :widths: auto * - Column - Description * - ``."Work unit string"`` - Random eight character work unit (job) string. * - ``."Work unit string"."Detail"`` - Work unit output. * - ``."Work unit string"."ExtraData"`` - Additional information added for specific worktypes. * - ``.""Work unit string"."State"`` - Current state for the work unit (int). 
* - ``.""Work unit string"."StateName"`` - Human-readable current state for the work unit. * - ``.""Work unit string"."StdoutSize"`` - Size of the work unit output (bytes). * - ``.""Work unit string"."WorkType"`` - Execution request type for the work unit. ^^^^^^^^^^^^^^^^ Work unit states ^^^^^^^^^^^^^^^^ .. list-table:: Work unit states :header-rows: 1 :widths: auto * - State - StateName - Description * - ``0`` - ``Pending`` - Work unit has not started. * - ``1`` - ``Running`` - Work unit is currently executing. * - ``2`` - ``Succeeded`` - Work unit completed without error. * - ``3`` - ``Failed`` - Work unit encountered an error or unexpected condition and did not complete. * - ``4`` - ``Canceled`` - Work unit was terminated externally. receptor-1.5.3/docs/source/receptorctl/receptorctl_work_release.rst000066400000000000000000000024741475465740700257750ustar00rootroot00000000000000------------ work release ------------ .. contents:: :local: ``receptorctl work release`` deletes one or more units of work. Command syntax: ``receptorctl --socket= work release [<>] <> [...]`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``Unit ID`` is a unique identifier for a work unit (job). When running the ``work release`` command, you should specify the ``Unit ID`` for the Receptor node to which you are connected. ``--all`` deletes all work units known by the Receptor node to which you are connected. ``--force`` deletes work units locally on the Receptor node to which you are connected and takes effect even if the remote Receptor node is unreachable. receptor-1.5.3/docs/source/receptorctl/receptorctl_work_results.rst000066400000000000000000000022051475465740700260460ustar00rootroot00000000000000------------ work results ------------ .. contents:: :local: ``receptorctl work results`` gets results for successfully completed, failed, stopped, or currently running, units of work. Command syntax: ``receptorctl --socket= work results [<>] <> [...]`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``Unit ID`` is a unique identifier for a work unit (job). When running the ``work results`` command, you should specify the ``Unit ID`` for the Receptor node to which you are connected. receptor-1.5.3/docs/source/receptorctl/receptorctl_work_submit.rst000066400000000000000000000050721475465740700256550ustar00rootroot00000000000000----------- work submit ----------- .. contents:: :local: ``receptorctl work submit`` requests a Receptor node to run a unit of work. Command syntax: ``receptorctl --socket= work submit [<>] <> [<>]`` ``socket_path`` is the control socket address for the Receptor connection. The default is ``unix:`` for a Unix socket. 
Use ``tcp://`` for a TCP socket. The corresponding environment variable is ``RECEPTORCTL_SOCKET``. .. code-block:: text ss --listening --processes --unix 'src = unix:' Netid State Recv-Q Send-Q Local Address:Port Peer Address:Port Process u_str LISTEN 0 4096 /tmp/local.sock 38130170 * 0 users:(("receptor",pid=3226769,fd=7)) ``ps -fp $(pidof receptor)`` ``lsof -p `` ``WorkType`` specifies an execution request type for the work unit. Use the ``receptorctl status`` command to find available work types for Receptor nodes. ``Runtime Parameters`` are parameters passed by Receptor to the work command. ^^^^^^^^^^^^^^^^^^^ Work submit options ^^^^^^^^^^^^^^^^^^^ You can use the following options with the ``work submit`` command: .. list-table:: :header-rows: 1 :widths: auto * - Option - Description * - ``-a``, ``--param <>=<>`` - Adds a Receptor parameter in key=value format. * - ``-f``, ``--follow`` - Keeps Receptorctl to remain attached to the job and displays the job results. * - ``-l``, ``--payload-literal <>`` - Uses the value of ``<>`` as the literal unit of work data. * - ``-n``, ``--no-payload`` - Sends an empty payload. * - ``--node <>`` - Specifies the Receptor node on which the work runs. The default is the local node. * - ``-p``, ``--payload <>`` - Specifies the file that contains data for the unit of work. Specify ``-`` for standard input (stdin). * - ``--rm`` - Releases the work unit after completion. * - ``--signwork`` - Digitally signs remote work submissions to standard output (stdout). * - ``--tls-client <>`` - Specifies the TLS client that submits work to a remote node. * - ``--ttl <>`` - Specifies the time to live (TTL) for remote work requests in ``##h##m##s`` format; for example ``1h20m30s`` or ``30m10s``. Use the ``receptorctl work list`` command to display units of work on Receptor nodes and determine appropriate TTL values. receptor-1.5.3/docs/source/requirements.txt000066400000000000000000000000441475465740700211000ustar00rootroot00000000000000pbr sphinx sphinx-ansible-theme six receptor-1.5.3/docs/source/roadmap/000077500000000000000000000000001475465740700172415ustar00rootroot00000000000000receptor-1.5.3/docs/source/roadmap/ROADMAP_2.rst000066400000000000000000000004551475465740700213030ustar00rootroot00000000000000========== Receptor 2 ========== **Receptor Freeze: TBD** **Target: TBD** .. contents:: :local: --------------- Release Manager --------------- Aaron Hetherington (IRC/GitHub: @AaronH88) ------------- Configuration ------------- --------- Netceptor --------- ---------- Workceptor ---------- receptor-1.5.3/docs/source/roadmap/index.rst000066400000000000000000000001021475465740700210730ustar00rootroot00000000000000.. toctree:: :maxdepth: 1 :glob: receptor_roadmap_index receptor-1.5.3/docs/source/roadmap/receptor_roadmap_index.rst000066400000000000000000000023501475465740700245100ustar00rootroot00000000000000================= Receptor Roadmaps ================= The ``Receptor`` team develops a roadmap for each major Receptor release. The latest roadmap shows current work; older roadmaps provide a history of the project. We don't publish roadmaps for minor or subminor versions. So 2.0 and 3.0 have roadmaps, but 2.1.0 and 2.10.1 do not. We incorporate team and community feedback in each roadmap, and aim for further transparency and better inclusion of both community desires and submissions. Each roadmap offers a *best guess*, based on the ``Receptor`` team's experience and on requests and feedback from the community, of what will be included in a given release. 
However, some items on the roadmap may be dropped due to time constraints, lack of community maintainers, and so on. Each roadmap is published both as an idea of what is upcoming in ``Receptor``, and as a medium for seeking further feedback from the community. You can submit feedback on the current roadmap by: - Creating an issue on GitHub in the `ansible/receptor repository `_ Go to the `Ansible forum `_ to join the community discussions. .. toctree:: :maxdepth: 1 :glob: :caption: Receptor Roadmaps ROADMAP_2 receptor-1.5.3/docs/source/upgrade/000077500000000000000000000000001475465740700172455ustar00rootroot00000000000000receptor-1.5.3/docs/source/upgrade/UPGRADE_2.rst000066400000000000000000000002571475465740700213130ustar00rootroot00000000000000========== Receptor 2 ========== To upgrade an existing Receptor installation to the latest version, follow the instructions in :ref:`installing`. .. contents:: :local: receptor-1.5.3/docs/source/upgrade/index.rst000066400000000000000000000001021475465740700210990ustar00rootroot00000000000000.. toctree:: :maxdepth: 1 :glob: receptor_upgrade_index receptor-1.5.3/docs/source/upgrade/receptor_upgrade_index.rst000066400000000000000000000006321475465740700245210ustar00rootroot00000000000000================= Receptor Upgrades ================= You can submit feedback on the Receptor upgrade guides by: - Creating an issue on GitHub in the `ansible/receptor repository `_ Go to the `Ansible forum `_ to join the community discussions. .. toctree:: :maxdepth: 1 :glob: :caption: Receptor Upgrade Guides UPGRADE_2 receptor-1.5.3/docs/source/user_guide/000077500000000000000000000000001475465740700197515ustar00rootroot00000000000000receptor-1.5.3/docs/source/user_guide/basic_usage.rst000066400000000000000000000036341475465740700227560ustar00rootroot00000000000000Using Receptor =============== .. contents:: :local: Using the Receptor CLI ---------------------- .. list-table:: Persistent Flags :header-rows: 1 :widths: auto * - Action - Description * - ``--config `` - Loads configuration options from a YAML file. * - ``--version`` - Display the Receptor version. * - ``--help`` - Display this help. .. _configuring_receptor_with_a_config_file: Configuring Receptor with a config file ---------------------------------------- Receptor can be configured with command-line options, as shown above, or via a YAML config file. All actions and parameters shown in ``receptor --help`` can be written to a config file. .. code-block:: yaml --- version: 2 node: id: foo local-only: local: true log-level: level: Debug Start receptor using the config file: .. code-block:: bash receptor --config foo.yml Changes to the configuration file do not take effect until the receptor process is restarted. .. _using_receptor_containers: Use Receptor through a container image --------------------------------------- .. code-block:: bash podman pull quay.io/ansible/receptor Start a container, which automatically runs receptor with the default config located at ``/etc/receptor/receptor.conf``. .. code-block:: bash podman run -it --rm --name receptor quay.io/ansible/receptor In another terminal, issue a basic "status" command to the running receptor process: .. code-block:: bash $ podman exec receptor receptorctl status Node ID: d9b5a8e3c156 Version: 1.0.0 System CPU Count: 8 System Memory MiB: 15865 Node Service Type Last Seen Tags Work Types d9b5a8e3c156 control Stream 2021-08-04 19:26:14 - - Note: the config file does not specify a node ID, so the hostname (on the container) is chosen as the node ID.
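To run the container with a configuration of your own instead of the default, mount it over ``/etc/receptor/receptor.conf``. The following is a minimal sketch, assuming a ``foo.yml`` such as the one shown earlier exists in the current directory; the :ref:`connecting_nodes` examples use the same approach.

.. code-block:: bash

   podman run -it --rm --name receptor \
     -v ${PWD}/foo.yml:/etc/receptor/receptor.conf \
     quay.io/ansible/receptor

Because the mounted config sets ``id: foo``, ``receptorctl status`` now reports ``foo`` as the node ID instead of the container hostname.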
receptor-1.5.3/docs/source/user_guide/configuration_options.rst000066400000000000000000000644141475465740700251360ustar00rootroot00000000000000============================== Receptor Configuration Options ============================== ^^^^^^^^^^^^^^^^ Control Services ^^^^^^^^^^^^^^^^ .. list-table:: Control Service (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``filename`` - Specifies the filename of a local Unix socket to bind to the service. - No default value. - string * - ``permissions`` - Socket file permissions - 0600 - int * - ``service`` - Receptor service name to listen on - control - string * - ``tls`` - Name of TLS server config for the Receptor listener - No default value. - string * - ``tcplisten`` - Local TCP port or host:port to bind to the control service - No default value. - string * - ``tcptls`` - Name of TLS server config for the TCP listener - No default value. - string .. code-block:: yaml control-services: - service: foo filename: /tmp/foo.sock ^^^^^^^^^ Log level ^^^^^^^^^ .. list-table:: Log Level :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``level`` - Log level: Error, Warning, Info or Debug - Error - string Add payload tracing by setting the `RECEPTOR_PAYLOAD_TRACE_LEVEL=int` environment variable and using log level Debug. .. list-table:: RECEPTOR_PAYLOAD_TRACE_LEVEL options :header-rows: 1 :widths: auto * - Tracing level - Description * - 0 - No payload tracing log * - 1 - Log connection type * - 2 - Log connection type and work unit id * - 3 - Log connection type, work unit id and payload **Warning: Payload Tracing May Expose Sensitive Data** Please be aware that using payload tracing can potentially reveal sensitive information. This includes, but is not limited to, personal data, authentication tokens, and system configurations. Ensure that you only use tracing tools in a secure environment and avoid sharing trace output with unauthorized users. Always follow your organization's data protection policies when handling sensitive information. Proceed with caution! .. code-block:: yaml log-level: level: debug ^^^^ Qlog ^^^^ .. list-table:: Qlog :header-rows: 1 :widths: auto * - Variable - Description - Type * - QLOGDIR - Environment variable specifying the directory path where logs will be stored - string Qlogs provide greater visibility into the Receptor mesh network. Set the QLOGDIR environment variable to the directory path where logs will be stored. Creating a new directory may help organize qlogs. Once logs are created they can be viewed using `qvis <https://github.com/quiclog/qvis>`_. ----------------- Run qvis locally: ----------------- - Git clone the qvis repo - ``cd visualizations`` - Run ``npm install`` - Run ``npm run serve`` - Qvis is now served on port 8080 - Upload qlogs to qvis and navigate to Sequence ^^^^^^^^^^^^^^^^ Pyroscope Client ^^^^^^^^^^^^^^^^ Use pyroscope-client to implement pyroscope push-mode to profile receptor and push to a Pyroscope server. .. list-table:: Pyroscope client :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``applicationName`` - Name of application used in Pyroscope UI - No default value. - string * - ``tags`` - Map of static tags. - No default value. - map of string * - ``serverAddress`` - Address of pyroscope server - No default value. - string * - ``basicAuthUser`` - Http basic auth user - No default value. - string * - ``basicAuthPassword`` - Http basic auth password - No default value.
- string * - ``tenantID`` - Specify TenantId when using phlare multi-tenancy - No default value. - string * - ``uploadRate`` - Upload rate e.g. ``10s`` - 15s - string * - ``profileTypes`` - Optional profile types- ProfileGoroutines, ProfileMutexCount, ProfileMutexDuration, ProfileBlockCount, ProfileBlockDuration. - Profile types set by default- ProfileCPU, ProfileAllocObjects, ProfileAlloSpace, ProfileInuseObjects, ProfileInuseSpace. - list of sting * - ``disableGCRuns`` - This will disable automatic runtime.GC runs between getting the heap profiles - true - bool * - ``HTTPHeaders`` - Set the Authorization header manually - No default value. - map of string .. code-block:: yaml pyroscope-client: - applicationName: "receptor" serverAddress: "http://localhost:4040" profileTypes: - ProfileGoroutines - ProfileMutexCount - ProfileMutexDuration - ProfileBlockCount - ProfileBlockDuration ^^^^ Node ^^^^ .. list-table:: Node :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``id`` - Node ID can only contain a-z, A-Z, 0-9 or special characters . - _ @ : - local hostname - string * - ``datadir`` - Directory in which to store node data - /tmp/receptor - string * - ``firewallrules`` - Firewall Rules. See :ref:`firewall_rules` for syntax - No default value. - JSON * - ``maxidleconnectiontimeout`` - Max duration with no traffic before a backend connection is timed out and refreshed - No default value. - string .. code-block:: yaml node: id: foo ------------------------------------------ Configure resources used by other commands ------------------------------------------ ^^^^^^^^^^^ TLS Clients ^^^^^^^^^^^ .. list-table:: TLS Client (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``cert`` - Client certificate filename (required) - No default value. - string * - ``insecureskipverify`` - Accept any server cert - false - bool * - ``key`` - Client private key filename (required) - No default value. - string * - ``mintls13`` - Set minimum TLS version to 1.3. Otherwise the minimum is 1.2 - false - bool * - ``name`` - Name of this TLS client configuration (required) - No default value. - string * - ``pinnedservercert`` - Pinned fingerprint of required server certificate - No default value. - list of string * - ``rootcas`` - Root CA bundle to use instead of system trust - No default value. - string * - ``skipreceptornamescheck`` - if true, skip verifying ReceptorNames OIDs in certificate at startup - No default value. - bool .. code-block:: yaml tls-clients: - name: tlsclient cert: /tmp/certs/foo.crt key: /tmp/certs/key.crt ^^^^^^^^^^^ TLS Servers ^^^^^^^^^^^ .. list-table:: TLS Server (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``cert`` - Server certificate filename (required) - No default value. - string * - ``clientcas`` - Filename of CA bundle to verify client certs with - No default value. - string * - ``key`` - Server private key filename (required) - No default value. - string * - ``mintls13`` - Set minimum TLS version to 1.3. Otherwise the minimum is 1.2 - false - bool * - ``name`` - Name of this TLS server configuration (required) - No default value. - string * - ``pinnedclientcert`` - Pinned fingerprint of required client certificate - No default value. - list of string * - ``requireclientcert`` - Require client certificates - false - bool * - ``skipreceptornamescheck`` - Skip verifying ReceptorNames OIDs in certificate at startup - false - bool .. 
code-block:: yaml tls-servers: - name: tlsserver cert: /tmp/certs/foo.crt key: /tmp/certs/key.crt ---------------------------------------------------------------------- Options to configure back-ends, which connect Receptor nodes together ---------------------------------------------------------------------- ^^^^^^^^^^^^^ TCP listeners ^^^^^^^^^^^^^ .. list-table:: TCP Listener (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``allowedpeers`` - Peer node IDs to allow via this connection - No default value. - list of string * - ``bindaddr`` - Local address to bind to - 0.0.0.0 - string * - ``cost`` - Connection cost (weight) - 1.0 - float64 * - ``nodecost`` - Per-node costs - No default value. - float64 * - ``port`` - Local TCP port to listen on (required) - No default value. - int * - ``tls`` - Name of TLS server config - No default value. - string .. code-block:: yaml tcp-listeners: - port: 2223 ^^^^^^^^^ TCP Peers ^^^^^^^^^ .. list-table:: TCP Peer :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``address`` - Remote address (Host:Port) to connect to (required) - No default value. - string * - ``allowedpeers`` - Peer node IDs to allow via this connection - No default value. - list of string * - ``cost`` - Connection cost (weight) - 1.0 - float64 * - ``redial`` - Keep redialing on lost connection - true - bool * - ``tls`` - Name of TLS client configuration - No default value. - string .. code-block:: yaml tcp-peers: - address: localhost:2223 ^^^^^^^^^^^^^ UDP Listeners ^^^^^^^^^^^^^ .. list-table:: UDP Listener (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``allowedpeers`` - Peer node IDs to allow via this connection - No default value. - list of string * - ``bindaddr`` - Local address to bind to - 0.0.0.0 - string * - ``cost`` - Connection cost (weight) - 1.0 - float64 * - ``nodecost`` - Per-node costs - No default value. - float64 * - ``port`` - Local UDP port to listen on (required) - No default value. - int .. code-block:: yaml udp-listeners: - port: 2223 ^^^^^^^^^ UDP Peers ^^^^^^^^^ .. list-table:: UDP Peer (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value * - ``address=`` - Host:Port to connect to (required) - No default value. * - ``allowedpeers=<[]string (may be repeated)>`` - Peer node IDs to allow via this connection - No default value. * - ``cost=`` - Connection cost (weight) - 1.0 * - ``redial=`` - Keep redialing on lost connection - true .. code-block:: yaml udp-peers: - address: localhost:2223 ^^^^^^^^^^^^^^^^^^^ Websocket Listeners ^^^^^^^^^^^^^^^^^^^ .. list-table:: Websocket Listener :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``allowedpeers`` - Peer node IDs to allow via this connection - No default value. - list of string * - ``bindaddr`` - Local address to bind to - 0.0.0.0 - string * - ``cost`` - Connection cost (weight) - 1.0 - float64 * - ``nodecost`` - Per-node costs - No default value. - float64 * - ``path`` - URI path to the websocket server - \/ - string * - ``port`` - Local TCP port to run http server on (required) - No default value. - int * - ``tls`` - Name of TLS server configuration - No default value. - string .. code-block:: yaml ws-listeners: - port: 27198 ^^^^^^^^^^^^^^^ Websocket Peers ^^^^^^^^^^^^^^^ .. 
list-table:: Websocket Peer (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``address`` - URL to connect to (required) - No default value. - string * - ``allowedpeers`` - Peer node IDs to allow via this connection - No default value. - list of string * - ``cost`` - Connection cost (weight) - 1.0 - float64 * - ``extraheader`` - Sends extra HTTP header on initial connection - No default value. - string * - ``redial`` - Keep redialing on lost connection - true - bool * - ``tls`` - Name of TLS client config - No default value. - string .. code-block:: yaml ws-peers: - address: ws://localhost:27198 ------------------------------------------------------- Configure services that run on top of the Receptor mesh ------------------------------------------------------- ^^^^^^^^^^ IP Routers ^^^^^^^^^^ .. list-table:: IP Router (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``interface`` - Name of the local tun interface - No default value. - string * - ``localnet`` - Local /30 CIDR address (required) - No default value. - string * - ``networkname`` - Name of this network and service. (required) - No default value. - string * - ``routes`` - Comma separated list of CIDR subnets to advertise - No default value. - string .. code-block:: yaml ip-routers: - networkname: hello localnet: abc ^^^^^^^^^^^ TCP Clients ^^^^^^^^^^^ .. list-table:: TCP Client (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value * - ``address`` - Address for outbound TCP connection (required) - No default value. * - ``service`` - Receptor service name to bind to (required) - No default value. * - ``tlsserver`` - Name of TLS server config for the Receptor service - No default value. * - ``tlsclient`` - Name of TLS client config for the TCP connection - No default value. .. code-block:: yaml tcp-clients: - address: localhost:2223 service: foo ^^^^^^^^^^^ TCP Servers ^^^^^^^^^^^ .. list-table:: TCP Server (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``bindaddr`` - Address to bind TCP listener to - 0.0.0.0 - string * - ``port`` - Local TCP port to bind to (required) - No default value. - int * - ``remotenode`` - Receptor node to connect to (required) - No default value. - string * - ``remoteservice`` - Receptor service name to connect to (required) - No default value. - string * - ``tlsserver`` - Name of TLS server config for the TCP listener - No default value. - string * - ``tlsclient`` - Name of TLS client config for the Receptor connection - No default value. - string .. code-block:: yaml tcp-servers: - port: 2223 remotenode: foo remoteservice: foo ^^^^^^^^^^^ UDP Clients ^^^^^^^^^^^ .. list-table:: UDP Client (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``address`` - Address for outbound UDP connection (required) - No default value. - string * - ``service`` - Receptor service name to bind to (required) - No default value. - string .. code-block:: yaml udp-clients: - address: localhost:2223 service: foo ^^^^^^^^^^^ UDP Servers ^^^^^^^^^^^ .. list-table:: UDP Server (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``bindaddr`` - Address to bind UDP listener to - 0.0.0.0 - string * - ``port`` - Local UDP port to bind to (required) - No default value. - int * - ``remotenode`` - Receptor node to connect to (required) - No default value. 
- string * - ``remoteservice`` - Receptor service name to connect to (required) - No default value. - string .. code-block:: yaml udp-servers: - address: 2223 remotenode: foo remoteservice: foo ^^^^^^^^^^^^^^^^^^^ Unix Socket Clients ^^^^^^^^^^^^^^^^^^^ .. list-table:: Unix Socket Client (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``filename`` - Socket filename, which must already exist (required) - No default value. - string * - ``service`` - Receptor service name to bind to (required) - No default value. - string * - ``tls`` - Name of TLS server config for the Receptor connection - No default value. - string .. code-block:: yaml unix-socket-clients: - filename: /tmp/foo.sock service: foo ^^^^^^^^^^^^^^^^^^^ Unix Socket Servers ^^^^^^^^^^^^^^^^^^^ .. list-table:: Unix Socket Server (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``filename`` - Socket filename, which will be overwritten (required) - No default value. - string * - ``permissions`` - Socket file permissions - 0600 - int * - ``remotenode`` - Receptor node to connect to (required) - No default value. - string * - ``remoteservice`` - Receptor service name to connect to (required) - No default value. - string * - ``tls`` - Name of TLS client config for the Receptor connection - No default value. - string .. code-block:: yaml unix-socket-servers: - filename: /tmp/foo.sock remotenode: foo remoteservice: foo -------------------------------------------- Configure workers that process units of work -------------------------------------------- ^^^^^^^^^^^^^ Work Commands ^^^^^^^^^^^^^ .. list-table:: Work Command (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``allowruntimeparams`` - Allow users to add more parameters - false - bool * - ``command`` - Command to run to process units of work (required) - No default value. - string * - ``params`` - Command-line parameters - No default value. - string * - ``verifysignature`` - Verify a signed work submission - false - bool * - ``worktype`` - Name for this worker type (required) - No default value. - string .. code-block:: yaml work-commands: - command: cat worktype: cat ^^^^^^^^^^^^^^^ Work Kubernetes ^^^^^^^^^^^^^^^ .. list-table:: Work Kubernetes :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``allowruntimeauth`` - Allow passing API parameters at runtime - false - bool * - ``allowruntimecommand`` - Allow specifying image & command at runtime - false - bool * - ``allowruntimeparams`` - Allow adding command parameters at runtime - false - bool * - ``allowruntimepod`` - Allow passing Pod at runtime - false - bool * - ``authmethod`` - One of: kubeconfig, incluster - incluster - string * - ``command`` - Command to run in the container (overrides entrypoint) - No default value. - string * - ``deletepodonrestart`` - On restart, delete the pod if in pending state - true - bool * - ``image`` - Container image to use for the worker pod - No default value. - string * - ``kubeconfig`` - Kubeconfig filename (for authmethod=kubeconfig) - No default value. - string * - ``namespace`` - Kubernetes namespace to create pods in - No default value. - string * - ``params`` - Command-line parameters to pass to the entrypoint - No default value. - string * - ``pod`` - Pod definition filename, in json or yaml format - No default value. 
- string * - ``streammethod`` - Method for connecting to worker pods: logger or tcp - logger - string * - ``verifysignature`` - Verify a signed work submission - false - bool * - ``worktype`` - Name for this worker type (required) - No default value. - string .. code-block:: yaml work-kubernetes: - worktype: cat ^^^^^^^^^^^ Work Python ^^^^^^^^^^^ .. list-table:: Work Python [DEPRECATION WARNING] This option is not currently being used. This feature will be removed from receptor in a future release :header-rows: 1 :widths: auto * - Parameter - Description - Default value * - ``config=`` - Plugin-specific configuration - No default value. * - ``function=`` - Receptor-exported function to call (required) - No default value. * - ``plugin=`` - Python module name of the worker plugin (required) - No default value. * - ``worktype=`` - Name for this worker type (required) - No default value. ^^^^^^^^^^^^ Work Signing ^^^^^^^^^^^^ .. list-table:: Work Signing :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``privatekey`` - Private key to sign work submissions - No default value. - string * - ``tokenexpiration`` - Expiration of the signed json web token, e.g. 3h or 3h30m - No default value. - string .. code-block:: yaml work-signing: privatekey: /tmp/signworkprivate.pem tokenexpiration: 30m ^^^^^^^^^^^^^^^^^ Work Verification ^^^^^^^^^^^^^^^^^ .. list-table:: Work Verification :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``publickey`` - Public key to verify signed work submissions - No default value. - string .. code-block:: yaml work-verification: publickey: /tmp/signworkpublic.pem ----------------------------------------------------- Generate certificates and run a certificate authority ----------------------------------------------------- ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Certificate Authority Initialization ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Certificate Authority Initialization :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``bits`` - Bit length of the encryption keys of the certificate (required) - No default value. - int * - ``commonname`` - Common name to assign to the certificate (required) - No default value. - string * - ``notafter`` - Expiration (NotAfter) date/time, in RFC3339 format - No default value. - string * - ``notbefore`` - Effective (NotBefore) date/time, in RFC3339 format - No default value. - string * - ``outcert`` - File to save the CA certificate to (required) - No default value. - string * - ``outkey`` - File to save the CA private key to (required) - No default value. - string .. code-block:: yaml cert-init: commonname: test CA bits: 2048 outcert: /tmp/certs/ca.crt outkey: /tmp/certs/ca.key ^^^^^^^^^^^^^^^^^^^^^^^^^^^ Create Certificate Requests ^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Create Certificate Request (List item) :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``bits`` - Bit length of the encryption keys of the certificate - No default value. - int * - ``commonname`` - Common name to assign to the certificate (required) - No default value. - string * - ``dnsname`` - DNS names to add to the certificate - No default value. - list of string * - ``inkey`` - Private key to use for the request - No default value. - string * - ``ipaddress`` - IP addresses to add to the certificate - No default value. 
- list of string * - ``nodeid`` - Receptor node IDs to add to the certificate - No default value. - list of string * - ``outreq`` - File to save the certificate request to (required) - No default value. - string * - ``outkey`` - File to save the private key to (new key will be generated) - No default value. - string .. code-block:: yaml cert-makereqs: - address: localhost:2223 service: foo ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Sign Request and Produce Certificate ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. list-table:: Sign Request and Produce Certificate :header-rows: 1 :widths: auto * - Parameter - Description - Default value - Type * - ``cacert`` - CA certificate PEM filename (required) - No default value. - string * - ``cakey`` - CA private key PEM filename (required) - No default value. - string * - ``notafter`` - Expiration (NotAfter) date/time, in RFC3339 format - No default value. - string * - ``notbefore`` - Effective (NotBefore) date/time, in RFC3339 format - No default value. - string * - ``outcert`` - File to save the signed certificate to (required) - No default value. - string * - ``req`` - Certificate Request PEM filename (required) - No default value. - string * - ``verify`` - If true, do not prompt the user for verification - False - bool .. code-block:: yaml tcp-clients: - address: localhost:2223 service: foo receptor-1.5.3/docs/source/user_guide/connecting_nodes.rst000066400000000000000000000065271475465740700240340ustar00rootroot00000000000000.. _connecting_nodes: Connecting nodes ================ .. contents:: :local: Connect nodes through Receptor backends. TCP, UDP, and websockets are currently supported. For example, you can connect one Receptor node to another using the ``tcp-peers`` and ``tcp-listeners`` configuration options. Similarly you can connect Receptor nodes using the ``ws-peers`` and ``ws-listeners`` configuration options. .. image:: mesh.png :alt: Connected nodes as netceptor peers foo.yml .. code-block:: yaml --- version: 2 node: id: foo log-level: level: Debug tcp-listeners: - port: 2222 bar.yml .. code-block:: yaml --- version: 2 node: id: bar log-level: level: Debug tcp-peers: - address: localhost:2222 fish.yml .. code-block:: yaml --- version: 2 node: id: fish log-level: level: Debug tcp-peers: - address: localhost:2222 If we start the backends for each of these configurations, this will form a three-node mesh. Notice `bar` and `fish` are not directly connected to each other. However, the mesh allows traffic from `bar` to pass through `foo` to reach `fish`, as if `bar` and `fish` were directly connected. From three terminals we can start this example by using the container we provide on quay.io .. code-block:: bash podman run -it --rm --network host --name foo -v${PWD}/foo.yml:/etc/receptor/receptor.conf quay.io/ansible/receptor .. code-block:: bash podman run -it --rm --network host --name bar -v${PWD}/bar.yml:/etc/receptor/receptor.conf quay.io/ansible/receptor .. code-block:: bash podman run -it --rm --network host --name fish -v${PWD}/fish.yml:/etc/receptor/receptor.conf quay.io/ansible/receptor Logs from `fish` shows a successful connection to `bar` via `foo`. .. 
code-block:: text INFO 2021/07/22 23:04:31 Known Connections: INFO 2021/07/22 23:04:31 fish: foo(1.00) INFO 2021/07/22 23:04:31 foo: bar(1.00) fish(1.00) INFO 2021/07/22 23:04:31 bar: foo(1.00) INFO 2021/07/22 23:04:31 Routing Table: INFO 2021/07/22 23:04:31 foo via foo INFO 2021/07/22 23:04:31 bar via foo Configuring backends -------------------- ``redial`` If set to true, receptor will automatically attempt to redial and restore connections that are lost. ``cost`` User-defined metric that will be used by the mesh routing algorithm. If the mesh were represented by a graph node, then cost would be the length or weight of the edges between nodes. When the routing algorithm determines how to pass network packets from one node to another, it will use this cost to determine an efficient path. ``nodecost`` Cost to a particular node on the mesh, and overrides whatever is set in ``cost``. in foo.yml .. code-block:: yaml tcp-listeners: - port: 2222 cost: 1.0 nodecost: bar: 1.6 fish: 2.0 This means packets sent to `fish` have a cost of 2.0, whereas packets sent to `bar` have a cost of 1.6. If `haz` joined the mesh, it would get a cost of 1.0 since it's not in the nodecost map. The costs on both ends of the connection must match. For example, the ``tcp-peers`` configuration on ``fish`` must have a cost of ``2.0``, otherwise the connection will be refused. in fish.yml .. code-block:: yaml tcp-peers: - address: localhost:2222 cost: 2.0 receptor-1.5.3/docs/source/user_guide/edge.png000066400000000000000000001243701475465740700213720ustar00rootroot00000000000000PNG  IHDRkE=sBIT|dtEXtSoftwaregnome-screenshot>-tEXtCreation TimeFri 06 May 2022 11:08:29 AM EDTYP IDATxwxTUdIOZ %!i`]Z]]PtV(-@ ɤI33L/wZr^`Ν3;{gιsdł   pUAAA3Κ   UHtAAAB&  p5AAA   \߿;;; dRٞ={?,;;ѣGKe_1L3eo^WSSÓO>)͛7||ѢEJz=6l>j(o.]B!'$$Hex{{cggTKll?ORYII ~T(Ϟ=[*6lwFo!͘1$|ٲeJ啕RYBB |޽*V:g>^yf3mmmL$RTaRM7DJJU>(He{G||TsN||gRYVVRْ%K|h4&M$}}GQQT#3U>~G|VǡCΚ3/TVTTdSOVoY*VYl9tR|Iewƍ~*k֬GTTTtR|s=RYee%yyyzz"ɰ֮]+۷H裏2Ri/c0j͞=[z]mm-7IO<nnng|uwwqFR #?~\-vppwޑ;v~Z*+))ᮻ/^LCCTvCxx/_.x$&&J寽 ˥H`ݺuRYBBU>V\i/h4a9sX㩧:g>/^||z|9m۶I֭j;ݻW*KMM;;;ϛg}*wy9vÐ!CoJeӧOѣR?|T*wseJ%cƌחۤ՗Yx1d2zzz苬uʨ044AI\YY8x`ꢨV @`` d2L&ttt!5zzzEhh(`ssј77QXXN;#F ·BIGSS777Fy|8;;*壾~ˇ#cǎQ[[KUUYQQQA}}eGPPRСCqqq򑓓l>#]P>~~Ǹq㰷>(;;; F~yyp.(tvvIdd$pQYYy|? 
::ww99pɉsCPzQ'44K{(J^u\.?fԨQڢP(ߢVJKKs-,,jDGG~ltֶZss3P3jxzzPSm":^z2ZZZ: p|V;v,g~Ŵ/W>zj}mqoQJe,X@aay.}vz&VAF&\ٺu+ǏPAl;AAqssC.w(pEm۶jx }v p-:u*QQQҐQA^!=Rp>}N0h"9AAZEGG3m4 ߡ ymܸQz||fMAAzmٲoQB}N0"YA~T*JRL/\w^A賳v iZJAAZ8p/BZAVd>볳tR***.IP p #׋iOA K.I B  7@Pw \iӦ]vb5AWa̙=OOE|C2sgD, p7ߡ y}ᇗffݱd  Ʒ~ˌ3PA@Ç'''>Vw|||DGMA555QUU`PZd *"j.dkΚ sNV^ZP*-- ¯TX233D, 577S]]- W#G\>gT*hK  pi}W >"DFF^vb@{{;?+VPA~qqmߡFL.rL&<ɓ7ouرcwc۶m睵S~Egg'qqq888\X,.{ \nSNb`c#C 1ܥ̮]?׷Cj%&&/F:::s0qDd2y볳vK`X0Lflllf666rEL&d2lm1bA&!00d3}g2/ppp`kzcݯ-rlq<~1>"AL&Wu;;k?ۆݗΒgzm3n[韛bAӡjW6g- [[[F#϶?oJ>X,F-Kw۷?o{zǾw9^2 {ԩSzmz&0͘f/$?ZqwR[[y/?sɰaøٹs'fNnv8q"o6X,^x?2uToͶm۸[eѢEP__χ~ĉ`lڴvw//2eXd }oʕ+1c/Nw&44kP[[K/_j59r QQQLc ׿9sq{w^z'r7~zz=fD.\HDDcƌ'… 駟X|9qqqL:-[f)}G(--`֭,\df߭[b2;geΜ9,\*nDRRgŊժZkk+FJkL&N8AVVwu6mBV~֬Yüy8~8}{/ [[[=CRR?XldϞ=|7԰ez=۷ogdggOn:>|o?>|$$$g}K,aڴi<=zy摔`ǏP(HKKZ[[~o'$==7|׳uVt:رX,԰~zZZZxW Až{}];A.@~~>6mbc IIItvvLT*#cybݻ}=tww;EEEc0HII!((6mڄB 99;wbXݝcrqj$$$;P2J%?Xz!/݁y\h}v^}U***.555tttqFlmmח;&O̤I8y$9ѣG#˹?>P('NeeeHFF9{L2\NPPTWWSEnn.===dggr۩ngddM7݄fbر$&&ֆLXs 555_ūJXXe̜ ¥U[[KiiyAP &蠦777}-ZDss3'N`DGG<СCoppp ..!C eĉ\.ꈋJŭފJرc}ҥKY`tuuBUU:N`qvv>}]&NH]]nnnJCᢢ(((D&L1c8zq6mԙ\x1---Finn3fP__Oii)&Ca2[z 0qaaaۣVIJJd2NCCL61ɉcǒ^'''Nii)mmmT*BCC&--{w(Y`C kggLj#曥3L&EEE^Ah"xqUbɒ%VZK[|%<<vÉb̙hpssKۛiHooospp@&IICWW(JJU nnnv;SK}p :tJ+Bvv6iiiTVV@@@mmm 2WWWJCBBhhdܹ& ׈~cذa̟?uqI ƸqillDoLOpptF;ǀj4 o6 ꋉ9kiidhZ, XBCCzLUU`6&??c2?~<۷o:ksח3qDX`رݻwc41LT* dbܸqD\\?7|3g^F4 999T*-<<.7GGGƌڵkʢ#GLee%J!CtgoHH_[[3FxuVy駯َ>zAݝ <͛7T*IMM%!!C+`ooOgg'===8::뱵p!ɣ>ҥK^OOrWU/oΜ9۷`BCC !<<}!ˉ ;;;zzz0í{L{М5kG%99ӧYAf( ,}/׋+Wwse7bbbx嗉&P9}_e2>>>o>;v,qA&9[~N^Ohh(...۷#GuMNsl_So۶F?OpIV/8,X^7|CCCE %%傎>j*BBB.IPbaϞ=޽'bccCqq1F"22T*ٴia0[/z5kk֬aժU >****AP\TB '==͛7Ç3`<… qttDPPYYŅ3gRYYɗ_~QTXL&Ϗ o~z^xAXVVC v-hllc 6 ^Okk+SN%<|j!XAb~7Dsw\< •dɒ Zr[o%M)  \]~38_^^^?~#GEOuDA_!CH /Y?23f0cƌEDy=' \LqwwPA.4u1bĈkn:LAAAѬYعsgyg-..NtAARSN%;;"dff^Н5 g֭[FPJNN2賳rJjkkD, VPA἞xK3uҥK:k Uh4b0.hX L4 77CtK3uVl6_AAKqYC&  /w/Ģǂp1t9 r߾}DGG_AAKKӉQPp1 }nggMA8bӃ^"j/\8E<&\,YJ0Ab~AKIՒÇ/{ [~_XXȱclu?3w ¯T%SRRJ#ptvvR__-r9xxx`ccVvr9T*ٰaNNN >>TUUQWWǼyX`...l߾[Յ...,\XϾ}puu%44O?jaĉ<#sNlB[[8::pBfϞMuu5< x{{ЀVe̙xӃsGccc\.;廊swwo6lĉ <^OSS%KZ> ??Ʉ ʕ+IOOrN0s5jNNN8991}t֭[w}dddIww7דFݲTNd2L6 '''t:&//ݻw}݇%ʦ)S0j(/KLe}d2sM h2,i`Rz:sa;F}AH 8ϡ}v^~ \Kz;)66rL&Z ɓ'.R=gtL&]]]rS#GֆlFTJ=KpttƆ3h4aoo/4 uƸվ@CC2 <$$h:;;@ѐ# 3b"""=.]_͖-[pvvߟncǞ7_bȐ!kd&* +1[,|{ Lf E lO,6L& 3)腍XU_+V}?,(A;μRl6Յ̌rGGG 4lwww 3ߥ;g(..`0&թ鱝>o'okkUWooo }?===Qggg&N믿.پd0ff4662b1 e[ĎB:p!(TB9Gߕ\af) gf3酵 5m8a}Wrz뭷xϘTHg}ƲeϬ 'Nh4uVZ-z;w'`0dRPF#ǏGV'|BQQرv陯+VH.rrr(//<, AAAҗwGGZ8f3@ӡofdeeICCIIIL0ٳgɓ'顼x9;ÇTt:լ^Tzzzx|M*++5jz+C  JÇu}?ԥ!1:u'N2ݘˤA&l6:ɑ rF.j '1.1kd***; A.>޽ & ¯MDDO>$?#رcdơCذa'Ndԩ$$$p t:>>>P(Xx1 ''j5G&h2Xn4440ydƌd"44͛7;xGٹs''Nޞ?BkM 1p@ɡ¢Ed̛7l{=<==1 3rH馛?vh4b4:^.ᅬ+& ^]wuYPPPСCv62=rcDc$00duW]58H:ld4wv)A7tH,}\f7nk֬a̘1,8AV_9n<==iii!==zlllcرc0(--ɓCgg'999j&44TeddPWWAÃbynƏOSSDGGdɓBLL zt% 8h4$%%1n8κ˗/sb61 0n80͔KWW `С1[[[i\qttdذa :GGGRRR0L8\b\:;;#((2 {gR_Q೮6ڡ C^ASk1ϗ]AAA 2K׶4 E͔8Q?`Xkcwj n1ҋ/3aNF[W7hh3-Io 5mmme6k٧~*=+µaӦM]5qJAաj¯n#{RJolgI`&& Wg{~~cl6m:\l0/89^80є"+CM6| t#ODau'FseL`l?v*}=Xd cƌvAzhxfMAUeҥڡ+iAYHE]+2ܜ5!gm62&DHGghݱoEݪcp'suM6wwu]828LhH+ :!~Lf=UnC_?fyhmmh4w \e˖-; `ƌZhd 4CQSSo&O̦MDVgHBB:RIuu5t vݝ***X,|p |tttζmfΝt:)))Arq ޞLʰX,lذ6oL[[z!Cc(--ETb2HNNÇc6ϧW_}EDD[nEVc2'::[Eee%'NΎ#GMJJ Z20L|w( vEcc#W&..oNILLŅIKKfHǶmJ4if!!!^OVV555j2331ܹ|Yɓ'[ck.)tuuIHHHёt|Ʋi&tVjd"%%n)yyya6/ƍGZZ$&&Mww7eee$''ʎ;0Ȉ#x뭷ɓdeeW_}ŀ8q֒N#%%GGG줺DYbLDD+VחtJJJX,ڵ RSSPZZJKK xyy_cggGII GaԨQ[A~~>sq)))!//qssc۶mL&9r$88Lpwwg8;;Z"ё{j"11!Cɓ'IKKcРAlذAG]]jzzz8vr#Ghѣj*|||P*P(Xz5R>ǩdž puu%??#G0b>C)L&vލ ǎHFjFxS\'gG|(73|ttt_Rņ8V `?o,#22˗ 
N/55&u;v?ێDYa6~l޼;;;T*Ǐ'n`?GAAvvvl۶ {skTS*GTM^K2(vϏlqrrĉj8y$Zؽ{7ZJ=СCy___㥾&iqôP[[ѣG 7(J;FHH}QUUEgg'd2l&55:xgݺuφ x|a4ٳgnPTr䫯">̨Qxw###*vvv8q)((d%***J)Jk.zU>}]$##___6n܈9akkˡCP(O̚5k|άzL&ǎbp1hhh ))֮];yyy$&&2l0V\R>݋L&⻐|=_}s](>Y顏b"HOOBiiiȑ#X,L:::ˣb˥755qIZZZ()):zU]E^^QRRBCC466Mss3eeeVINNh4MEEVZT*455QXXHCC444P^^.cCEEX!''jEPPP@}}=yyyPYYIII ?~\ڟrHLLh4JښJ%S__Oaa!UUUTWWKqa, J*̤]Gg^WWGQQdff"lhhh8#R>z:=HVVǏͥJ%k466Ccc#*JDZcǬ[z?<)91NVVTWWdôR^^NAADqq"`08z(mmmT*t?~ZMIItIdzZ&??Jjjjɑ:jJR#jRillDTZEEET*jkk͕.r477SYY)wRRʒꨯ25222hhhHLL2uz?ú:T*|VP*R>zCJ壠|Jhii)T*t棩IGo]DSSdeeIΕ*Tum}AC})S(bp :;;1]L)ETnr`4S6#g455'H/`c9|ec:;{{N8uT:^zkެŬ\CCs]%*M=k|---gK[XXH}}=T*9x=z֒'եh;MNN:^ߟ/x[QQOddtRZZJss3R7s-//233x(//DL멪";;.G~~>mmmgh4huvuuIb3Gnny|өVhhh:),,Osrrtz?Cc>*7Fޥ%Y!v CB ' rY6 $0bb*ɲdO3y`p${d[hV^m׾#>y|vQ__ٴicr>N u~}f3-Y IDAT`"5544'PSSCeeeq>^/5kƍtuux ~Eg3iGL7] zY^ (a j,B>d0W_=G|-&^|/n_  d{ͧ$p.̎=\Gy1yyCo`xX\ BQ~o} Ñ(bEQ+}_'++kXB$B Lrrrte{Pz~]~r337O9e W^ cmvnf{Cp|̴#Xp$:p8̟'?kX`$F,0ӀݢMz}!!=-><ϠWcJuWef#bjhh  &; xOVEE,N.`X#1b0@}m~Z5Ź|UZy*#E8{d*˲}&t*Ae~U}.?,0Ъዷ'";9tQOMeP(FG}z-S˲eDBԵyEqpDct+;p8{;\oK8R-!ٱc/^x!vZq.b"/E! QTAlFSt=oL:꿽lh4R˱I3Q[CQ{^GcgpbŊAڳ>Kwwh4%#"y<bb bՃ_{\:96SKQ㇋^`z]}~XWFTQAMM yyy%;ʨb8@1>`:7H©63Ts1XNtf6]~ Z Ȳ<&t7X,/~6FD"(rx JkJ)5{;B cqzp';!eH7bJ_uU$:Ls.z\.WqRp8CތRKrWO1-rylyS qIZưI8#+]Ge缁Pxu F9\Lgs 2?7dsoPFa<Ocs O{)ĒɌ3!Hy8NL"EslraP]OA*|!vPg`h[85ӋxIEvb l61{QυFEG)h57j}SQ,a^9s搑(b-YD>q{ᢋ.J8rX|`$33Sŋp8`r:B!筝n6lK(hkd^3x ֺ.6wa֫ =rm=X[8I&bbEH$N j6c?2.tzP=9o@Ӫg&/ǎzS(.fqf˗/qcD2%j&MAA FT}vzzzb'Q_짔f`@FVN勀ER " ju(b@ y(/'7H X,"n D XLmC&Y8G c8B0ggC~fM)ŚLo"=+سghdcXkk+W%Q8)555bm,BFoN0LvVPjNO`fVGQzEPuW:VGk0tclv~LzfUQ>O>Ox\Utb3HZEDI~fF&QT~Sl1~=c:t(1xdž4 $aZFeeL)Fpq:-/2p^Wpd#f6(ȶը Ec j:͡πȠlLf3n¦7 4-*a#:Am`8xTDz/PP(JBZ\f3׿p$; ,j&;\pGRVHyx\efΜnt/tv"uXIKKv5^NJiAl y6;I714tVA^fV"Qt, Hgs3YyX-OG>w _8F$g\V]R5u؍1fMJ'(a7wk6- &oyBODG{p!&ܹs'Ԟ5bpر<ʒGp8BҺ%N4L@Mϼ:Qmeddpn :?tSZ?S tG6PUEh D(ꚻٸ< gP3Gq6&I7PuvѪWP%_ᖃ%˪c^eQ?\s:67 (KǤϞӥ+xzj,YBqqqaefϞOxv{] qZ!R+BGGvtgfs# u\4eh[uGh1ghyo;K94zf%;Mx^oydَw,Zd8ݲtT~B]G ãaP [Q /CѠ=/VCMGVV ϶:(xN#'[ \E 5֭㩧+dɒ%Ɏ# wC<N":lL/$7](#Qՠ3(=(BZ]J'RY롽ߏGN‘9A(j-328=pDqfsH EJg>:p(EPYd%9i~dnn./2 .Lv1̙ó>ˢEE Ni/̙3/E[r%===\}dee%;F=\wuu 8SJ__/ロdG9_ҥKCʕ+Ab&i,Gq;# Cxg+6N[N?Edq*V RBdee(Zf5bO7irszZ>$sY|T d.<uL틐Z7+V`)p}?eҤIɎ"RS¶mۆdeb]̈́B[[lwߝ}-]^"H?} 8:_IYIöl`#\vAz!--r*iלaokae<  K'31# aMNB!ƼkO6v5d;8JR |YO)|֔``8{xkCB NRܪB&*HmW^yf$<onXB !bdTdQ;t{ NC-Ƞ28L/E!?=STepju1v xdBoH݋eBq)SL&s92ҌWX*B'PEq‘^w643(B8fRUlA/Ԋs<#p8B!>! lqƜ2BomϔB+&X,}=ou! M.L+E:,0ҹLǠX{ jBak_ZNB!<ه٤sXLz*m^E9F"tF<0ל3 3#كQRaXdfiGU'b\/$ ̷lٲa %Bn䌹a+ ͒ijw )̶2crZȮXtj4-n7p q)ۺ_W_%m.fM!8G /Ln4TEډj.O'H,0kr!v`T@(¡/^qJ`0`SI6_sT3O>)'!c͛yw&>k#͉ߔd# ["ިR_^7?O>!w]v;ijjPB!H |!M+շ `1YWQX[sw:пuS;ibmp %;8XbH{]&\vAB!q T9r1 M[w75Xv4rfJl4r=H{e[!RPbIk%!7L8&###QRZJJ 2H71]Sx>G;)$7Nڻ=yحQ)&dGB?G#BqӓaPŹdYAШUٸ74^O{;tXB/yOf'33s؂ !bSThj5ڳURaӸdI:^H4b@{ >{|ureM0Xf Ɏ"f G6n܈,B! [z5O<}}}Ɏڻ=ޗ۰?3N)SL+P+ v[;XidD !D*3gΐNXXB6ZB1A4M|Pn7& 3c~u.^ZsHO3uuZ _C^v w dB+V cbmո\a %B'Wq tpy(GYdf2Vl.ox\ {W bN?tꄥYB1>,]ٳgcۓ%娀ilP GckeY.%|r.;2Ȳqh:䢢$q{dWr'? ţE!8a555y晤%;JQUge鐋5-O3H7Oܹ_@֭ !&?CX(,,PbQO>|;|dDQxnFzd"m6z\d9I NC^AK!'kg;1EQV^=~/r=gϞc~W?(%# '<.aN'S+EQhoo7 Hm~~ IDATdʹ!:=1-_Yǵ6X*J(ʶRm%ukf8p 1/X`L6 z B[.aQ'RCOOQY*ʯH.zzz袋6mZQSWWǭJ~~~㤜ɹ\f2,F 3iw*e-حn-_CC`01nz{{={6 ,@QLpbIxW=gyyJ=΅aVZ7^/gq7t\.^~eV\IGGp ,]O~󩯯gӦMlذ!?֯_M7݄o+6k֬\.;g}W^y[83NW-ݴ H3OE!vMOѠ0#Bw{wo&|L27r-p)l6-[wïYf \s5\tE<żCpJ].~;lڴ?A^{5~i;LG̘A5xFjgP;Ut˨Jv, 1ގ Ayy9?IKKrj⪫BC]]---̙3'?) /^, @SUUFbPXXH04{1^yzzzD"w`0 is?!FC0СCn垙Inn.| {tp8lT* %98/FQ97C49I3 ? 
,`<9R?i7~|k_cժU|k_~\uUdff`0p8{Fj5P( Xn .L`8m̜9s؂IR Xh<ì]w444~Oo B1/u^򻜢T*j ja=];=ie٠o #X &yGYn]tVbƌY1r.!O['}c4477h",XJb׮]8N\Ęe4),,χ(twwANNUUUvq\(B0^:Ip8|Ĉ8`(JGoF=ϢR͏^wBs0X:G @.#'d2vdtJYYz+o<Ɏ&$4HIZZ̅EsaioofΜ9̝;466yf"o6~72!ƚ >l>}QC7)™g믿Nee%f7o|C~Sʕ+[n!777qlj5^{-:{ Myy9<3fH"N@[[ vc|jdXY'::](I/&Wؼ1(X,n$FJJJ>cXQ.v;W\q7x#^#FQVV61v,_|Ha x+Xj5K.eҥ\tE;w.s=-XG\r%#T!>b^bᢋ.~hE#`0`2d=(@,GQSNiAƈWr]>: gVwn?*ŚbyW8IXL!cF5hL.Τ m}l{6'ڝQv6v+9 ٺu+2,D ikk;g$?uTL"B$륯X,(Db#Z,-gV1Z!B;5 IZ|9Hv !5kB!ƅu²e˰Ɏ3! .o7MN ̪CVQhT(džVZ7x;#1#`Ho&F#BqBm93XeHv'͇\49it㥪 r`&É H$]tY̟^lrfn>Ͷqdff&;b$6)w_$gG}'w0,sTqүiY:(̔,23ddDm';8ݻ)SRnbGr؂ !#!+++ƼH4ƧuZJiأgOc7-.ΫbpѠ%(@ !Đz뭼tcX7oްB!DDqۜL)`(ʞnu 2\8 ubŋs3k֬dGB ͛mYB1.ZV-[&wW;Dbz*K7"|;Zv#DqEQhO8 w‘ XGQ2Bz>#nwc!F@3'F#BqúuɎ2&b :PUdX ҨRAg^Hv !HX˅O!ĘW[[ˬYlɎ2lwݍ]v9J XjL+BU hG Վ\'rjul6cƌdGBNrrrNLx)..`B!H`Μ9ChtZw<,J3xI.YL/!~g o}@\:o ! ?-7oV %B!FO8y N5-Us\\xbtZpEQ muDa}ꗿ%Lv !qxᇉd͚Bqa2o</-.*`feIVQhQh'1Z!(Z#xXzFJdZk"˗/dGB #ko&}}}E!8a֭㥗^r%;ʘ`O7>|C,X-AFdwx{'o|T>‘Tjpyu{"B+_ !4iJxĝwISSӰB!F㡷wHJ&mqj*Ol pzj}Z_3v@(:-řxaJ6SNB;&T lقPB!=ΘF$#( EK0j`.v6vQ΅gL#/T-9|^w7Dza|dB!'kM<'Hv BS۷֖"dG8)^z)g}6999Ɏ2f:h,qDZb699 V-uu{PTD"]i禣RpB4rR]qrƻO?=NJOOf9QD ٱceeeɎ1k4ihdb>lc9>IFF,(卝Yfzdn7* ՚(c(C~7M:ޖ'~Z5&2 dauz1Ҏ=ʧRHO3Pgc[kXff&j^{-QƜh4Jkk+ 2\.WJ?p*JQIh4iNe&;Ƙ{n~a=\:d EEEɎ@SSӘK&}Ǟ={я~̙3ih4n1E!?'ԃl+mjr._՛uXX]I9<\tEzLv1i\s5lٲV& jbSߧz5}&O6FQrss͕ `T*NGiiiJQL$20W_}?n50)))IxKRa4ag ϼ4.KJH3⓵|rN9唔,HKKKv1iÆ  L(b!7"DʲL&Ɏ#q||'ٳ͛oI]]]carJdGIQ}!7ǧ?-͸}dN'hN"444PYY"k>h41,##4zzzX)i͚5<^xlْTb8B!X n\j56p$F[{[;OvD!F]CCӦMKv 1Bچ4 $aIML6 ?a[! ˖-[I '1NjZjP6+[h*pNWO/o=^2e$[ŤKv1ޓ5㌢(RsCpN>/''N)DJQu裏ʕ+K4ij OuL:͖(IUM}kNoeKjpNüBfOɧ`-m4z~1Pkk+̘1#QFC_:YYYJZZZiF"en6mDCCÀB!Ԕdb͟?s=woP܃/,?&*/iLsp橓HO3'+SOQVV&DZW^yeH '|SK(jjjatww'; )–-[_Rxؾ},B4!psiS(xaڝtK6;vC v^z%.2_m曇t!~Đl6-ZĮ]hjjbs\]6*++Yd xbt<䓴';8I===D%iZ;?Q {Ec wr#kxy\P#IgfɎ!(߄ڊ+d=8l̛̙7"kĘmc=Ygu&>ٳo~v>.'Z0#d`< OF8~l&fըEb#^*;;N:f`0s=sΑ)܊+hOUOX=쳲>Ij9sq:Z כHBUnn.K,.c2(//Nĸ|H'M!ƲyU_[ɂ" }޿?soOFfriTg{G%;8 H5kְzj.2L"S ǹe˖ 3nb\Zx1 &OLEE6m"''G~ljHKKKv˴i!k׮W_zDj32/ߍo=Iڸq#_>#%<]uU&sK.suV>&ƴ`0ȇ~HSS~:^x!EEEYddҥ\}dGI:V3)¦M̤uPURq999\wuZ w}7eeeJ? 33!$;G‡~HYY'??e˖/-8~)'O.Q&իWt:C;wVk4iL bp6!1r>S~|>k֬:<̙ٳyF|d@IGG7 ^z%8W_}5@w}7ّ&˗K3o>~n7\s hBihiiRj)I?>ygX|$1EQxbfZk'`!ŭ^'xdG׶nʝw 7׾5~_qdG6l؀Iv 1Dn7x￟x<΍7Ȃ dpD *avWgϞ"R\o~V^7, 1!1/'3NroH$o[v1) bljlld7o.\W\AFF\r 'O)B$ϣ>Jyy9Yx1&)({dž4X{'bhկr}u]ǖ-[IL`h7_-̙3=bpw~z)؄8z͛7s]w) IDATJ뮻.?%;_Ñ k׮;vK/k јx" .!Og?ٱ(--lb***b޼y|'<䓔2ydg{?f̚5 ^`ƌ裏R\\ɓWzvf͚%OGOCѮ_VMnn.+Wٳg (&33SSQ}}}<<8nN;4l6\gDB _K.Q$RYvv6sΥ^ X_~;kJ% KdE@vR֧֭ShNvZPu\Tk L("!֐@_B~>77{9 6 00Mfqơ}ww^Ȝt///ёZQnn.V^ DM6aĉ;ˆ#SN̙3 K3ѣضm233d,_~~~t}kLM4$puu4!t:jkk_+xD"sG[n믿Ƒ#G?>bqZrbϞ=8rV^:$ұc'`ܸqXn:mjbݸw0g/Tt:ۇÇ ,?___'LXX! ;}!KV =;#Gbڵ6wjdill?OlݺFš5k0o4(++3!F54 nܸ7իx'j*;5%%%a֭q^{5[@:ػw/<Ә5kAƽ{b :KW^_;w 66˗/Lj#NiZܹs1b^hfHMMŴi̝ʠ$p$&&=}Y 6 i]X#رcرc0o<,]G(NNFT|w8qbccoxm}}=<B*b…Xh""" lݺׯ_#F;~AB?ׯ#..˖-D"#G#F_Č3`gg :}@BϟZ/tjx"N<j< ^|E >i~`˖-xaii6քB!RRRh6矑7#<*@ h4T*̙38q_p,Z˖-{!]܌L$%%9r$͛ :BVVV6jjVV!ɐ_EEEa=i=Ǐǁ(DGGcԨQ 2d{O<6n܈0s2 qJBW_~Ajj*8̜9„t5ֈ٨T*deeıc兙3g">> } BZJvN:">>QQQHkOqq1N8$p(L8pvvfac\2w:}ZFKK  R)222wO>$Ogg^I"==gΜO?oooL0l[[[B&NARzSPP/^RDDDbcc www:N!=hc-((DŽ z+'2#55OFQQѣGcСlmmKqjhnnFss3 ***pEFܹs1{lx{{\z+._ \???"$$ptt=FzNRB`oƍ7;w <<1111c͝2z+<==1f;:t(lmm>n* J-Gnn.P[[GATTΊt D?++ AAAt105W^˗qe "NNN4n?؈466B&3RUUUpvvԩS裏bptt4w&+**BVV^K.A  ot{r_8Css3ԄZ塰R%%%hhh@XX&Oh7iKӡiiiHOOG^^ #F 88pttdM;ۤR)]&PP(hjj\.ǽ{P\\"wذaN 0ӧOӧi~ҿI$\rW\oRaÆg 7~2d!: MMMhjjBcc#p5梼2 رcz;p-˃5<<<www 6 ^^^pttdcGGG~ZZZqlmmFaL&CQQPVVJ 88aaa APPP hnnFAA 7ovvva#F@$Ύo˗/dT Fer͐娪;wPRR*TUU8"((aaa3f MBzMON&\v ʂD"qpss xyypqq %r G}}= JQ\\Ԡ1vXo577`)--N+9 _`yŋBll,̝Ni46t?/uuuFccAtKK 0j(8 4=AѰ? 
?owЭ{@mS'''#&&fP\O>(RqAY{Gv5ڋۄBaCu[ß%?ITBRy9Tfޅ,|vmc3_JVn_7+uoPqz FW҂_bm5u{zMȃXb5BH0;;K#T}(F9ggϾ)1BrJ[0ZTL}!BtTVVbDN^3Pg$d 3f޽{QUUmB!=!++ gϞ\.7w*BH/^lh(xw!H%)B!+2?N6l0gWuJRTnIB!tCiBۤ<9$$$`ĉڻvBaa!V^ ??n}l۶ .]{oUVARܩҫzbFA \!ߑ娮FkkkvCCjjj؍{Z}}=***zo'''b BHmxvKRB1 !_ Tj4!]pi6X{hBH8þ}aÆag8/'''`ƍlZmm-vڅP :Cjju:8\pp<#pssCHH?SN!!!֭üy3f… ۶mCXXb1y455t:̜9k׮ɓիWݻ9'Ni쌠 lffq8}46mڄ۷cʔ)dXt).\Td߇Z@ ۷Ԅ;wʕ+>|8ߏr?~;wĦM JtRݻgaaKKKdggΝqٳ2 YYYx7/gΜAFF}]pWbXnݻ͛7ٳسg8ҰpB8pXnyTTT ;;* ;w:͛7wڠ#BgK.=pB2Pbcܸq󑓓&ܹs@UU7o+O/2^z%xxx_C-##~!|MP(QUUP3fƍxxx`ڴi9r$D"BBBBgbرxakks"&&SAAA0arssQSSC!$$(//NÒ%KZɓQ]] ڵk(++"""0uTxzzPT}w{< o<==XҷTo*lBy@C58eee/PPPVpb=---hhh1t7oF@@L¾ QQQ̆:88ɉŬhq0tP{) @TBRXtk]ӧOgqww= k׮7|$3QQQ[ɤ2=z!!!N6hG!8DZj5lmm!m6ddd`߾}Fmm--[Dkk+t:,,,؍UUU9s&qq>>(**Bqq1 ٽ{&M«?Oػw/8DFF 999PP*FzzIKUUUh4(//7N={.Z[[Q__"(((D"FAkk+222oH$(((FA}}=222p֭v/¹sq̙{ ,s:/<<?0#B*mc}ݒ! T:RDVV!'O")) =z4!J1c dgg/D@@\zꩧp-lٲ9s&cΜ9qヨ(9?cPAvГ|2t:QQQe˖A,c8w[L:!99'OƔ)S0sLbΝBUU-T]v!** FF{0gΜ,%B~ K}ZF>IB\ooo#??EEE6m/^ A.#++ Xh&MJX[[#66(,,D"APP,Y444prrBxx8 `DFF999d4i?---@xx8pvvƸqܹsܹsD#W^Eee%&Md2toP2iB`Æ &Mht/nIBH ggg;BH| |}}͝! ~agtP(DJJ %1B!'deeA&aʔ)ppp0w:B3: 7{#B!侍?3gΤ!>/0i~=k555DF/~#B!СCxG!͝ !DqƃO0"F!ϫT*Z6w*ףiB@&HkBHpI|7dN^u]; BH0X{衇B!%%%ԳF!KKK3iѩ_B-IB!o>; BH<5B!?8q"F'''sBHZ`萎h͎!>/&&:]M'##SLE3:u?!BT*Bу>BH!QVV!Ct<:H!d@PTP*jN^ekk .; BH0XF\!-[ܩB!D&=hcw߅D"y!TVVwܩҫzRIDATaiii4!]~z&2XQQQ-IB!|r; BH<&MEB|}}`cccTU;v0w BSB.\h!tyyy񁭭m&H!B\vv6`oooT!&:u*Ҍ'@`jBr9qS!W_EEEN0vt5B!BFFhTUNB]] BȀTrsB!tj&rhcoJJJ%)B!ҽ^y 6iB_7FSB  :k2puu5w`g-,, vvv !rߦMEA$;Bz' b4!]g#FDHH-9B!Bp?22jB!}TLL rrr̝! h~B!ӧ{n֚;BzUzz:ݲhcmǎ(++\! ( sB!t^螩~m#,, YYYl!!1n8>|ŒGFT*o7n੧b/b9sܹsk*J/a8bSNEFF޽,bnbۃ}vKKKìYX/DSSB, l{ii)^|E{+V0ݚ5kXGEqq1mٲFmPǎ3($;rAy$'']kk+>rH8{,&Nׯ<8( 5kAy|qE[z5Y\n޼ɶGjj*X|׮]X OOOܹm/))1(+W/bӧOT*e>q5;z(͛7.]2( &ѣG >;%KX|Æ y|ǡjxwXlcuV511~~~ C3gW_}eP|swwDzevT^x޳>kP^z%п͛Yl̘1q?~<9b?z!?vgŋYwEuu5q6ԡmy̙3ǠcSN!::Xz5^PP˗kfAL8k,444Z->#7n۽{7M8gΜa{",,ϟ?b%%%lQqFt͛7o6~'dkjjvZ[x1 Y\x۷Ylǎdy,bP{@(NZf'Xmߣh}kSѰ*cO3S6_~ʣ*Yvo2h4fz{1Uo;U{wF{T;+q0GaycF!B!#B!QcB!B jB!BHD5B!B部F!B!}5!B!!B!AX#B!>kB!KC $IENDB`receptor-1.5.3/docs/source/user_guide/edge_networks.rst000066400000000000000000000045531475465740700233520ustar00rootroot00000000000000Support for Edge Networks ========================= Recptor out-of-the-box has the ability to support complicated networking environments including edge networks. .. contents:: :local: Consider the following environment: .. image:: edge.png :alt: Network diagram with netceptor peers to edge network Configurable-Items ------------------- Receptor encapsulates the concepts of `below-the-mesh` and `above-the-mesh` connections. Please refer to :doc:`tls` for a better understanding of these networking layers. If a particular node in a network has higher than normal latency, we allow the users to define a finely-grained idle connection timeout value for any given Receptor node. This will help Receptor keep `below-the-mesh` tcp connections alive. Receptor will monitor backend connections for traffic and will timeout any connection that hasn't seen traffic for a period of time. Once the connection is dropped, a new connection is formed automatically. If a connection timeout occurs, the users can expect to see a message like this in their receptor logs. .. code-block:: text DEBUG 2022/04/07 12:48:56 Sending initial connection message ERROR 2022/04/07 12:48:56 Backend sending error read tcp 10.26.5 0.239:27199->10.102.21.131:35024: i/o **timeout** To circumvent this scenario from happening, users can leverage the `maxidleconnectiontimeout` parameter in their configuration files. 
`maxidleconnectiontimeout` A user-defined parameter in the configuration file that will set the `below-the-mesh` tcp connection timeout. The configuration files for the diagram above are listed below. foo.yml .. code-block:: yaml --- version: 2 node: id: foo maxidleconnectiontimeout: 60s log-level: level: Debug tcp-listeners: - port: 2222 bar.yml .. code-block:: yaml --- node: id: bar maxidleconnectiontimeout: 60s log-level: level: Debug tcp-peers: - address: localhost:2222 fish.yml .. code-block:: yaml --- node: id: fish maxidleconnectiontimeout: 60s log-level: level: Debug tcp-peers: - address: localhost:2222 *Note* - All Receptor nodes in the mesh must define a `maxidleconnectiontimeout` value, if this value is consumed on ANY node. The effective `maxidleconnectiontimeout` value is the minumum value between all the nodes in the mesh. receptor-1.5.3/docs/source/user_guide/firewall.rst000066400000000000000000000017211475465740700223110ustar00rootroot00000000000000.. _firewall_rules: Firewall Rules ============== Receptor has the ability to accept, drop, or reject traffic based on any combination of the following: - ``FromNode`` - ``ToNode`` - ``FromService`` - ``ToService`` Firewall rules are added under the ``node`` entry in a Receptor configuration file: .. code-block:: yaml # Accepts everything node: firewallrules: - action: "accept" .. code-block:: yaml # Drops traffic from `foo` to `bar`'s control service node: firewallrules: - action: "drop" fromnode: "foo" tonode: "bar" toservice: "control" .. code-block:: yaml # Rejects traffic originating from nodes like abcb, adfb, etc node: firewallrules: - action: "reject" fromnode: "/a.*b/" .. code-block:: yaml # Rejects traffic destined for nodes like abcb, AdfB, etc node: firewallrules: - action: "reject" tonode: "/(?i)a.*b/" receptor-1.5.3/docs/source/user_guide/index.rst000066400000000000000000000025331475465740700216150ustar00rootroot00000000000000******************* User guide ******************* This guide describes how to use receptor in multiple environments and uses the following terms: .. glossary:: backend A type of connection that receptor nodes can pass traffic over. Current backends include TCP, UDP and websockets. backend peers A node connected to another through receptor backends. control node A node running the receptor control service. control service A built-in service that usually runs under the name `control`. Used to report status and to launch and monitor work. netceptor The component of receptor that handles all networking functionality. netceptor peers A receptor node directly connected to another receptor node. node A single running instance of receptor. node ID An arbitrary string identifying a single node, analogous to an IP address. receptor The receptor application taken as a whole, which typically runs as a daemon. receptorctl A user-facing command line used to interact with receptor, typically over a Unix domain socket. workceptor The component of receptor that handles work units. .. toctree:: :maxdepth: 2 basic_usage configuration_options connecting_nodes edge_networks firewall interacting_with_nodes k8s tls workceptor receptor-1.5.3/docs/source/user_guide/interacting_with_nodes.rst000066400000000000000000000136021475465740700252370ustar00rootroot00000000000000 .. _interacting_with_nodes: Interacting with nodes ====================== .. contents:: :local: The ``control-service`` allows the user to issue commands like "status" or "work submit" to a receptor node. foo.yml .. 
code-block:: yaml --- version: 2 node: id: foo log-level: level: debug tcp-listeners: - port: 2222 control-services: - service: control filename: /tmp/foo.sock bar.yml .. code-block:: yaml --- version: 2 node: id: bar log-level: level: debug tcp-peers: - address: localhost:2222 control-services: - service: control If ``filename`` is set, receptor will create a unix domain socket. Use receptorctl to interact with the running receptor node via this domain socket (using "--socket"). The control service on `bar` does not have a ``filename`` set, but can be connected to using the "connect" command, as shown in the :ref:`connect_to_csv` section. The "status" command will display helpful information about the mesh, including known connections, routing tables, control services, and work types. .. code-block:: console $ receptorctl --socket /tmp/foo.sock status Node ID: foo Version: 0.9.8.dev57-0.git20210722.4d0310f System CPU Count: 8 System Memory MiB: 15876 Connection Cost bar 1 Known Node Known Connections bar {'foo': 1} foo {'bar': 1} Route Via bar bar Node Service Type Last Seen Tags Work Types foo control Stream 2021-07-22 23:29:34 - - bar control Stream 2021-07-22 23:32:35 - - ReceptorControl ---------------- For a more programmatic way to interact with receptor nodes, use the ReceptorControl python class. .. code-block:: python from receptorctl import ReceptorControl r = ReceptorControl("/tmp/foo.sock") r.simple_command("work list") .. _connect_to_csv: Connect to control service --------------------------- Use the "connect" command to connect to any receptor control service running on the mesh. From here, issue a series of commands and examine the output, without disconnecting. .. code-block:: console $ receptorctl --socket /tmp/foo.sock connect bar control Receptor Control, node bar This will result in a bridged connection between the local domain socket on `foo`, and the control service listener from `bar`. One can also connect to the locally running control service in a similar manner: .. code-block:: console $ receptorctl --socket /tmp/foo.sock connect localhost control Receptor Control, node foo "localhost" is a special keyword that tells receptor to connect to its own control-service. "localhost" can be used in all other control service commands that expect a node ID. Once connected to a control service, one can issue commands like "status" or "work list" and get JSON-formatted responses back. Keep in mind that a "work submit" command will require a payload. Type out the payload contents and press ctrl-D to send the EOF signal. The socket will then close and work will begin. See :ref:`user_guide/workceptor:workceptor` for more on submitting work via receptor. .. _control_service_commands: Control service commands -------------------------- A ``control-service`` can accept commands in two formats: a space-delimited string or JSON. In some cases, JSON accepts arguments that are not supported in the string format; these are marked with `json-only` in the table below. String example: .. code-block:: console work submit bar echoint JSON example: .. code-block:: json { "command":"work", "subcommand":"submit", "node":"bar", "worktype":"echoint" } For 2-word commands like ``work submit``, the first word is the "command" and the second word is the "subcommand".
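As a minimal sketch of the JSON form, a JSON command can be typed (or piped) into an interactive "connect" session as described above; the ``work list`` command is used here because it needs no extra parameters, and the JSON response is omitted:

.. code-block:: console

    $ receptorctl --socket /tmp/foo.sock connect localhost control
    Receptor Control, node foo
    {"command": "work", "subcommand": "list"}

The response comes back as JSON, just as it does for the string form.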
The order of the parameters (from left to right) in the following table matters, as they are the order expected when issuing commands in string format. .. list-table:: :widths: 15 25 50 :header-rows: 1 * - command - required parameters - optional parameters * - status - - * - reload - - * - ping - target - * - traceroute - target - * - work list - - unitid * - work submit - node, worktype - tlsclient (`json-only`), ttl (`json-only`) * - work cancel - unitid - * - work release - unitid - * - work force-release - unitid - * - work results - unitid, startpos - The above table does not apply to the receptorctl command-line tool. For the exact usage of the various receptorctl commands, type ``receptorctl --help``, or to see the help for a specific command, ``receptorctl work submit --help``.
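For example, the ``ping`` and ``traceroute`` commands from the table each take a target node and give a quick way to verify connectivity and the route taken across the mesh; a sketch against the `bar` node used throughout this guide (output omitted):

.. code-block:: bash

    receptorctl --socket /tmp/foo.sock ping bar
    receptorctl --socket /tmp/foo.sock traceroute bar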
Reload ------- In general, changes to a receptor configuration file do not take effect until the receptor process is restarted. Any action items related to receptor backend connections can be reloaded, without a receptor restart. These include: .. code-block:: text tcp-peers tcp-listeners ws-peers ws-listeners udp-peers udp-listeners local-only Changes can include modifying, adding, or removing these items from the configuration file. After saving the configuration file to disk, connect to a control service and issue a ``reload`` command for the new changes to take effect. .. code-block:: console receptorctl --socket /tmp/foo.sock reload This command will cancel all running backend connections and sessions, re-parse the configuration file, and start the backends once more. This allows users to add or remove backend connections without disrupting ongoing receptor operations. For example, sending payloads or getting work results will only momentarily pause after a reload and will resume once the connections are reestablished. receptor-1.5.3/docs/source/user_guide/k8s.rst000066400000000000000000000060701475465740700212130ustar00rootroot00000000000000Kubernetes work =============== .. contents:: :local: Workceptor has the ability to launch Kubernetes pods to perform work. foo.yml .. code-block:: yaml --- version: 2 node: id: foo log-level: level: Debug tcp-listeners: port: 2222 control-services: - service: control filename: /tmp/foo.sock work-kubernetes: - worktype: kubeit authmethod: kubeconfig allowruntimeauth: true allowruntimepod: true allowruntimeparams: true kubeitpod.yml .. note:: You might need to set ``tcp-listeners``, ``tcp-peers``, or ``local-only`` before you can start the control service. See https://github.com/ansible/receptor/issues/518 .. code-block:: yaml apiVersion: v1 kind: Pod metadata: generateName: myapp-pod- labels: app: myapp spec: containers: - name: worker image: busybox command: ['sh', '-c', 'echo The Pod is running && sleep 6 && exit 0'] restartPolicy: Never Note: at least one of the containers in the pod spec must be named "worker". This is the container that stdin is passed into, and that stdout is retrieved from. First, we need the receptor control service running in order to be able to start a Kubernetes work unit. .. code-block:: sh $ receptor -c foo.yml DEBUG 2022/01/17 10:05:56 Listening on TCP [::]:2222 INFO 2022/01/17 10:05:56 Running control service control INFO 2022/01/17 10:05:56 Initialization complete Now we can submit a Kubernetes work unit. .. code-block:: bash $ receptorctl --socket /tmp/foo.sock work submit kubeit --param secret_kube_config=@$HOME/.kube/config --param secret_kube_pod=@kubeitpod.yml --no-payload Result: Job Started Unit ID: FfpQ4zk2 ``secret_kube_config`` The contents of the kubeconfig file. The "@" tells receptorctl to read in a file name and pass the contents on. ``secret_kube_pod`` The contents of a pod definition. The "@" tells receptorctl to read in a file name and pass the contents on. Runtime params --------------- Additional parameters can be passed in when issuing a "work submit" command, using "--param" in receptorctl. These params must have the correct ``allowruntime*`` fields specified in the ``work-kubernetes`` definition. .. list-table:: :widths: 25 50 25 :header-rows: 1 * - param - description - permission * - kube_image - container image to use - allowruntimecommand * - kube_command - command container should run - allowruntimecommand * - kube_params - parameters to pass into kube_command - allowruntimeparams * - kube_namespace - kubernetes namespace to use - allowruntimeauth * - secret_kube_config - kubeconfig to authenticate with - allowruntimeauth * - secret_kube_pod - pod definition - allowruntimepod * - pod_pending_timeout - allowed duration for pod to be Pending - allowruntimeparams ``pod_pending_timeout`` is provided as a string, for example 1h20m30s or 30m10s.
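Several runtime parameters can be combined in a single submission. The following is a sketch assuming the ``kubeit`` work type from the configuration above (which enables ``allowruntimeauth``, ``allowruntimepod``, and ``allowruntimeparams``); the ``receptor-work`` namespace is only a placeholder:

.. code-block:: bash

    receptorctl --socket /tmp/foo.sock work submit kubeit --no-payload \
        --param secret_kube_config=@$HOME/.kube/config \
        --param secret_kube_pod=@kubeitpod.yml \
        --param kube_namespace=receptor-work \
        --param pod_pending_timeout=10m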
receptor-1.5.3/docs/source/user_guide/mesh.png000066400000000000000000000563501475465740700214230ustar00rootroot00000000000000
receptor-1.5.3/docs/source/user_guide/remote.png000066400000000000000000001260161475465740700217600ustar00rootroot00000000000000
receptor-1.5.3/docs/source/user_guide/tls.rst000066400000000000000000000150541475465740700213100ustar00rootroot00000000000000TLS support =========== Receptor supports mutual TLS authentication and encryption for above-the-mesh and below-the-mesh connections. .. contents:: :local: Configuring TLS --------------- Add ``tls-servers`` and ``tls-clients`` definitions to Receptor configuration files. ``foo.yml`` .. code-block:: yaml --- version: 2 node: id: foo log-level: level: Debug tls-servers: - name: myserver cert: /full/path/foo.crt key: /full/path/foo.key requireclientcert: true clientcas: /full/path/ca.crt tcp-listeners: - port: 2222 tls: myserver Defining ``tls-servers`` has no effect on its own, but it can be referenced elsewhere in the Receptor configuration file. In the preceding configuration snippet, ``tls`` in the ``tcp-listeners`` is set to use ``myserver``. In general, ``tls-servers`` should be referenced anywhere Receptor is expecting an incoming connection, such as ``*-listeners`` backends or on the ``control-services``. Similarly, ``tls-clients`` should be referenced anywhere Receptor is expecting to make an outgoing connection, such as ``*-peers`` backends or in ``receptorctl`` (the command-line client for Receptor). ``bar.yml`` .. code-block:: yaml --- version: 2 node: id: bar log-level: level: Debug tls-clients: - name: myclient rootcas: /full/path/ca.crt insecureskipverify: false cert: /full/path/bar.crt key: /full/path/bar.key tcp-peers: - address: localhost:2222 tls: myclient ``myclient`` is referenced in ``tcp-peers``. Once started, `foo` and `bar` will authenticate each other, and the connection will be fully encrypted. Generating certs ----------------- Receptor supports X.509 compliant certificates and provides a built-in tool to generate valid certificates. Running Receptor with the ``cert-init``, ``cert-makereq``, and ``cert-signreq`` actions creates a certificate authority, makes certificate requests, and signs those requests. ``makecerts.sh`` ..
code-block:: bash #!/bin/bash mkdir -p certs receptor --cert-init commonname="test CA" bits=2048 outcert=certs/ca.crt outkey=certs/ca.key for node in foo bar; do receptor --cert-makereq bits=2048 commonname="$node test cert" dnsname=localhost nodeid=$node outreq=certs/$node.csr outkey=certs/$node.key receptor --cert-signreq req=certs/$node.csr cacert=certs/ca.crt cakey=certs/ca.key outcert=certs/$node.crt done The preceding script will create a CA, and for each node ``foo`` and ``bar``, create a certificate request and sign it with the CA. These certificates and keys can then be used in ``tls-servers`` and ``tls-clients`` definitions in the Receptor configuration files. Pinned certificates -------------------- In a case where a TLS connection is only ever going to be made between two well-known nodes, it may be preferable to require a specific certificate rather than accepting any certificate signed by a CA. Receptor supports certificate pinning for this purpose. Here is an example of a pinned certificate configuration: .. code-block:: yaml --- version: 2 node: id: foo tls-servers: - name: myserver cert: /full/path/foo.crt key: /full/path/foo.key requireclientcert: true clientcas: /full/path/ca.crt pinnedclientcert: - E6:9B:98:A7:A5:DB:17:D6:E4:2C:DE:76:45:42:A8:79:A3:0A:C5:6D:10:42:7A:6A:C4:54:57:83:F1:0F:E2:95 tcp-listeners: - port: 2222 tls: myserver Certificate pinning is an added requirement, and does not eliminate the need to meet other stated requirements. In the above example, the client certificate must both be signed by a CA in the `ca.crt` bundle, and also have the listed fingerprint. Multiple fingerprints may be specified, in which case a certificate matching any one of them will be accepted. To find the fingerprint of a given certificate, use the following OpenSSL command: .. code-block:: bash openssl x509 -in my-cert.pem -noout -fingerprint -sha256 SHA256 and SHA512 fingerprints are supported. SHA1 fingerprints are not supported due to the insecurity of the SHA1 algorithm. Above the mesh TLS ------------------- Below-the-mesh TLS deals with connections that are being made to an IP address or DNS name, and so it can use normal X.509 certificates which include DNS names or IP addresses in their ``subjectAltName`` field. Above-the-mesh TLS deals with connections that use Receptor node IDs as endpoint addresses, which require generating certificates that include Receptor node IDs as names in the ``subjectAltName`` extension. You can use the ``otherName`` field of ``subjectAltName`` to specify Receptor node IDs. The ``otherName`` field accepts arbitrary names of any type, and includes an ISO Object Identifier (OID) that defines what type of name this is, followed by arbitrary data that is meaningful for that type. Red Hat has its own OID namespace, which is controlled by RHANANA, the Red Hat Assigned Names And Number Authority. Receptor has an assignment within the overall Red Hat namespace. If you use TLS authentication in your mesh, the certificate's OIDs (1.3.6.1.4.1.2312.19.1) will be verified against the `node.id` specified in the configuration file. If there is no match, the Receptor binary will hard exit. To avoid this check, visit the `Skip Certificate Validation`_ section for more details. Skip certificate validation ---------------------------- You can turn off certificate validation by adding a `skipreceptornamescheck` key-value pair to your configuration.
Depending on the specifics of your environment(s), you may need to add the ``skipreceptornamescheck`` key-value pair to the configuration file for `tls-server`, `tls-config`, or both. The default behavior for this option is `false` which means that the certificate's OIDs will be verified against the node ID. .. code-block:: yaml --- version: 2 node: id: bar log-level: level: Debug tls-clients: - name: myclient rootcas: /full/path/ca.crt insecureskipverify: false cert: /full/path/bar.crt key: /full/path/bar.key skipreceptornamescheck: true tls-servers: - name: myserver cert: /full/path/foo.crt key: /full/path/foo.key requireclientcert: true clientcas: /full/path/ca.crt pinnedclientcert: - E6:9B:98:A7:A5:DB:17:D6:E4:2C:DE:76:45:42:A8:79:A3:0A:C5:6D:10:42:7A:6A:C4:54:57:83:F1:0F:E2:95 skipreceptornamescheck: true tcp-peers: - address: localhost:2222 tls: myclient receptor-1.5.3/docs/source/user_guide/workceptor.rst000066400000000000000000000304311475465740700227030ustar00rootroot00000000000000Workceptor ========== .. contents:: :local: Workceptor is a component of receptor that handles units of work. ``work-commands`` defines a type of work that can run on the node. foo.yml .. code-block:: yaml --- version: 2 node: id: foo log-level: level: Debug tcp-listeners: - port: 2222 control-services: - service: control filename: /tmp/foo.sock work-commands: - workType: echoint command: bash params: "-c \"for i in {1..5}; do echo $i; sleep 1; done\"" bar.yml .. code-block:: yaml --- version: 2 node: id: bar log-level: level: Debug tcp-peer: address: localhost:2222 control-services: - service: control work-commands: - worktype: echoint command: bash params: "-c \"for i in {1..10}; do echo $i; sleep 1; done\"" - workType: echopayload command: bash params: "-c \"while read -r line; do echo ${line^^}; sleep 3; done\"" Configuring work commands -------------------------- ``worktype`` User-defined name to give this work definition ``command`` The executable that is invoked when running this work ``params`` Command-line options passed to this executable Local work ----------- Start the work by connecting to the ``control-services`` and issuing a "work submit" command .. code-block:: bash $ receptorctl --socket /tmp/foo.sock work submit echoint --no-payload Result: Job Started Unit ID: t1BlAB18 Receptor started an instance of this work type, and labeled it with a unique "Unit ID" Work results ------------- Use the "Unit ID" to get work results .. code-block:: bash receptorctl --socket /tmp/foo.sock work results t1BlAB18 1 2 3 4 5 6 7 8 9 10 Remote work ------------ Although connected to `foo`, by providing the "--node" option the work can be started on node `bar`. The work type must be defined on the node it is intended to run on, e.g. `bar` must have a ``work-command`` called "echoint", in this case. .. code-block:: bash $ receptorctl --socket /tmp/foo.sock work submit echoint --node bar --no-payload Result: Job Started Unit ID: 87Vwqb6A Remote work submission ultimately results in two work units running at the same time; a local work unit and the remote work unit. These two units have their own Unit IDs. The local work unit's goal is to monitor and stream results back from the running remote work unit. Sequence of events for remote work submission - `foo` starts a local work unit of work type "remote". This is a special work type that is built into receptor. - This work unit attempts to connect to `bar`'s control service and issue a "work submit echoint" command. 
From `bar`'s perspective, this is the exact same operation as if a user connected to `bar` directly and issued a work submit command. `bar` is not aware that `foo` is the one that issued the command. - Once submitted, `foo` will stream work results back to itself and store them on disk. It also periodically gets the ``work status`` of the work running on `bar`. Status includes information about the work state and the stdout size. - `foo` continues streaming stdout results until the size stored on disk matches the StdoutSize reported in `bar`'s status. .. _work_payload: Payload -------- in `bar.yml` .. code-block:: yaml - workType: echopayload command: bash params: "-c \"while read -r line; do echo ${line^^}; sleep 5; done\"" Here the bash command expects to read a line from stdin, echo the line in all uppercase letters, and sleep for 5 seconds. Payloads can be passed into receptor using the "--payload" option. .. code-block:: bash $ echo -e "hi\ni am foo\nwhat is your name" | receptorctl --socket /tmp/foo.sock work submit echopayload --node bar --payload - -f HI I AM FOO WHAT IS YOUR NAME "--payload -" means the payload should be whatever the stdin is, which is piped in from the "echo -e ..." command. Note: "-f" instructs receptorctl to follow the work unit immediately, i.e. stream results to stdout. One could also use "work results" to stream the results. Runtime Parameters ------------------- Work commands can be configured to allow parameters to be passed to commands when work is submitted: .. code-block:: yaml work-commands: - workType: listcontents command: ls allowruntimeparams: true The ``allowruntimeparams`` option will allow parameters to be passed to the work command by the client submitting the work. The contents of a specific directory can be listed by passing the paths to the receptor command as positional arguments immediately after the ``workType``: .. code-block:: bash receptorctl --socket /tmp/foo.sock work submit --node bar --no-payload -f listcontents /root/ /bin/ /bin/: bash sh /root/: helloworld.sh Passing options or flags to the work command needs to be done using the ``--param`` parameter to extend the ``params`` work command setting. The ``--all`` flag can be passed to the work command this way: .. code-block:: bash receptorctl --socket /tmp/foo.sock work submit --node bar --no-payload -f --param params='--all' listcontents /root/ . .. .bash_logout .bash_profile .bashrc .cache helloworld.sh Work list ---------- "work list" returns information about all work units that have run on this receptor node. The following shows two work units, ``12L8s8h2`` and ``T0oN0CAp``: .. code-block:: bash $ receptorctl --socket /tmp/foo.sock work list {'12L8s8h2': {'Detail': 'exit status 0', 'ExtraData': None, 'State': 2, 'StateName': 'Succeeded', 'StdoutSize': 21, 'WorkType': 'echoint'}, 'T0oN0CAp': {'Detail': 'Running: PID 1700818', 'ExtraData': {'Expiration': '0001-01-01T00:00:00Z', 'LocalCancelled': False, 'LocalReleased': False, 'RemoteNode': 'bar', 'RemoteParams': {}, 'RemoteStarted': True, 'RemoteUnitID': 'ATDzdViR', 'RemoteWorkType': 'echoint', 'TLSClient': ''}, 'State': 1, 'StateName': 'Running', 'StdoutSize': 4, 'WorkType': 'remote'}, Notice that ``T0oN0CAp`` was a remote work submission, therefore its work type is "remote". On `bar` there is a local unit ``ATDzdViR``, with the "echoint" work type.
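The unit IDs shown in this listing can be fed back into the other work commands; for example, the completed ``12L8s8h2`` unit can have its stored results streamed and then be cleaned up with the release command described below:

.. code-block:: bash

    receptorctl --socket /tmp/foo.sock work results 12L8s8h2
    receptorctl --socket /tmp/foo.sock work release 12L8s8h2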
Work cancel ------------ Cancel will stop any running work unit. Upon canceling a "remote" work unit, the local node will attempt to connect to the remote node's control service and issue a work cancel. If the remote node is down, receptor will periodically attempt to connect to the remote node to do the cancellation. Work release ------------- Release will cancel the work and then delete files on disk associated with that work unit. For remote work submission, release will attempt to delete files both locally and on the remote machine. Like work cancel, the release can be pending if the remote node is down. In that situation, the local files will remain on disk until the remote node can be contacted. Work force-release -------------------- It might be preferable to force a release, using the ``work force-release`` command. This will do a one-time attempt to connect to the remote node and issue a work release there. After this one attempt, it will then proceed to delete all local files associated with the work unit. States --------- A unit of work can be in the Pending, Running, Succeeded, or Failed state. For local work, transitioning from Pending to Running occurs the moment the ``command`` executable is started. For remote work, transitioning from Pending to Running occurs when the status reported from the remote node has a Running state. Signed work ------------ Remote work submissions can be digitally signed by the sender. The target node will verify the signature of the work command before starting the work unit. A *single* pair of RSA public and private keys is created offline and distributed to the nodes. Distribute the public key (PKIX format) to any node that should receive work. Distribute the private key (PKCS1 format) to any node that needs authority to submit work. The following commands can be used to create keys for signing work: .. code-block:: bash openssl genrsa -out signworkprivate.pem 2048 openssl rsa -in signworkprivate.pem -pubout -out signworkpublic.pem in `bar.yml` .. code-block:: yaml # PKIX work-verification: publickey: /full/path/signworkpublic.pem - workType: echopayload command: bash params: "-c \"while read -r line; do echo ${line^^}; sleep 5; done\"" verifysignature: true in `foo.yml` .. code-block:: yaml # PKCS1 work-signing: privatekey: /full/path/signworkprivate.pem tokenexpiration: 30m ``tokenexpiration`` determines how long the signature is valid for. This expiration directly corresponds to the "expiresAt" field in the generated JSON web token. Valid units include "h" and "m", e.g. 1h30m for one hour and 30 minutes. Use the "--signwork" parameter to sign the work. .. code-block:: bash $ receptorctl --socket /tmp/foo.sock work submit echoint --node bar --no-payload --signwork Units on disk -------------- Netceptor, the main component of receptor that handles mesh connectivity and traffic, operates entirely in memory. That is, it does not store any state information on disk. However, Workceptor functionality is designed to be persistent across receptor restarts. Work units might be running commands that could take hours to complete, and as such Workceptor needs to store some relevant information on disk in case the receptor process restarts. By default receptor stores data under ``/tmp/receptor``, but this can be changed by setting the ``datadir`` param under the ``node`` action in the config file. For a given work unit, receptor will store files in ``{datadir}/{nodeID}/{unitID}/``. Here is the receptor directory tree after running ``work submit echopayload`` described in :ref:`work_payload`. ..
code-block:: bash $ tree /tmp/receptor /tmp/receptor ├── bar │   └── NImim5WA │   ├── status │   ├── status.lock │   ├── stdin │   └── stdout └── foo └── BsAjS4wi ├── status ├── status.lock ├── stdin └── stdout The main purpose of work unit ``BsAjS4wi`` on `foo` is to copy stdin, stdout, and status from ``NImim5WA`` on `bar` back to its own working directory. ``stdin`` is a copy of the submitted payload. The contents of this file are the same on both the local (`foo`) and remote (`bar`) machines. .. code-block:: bash $ cat /tmp/receptor/bar/NImim5WA/stdin hi i am foo what is your name ``stdout`` contains the work unit results, i.e. the stdout of the command execution. It will also be the same on both the local node and remote node. .. code-block:: bash $ cat /tmp/receptor/bar/NImim5WA/stdout HI I AM FOO WHAT IS YOUR NAME ``status`` contains additional information related to the work unit. The contents of status are different on `foo` and `bar`. .. code-block:: bash $ cat /tmp/receptor/bar/NImim5WA/status { "State":2, "Detail":"exit status 0", "StdoutSize":30, "WorkType":"echopayload", "ExtraData":null } .. code-block:: text $ cat /tmp/receptor/foo/BsAjS4wi/status { "State":2, "Detail":"exit status 0", "StdoutSize":30, "WorkType":"remote", "ExtraData":{ "RemoteNode":"bar", "RemoteWorkType":"echopayload", "RemoteParams":{}, "RemoteUnitID":"NImim5WA", "RemoteStarted":true, "LocalCancelled":false, "LocalReleased":false, "TLSClient":"", "Expiration":"0001-01-01T00:00:00Z" } } .. image:: remote.png :alt: sequence of events during work remote submission The sequence of events during a remote work submission. Blue lines indicate moments when receptor writes files to disk. receptor-1.5.3/example/000077500000000000000000000000001475465740700150215ustar00rootroot00000000000000receptor-1.5.3/example/net.go000066400000000000000000000072141475465740700161420ustar00rootroot00000000000000package main import ( "context" "fmt" "io" "net" "os" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" ) /* This is an example of the use of Receptor as a Go library. */ func main() { // Suppress log output. Remove this if you want to see log information. logger.SetGlobalQuietMode() // Create two nodes of the Receptor network-layer protocol (Netceptors).
n1 := netceptor.New(context.Background(), "node1") n2 := netceptor.New(context.Background(), "node2") // Start a TCP listener on the first node b1, err := backends.NewTCPListener("localhost:3333", nil, n1.Logger) if err != nil { fmt.Printf("Error listening on TCP: %s\n", err) os.Exit(1) } err = n1.AddBackend(b1) if err != nil { fmt.Printf("Error starting backend: %s\n", err) os.Exit(1) } // Start a TCP dialer on the second node - this will connect to the listener we just started b2, err := backends.NewTCPDialer("localhost:3333", false, nil, n2.Logger) if err != nil { fmt.Printf("Error dialing on TCP: %s\n", err) os.Exit(1) } err = n2.AddBackend(b2) if err != nil { fmt.Printf("Error starting backend: %s\n", err) os.Exit(1) } // Start an echo server on node 1 l1, err := n1.Listen("echo", nil) if err != nil { fmt.Printf("Error listening on Receptor network: %s\n", err) os.Exit(1) } go func() { // Accept an incoming connection - note that conn is just a regular net.Conn conn, err := l1.Accept() if err != nil { fmt.Printf("Error accepting connection: %s\n", err) return } fmt.Printf("Accepted a connection\n") go func() { defer conn.Close() buf := make([]byte, 1024) done := false for !done { n, err := conn.Read(buf) if err == io.EOF { done = true } else if err != nil { fmt.Printf("Read error in Receptor listener: %s\n", err) return } fmt.Printf("Echo server got %d bytes\n", n) if n > 0 { _, err := conn.Write(buf[:n]) if err != nil { fmt.Printf("Write error in Receptor listener: %s\n", err) return } } } }() }() // Connect to the echo server from node 2. We expect this to error out at first with // "no route to node" because it takes a second or two for node1 and node2 to exchange // routing information and form a mesh. var c2 net.Conn for { fmt.Printf("Dialing node1\n") c2, err = n2.Dial("node1", "echo", nil) if err != nil { fmt.Printf("Error dialing on Receptor network: %s\n", err) time.Sleep(1 * time.Second) continue } break } // Start a listener function that prints received data to the screen // Note that because net.Conn is a stream connection, it is not guaranteed // that received messages will be the same size as the messages that are sent. // For datagram use, Receptor also provides a net.PacketConn. go func() { rbuf := make([]byte, 1024) for { n, err := c2.Read(rbuf) if n > 0 { fmt.Printf("Received data: %s\n", rbuf[:n]) } if err == io.EOF { // Shut down the whole Netceptor when any connection closes, because this is just a demo n2.Shutdown() return } if err != nil { fmt.Printf("Read error in Receptor dialer: %s\n", err) return } } }() // Send some data, which should be processed through the echo server back to our // receive function and printed to the screen. 
_, err = c2.Write([]byte("Hello, world!")) if err != nil && err != io.EOF { fmt.Printf("Write error in Receptor dialer: %s\n", err) } // Close our end of the connection _ = c2.Close() // Wait for n2 to shut down n2.BackendWait() // Gracefully shut down n1 n1.Shutdown() n1.BackendWait() } receptor-1.5.3/go.mod000066400000000000000000000103011475465740700144670ustar00rootroot00000000000000module github.com/ansible/receptor go 1.22.7 require ( github.com/creack/pty v1.1.24 github.com/fortytw2/leaktest v1.3.0 github.com/fsnotify/fsnotify v1.8.0 github.com/ghjm/cmdline v0.1.2 github.com/golang-jwt/jwt/v4 v4.5.1 github.com/google/go-cmp v0.6.0 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/gorilla/websocket v1.5.3 github.com/jupp0r/go-priority-queue v0.0.0-20160601094913-ab1073853bde github.com/minio/highwayhash v1.0.3 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/prep/socketpair v0.0.0-20171228153254-c2c6a7f821c2 github.com/quic-go/quic-go v0.48.2 github.com/rogpeppe/go-internal v1.13.1 github.com/sirupsen/logrus v1.9.3 github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/spf13/cobra v1.8.1 github.com/spf13/viper v1.19.0 github.com/vishvananda/netlink v1.3.0 go.uber.org/goleak v1.3.0 go.uber.org/mock v0.5.0 golang.org/x/net v0.35.0 golang.org/x/sys v0.30.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.31.3 k8s.io/apimachinery v0.31.3 k8s.io/client-go v0.31.3 ) require github.com/stretchr/testify v1.10.0 // indirect require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grafana/pyroscope-go v1.2.0 github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.8 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/spdystream v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/onsi/ginkgo/v2 v2.21.0 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv 
v1.6.0 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/x448/float16 v0.8.4 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect golang.org/x/crypto v0.33.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/mod v0.22.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.27.0 // indirect google.golang.org/protobuf v1.35.2 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) receptor-1.5.3/go.sum000066400000000000000000001147371475465740700145360ustar00rootroot00000000000000cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghjm/cmdline v0.1.2 h1:XhhlCLSPx4qYf+eNDNzge3lBtymw5ZbWMnJfOFbuL6k= github.com/ghjm/cmdline v0.1.2/go.mod h1:w+7xIuuBUPEik5PI6gho/gLu161qFYatvw6AQ7B73K4= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20241101162523-b92577c0c142 h1:sAGdeJj0bnMgUNVeUpp6AYlVdCt3/GdI3pGRqsNSQLs= github.com/google/pprof v0.0.0-20241101162523-b92577c0c142/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/pyroscope-go v1.2.0 h1:aILLKjTj8CS8f/24OPMGPewQSYlhmdQMBmol1d3KGj8= github.com/grafana/pyroscope-go v1.2.0/go.mod h1:2GHr28Nr05bg2pElS+dDsc98f3JTUh2f6Fz1hWXrqwk= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/gregjones/httpcache 
v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jupp0r/go-priority-queue v0.0.0-20160601094913-ab1073853bde h1:+5PMaaQtDUwOcJIUlmX89P0J3iwTvErTmyn5WghzXAQ= github.com/jupp0r/go-priority-queue v0.0.0-20160601094913-ab1073853bde/go.mod h1:RDgD/dfPmIwFH0qdUOjw71HjtWg56CtyLIoHL+R1wJw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/minio/highwayhash v1.0.3 
h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.4.0 h1:Vy79D6mHeJJjiPdFEL2yku1kl0chZpJfZcPpb16BRl8= github.com/moby/spdystream v0.4.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.34.2 h1:pNCwDkzrsv7MS9kpaQvVb1aVLahQXyJ/Tv5oAZMI3i8= github.com/onsi/gomega v1.34.2/go.mod h1:v1xfxRgk0KIsG+QOdm7p8UosrOzPYRo60fd3B/1Dukc= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prep/socketpair v0.0.0-20171228153254-c2c6a7f821c2 h1:vzKDZ0uNPcOdITzZT5d4Tn2YOalCMqIhYzVNq/oRjlw= github.com/prep/socketpair v0.0.0-20171228153254-c2c6a7f821c2/go.mod h1:E/IaW35yb7xPACTLciISfz5w+jqPwmnXwDdmilSl/Nc= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= 
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv 
v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU= go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus= golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod 
v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= 
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.31.3 h1:umzm5o8lFbdN/hIXbrK9oRpOproJO62CV1zqxXrLgk8= k8s.io/api v0.31.3/go.mod h1:UJrkIp9pnMOI9K2nlL6vwpxRzzEX5sWgn8kGQe92kCE= k8s.io/apimachinery v0.31.3 h1:6l0WhcYgasZ/wk9ktLq5vLaoXJJr5ts6lkaQzgeYPq4= 
k8s.io/apimachinery v0.31.3/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/client-go v0.31.3 h1:CAlZuM+PH2cm+86LOBemaJI/lQ5linJ6UFxKX/SoG+4= k8s.io/client-go v0.31.3/go.mod h1:2CgjPUTpv3fE5dNygAr2NcM8nhHzXvxB8KL5gYc3kJs= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= receptor-1.5.3/internal/000077500000000000000000000000001475465740700152025ustar00rootroot00000000000000receptor-1.5.3/internal/version/000077500000000000000000000000001475465740700166675ustar00rootroot00000000000000receptor-1.5.3/internal/version/version.go000066400000000000000000000011671475465740700207100ustar00rootroot00000000000000package version import ( "fmt" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // Version is receptor app version. var Version string // cmdlineCfg is a cmdline-compatible struct for a --version command. type cmdlineCfg struct{} // Run runs the action. 
func (cfg cmdlineCfg) Run() error { if Version == "" { fmt.Printf("Version unknown\n") } else { fmt.Printf("%s\n", Version) } return nil } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-version", "version", "Displays the Receptor version.", cmdlineCfg{}, cmdline.Exclusive) } receptor-1.5.3/packaging/000077500000000000000000000000001475465740700153125ustar00rootroot00000000000000receptor-1.5.3/packaging/container/000077500000000000000000000000001475465740700172745ustar00rootroot00000000000000receptor-1.5.3/packaging/container/.gitignore000066400000000000000000000000351475465740700212620ustar00rootroot00000000000000receptor *.whl source.tar.gz receptor-1.5.3/packaging/container/Dockerfile000066400000000000000000000020121475465740700212610ustar00rootroot00000000000000FROM registry.access.redhat.com/ubi9/ubi:9.5 AS builder ARG VERSION RUN dnf -y update && \ dnf install -y golang make python3.12 python3.12-pip git RUN pip3.12 install wheel ADD source.tar.gz /source WORKDIR /source RUN make VERSION=${VERSION} FROM registry.access.redhat.com/ubi9/ubi:9.5 ARG VERSION LABEL license="ASL2" LABEL name="receptor" LABEL vendor="ansible" LABEL version="${VERSION}" COPY receptorctl-${VERSION}-py3-none-any.whl /tmp COPY receptor_python_worker-${VERSION}-py3-none-any.whl /tmp COPY receptor.conf /etc/receptor/receptor.conf RUN dnf -y update && \ dnf -y install python3.12-pip && \ dnf clean all && \ pip3.12 install --no-cache-dir wheel && \ pip3.12 install --no-cache-dir dumb-init && \ pip3.12 install --no-cache-dir /tmp/*.whl && \ rm /tmp/*.whl COPY --from=builder /source/receptor /usr/bin/receptor ENV RECEPTORCTL_SOCKET=/tmp/receptor.sock EXPOSE 7323 ENTRYPOINT ["/usr/local/bin/dumb-init", "--"] CMD ["/usr/bin/receptor", "-c", "/etc/receptor/receptor.conf"] receptor-1.5.3/packaging/container/receptor.conf000066400000000000000000000001551475465740700217670ustar00rootroot00000000000000--- - control-service: service: control filename: /tmp/receptor.sock - tcp-listener: port: 7323 receptor-1.5.3/packaging/tc-image/000077500000000000000000000000001475465740700170005ustar00rootroot00000000000000receptor-1.5.3/packaging/tc-image/Dockerfile000066400000000000000000000002721475465740700207730ustar00rootroot00000000000000FROM receptor:latest RUN dnf install tc -y ENTRYPOINT ["/bin/bash"] CMD ["-c", "/usr/bin/receptor --config /etc/receptor/receptor.conf > /etc/receptor/stdout 2> /etc/receptor/stderr"] receptor-1.5.3/pkg/000077500000000000000000000000001475465740700141475ustar00rootroot00000000000000receptor-1.5.3/pkg/backends/000077500000000000000000000000001475465740700157215ustar00rootroot00000000000000receptor-1.5.3/pkg/backends/cmdline.go000066400000000000000000000003161475465740700176630ustar00rootroot00000000000000package backends import "github.com/ghjm/cmdline" var backendSection = &cmdline.ConfigSection{ Description: "Commands to configure back-ends, which connect Receptor nodes together:", Order: 10, } receptor-1.5.3/pkg/backends/mock_backends/000077500000000000000000000000001475465740700205045ustar00rootroot00000000000000receptor-1.5.3/pkg/backends/mock_backends/websockets.go000066400000000000000000000321751475465740700232140ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/backends/websockets.go // // Generated by this command: // // mockgen -source=pkg/backends/websockets.go -destination=pkg/backends/mock_backends/websockets.go // // Package mock_backends is a generated GoMock package. 
package mock_backends import ( context "context" tls "crypto/tls" net "net" http "net/http" reflect "reflect" sync "sync" backends "github.com/ansible/receptor/pkg/backends" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockGorillaWebsocketDialerForDialer is a mock of GorillaWebsocketDialerForDialer interface. type MockGorillaWebsocketDialerForDialer struct { ctrl *gomock.Controller recorder *MockGorillaWebsocketDialerForDialerMockRecorder isgomock struct{} } // MockGorillaWebsocketDialerForDialerMockRecorder is the mock recorder for MockGorillaWebsocketDialerForDialer. type MockGorillaWebsocketDialerForDialerMockRecorder struct { mock *MockGorillaWebsocketDialerForDialer } // NewMockGorillaWebsocketDialerForDialer creates a new mock instance. func NewMockGorillaWebsocketDialerForDialer(ctrl *gomock.Controller) *MockGorillaWebsocketDialerForDialer { mock := &MockGorillaWebsocketDialerForDialer{ctrl: ctrl} mock.recorder = &MockGorillaWebsocketDialerForDialerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockGorillaWebsocketDialerForDialer) EXPECT() *MockGorillaWebsocketDialerForDialerMockRecorder { return m.recorder } // DialContext mocks base method. func (m *MockGorillaWebsocketDialerForDialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (backends.Conner, *http.Response, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DialContext", ctx, urlStr, requestHeader) ret0, _ := ret[0].(backends.Conner) ret1, _ := ret[1].(*http.Response) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // DialContext indicates an expected call of DialContext. func (mr *MockGorillaWebsocketDialerForDialerMockRecorder) DialContext(ctx, urlStr, requestHeader any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialContext", reflect.TypeOf((*MockGorillaWebsocketDialerForDialer)(nil).DialContext), ctx, urlStr, requestHeader) } // MockWebsocketListenerForWebsocket is a mock of WebsocketListenerForWebsocket interface. type MockWebsocketListenerForWebsocket struct { ctrl *gomock.Controller recorder *MockWebsocketListenerForWebsocketMockRecorder isgomock struct{} } // MockWebsocketListenerForWebsocketMockRecorder is the mock recorder for MockWebsocketListenerForWebsocket. type MockWebsocketListenerForWebsocketMockRecorder struct { mock *MockWebsocketListenerForWebsocket } // NewMockWebsocketListenerForWebsocket creates a new mock instance. func NewMockWebsocketListenerForWebsocket(ctrl *gomock.Controller) *MockWebsocketListenerForWebsocket { mock := &MockWebsocketListenerForWebsocket{ctrl: ctrl} mock.recorder = &MockWebsocketListenerForWebsocketMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockWebsocketListenerForWebsocket) EXPECT() *MockWebsocketListenerForWebsocketMockRecorder { return m.recorder } // Addr mocks base method. func (m *MockWebsocketListenerForWebsocket) Addr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Addr") ret0, _ := ret[0].(net.Addr) return ret0 } // Addr indicates an expected call of Addr. func (mr *MockWebsocketListenerForWebsocketMockRecorder) Addr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addr", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).Addr)) } // GetAddr mocks base method. 
func (m *MockWebsocketListenerForWebsocket) GetAddr() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetAddr") ret0, _ := ret[0].(string) return ret0 } // GetAddr indicates an expected call of GetAddr. func (mr *MockWebsocketListenerForWebsocketMockRecorder) GetAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAddr", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).GetAddr)) } // GetTLS mocks base method. func (m *MockWebsocketListenerForWebsocket) GetTLS() *tls.Config { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetTLS") ret0, _ := ret[0].(*tls.Config) return ret0 } // GetTLS indicates an expected call of GetTLS. func (mr *MockWebsocketListenerForWebsocketMockRecorder) GetTLS() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTLS", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).GetTLS)) } // Path mocks base method. func (m *MockWebsocketListenerForWebsocket) Path() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Path") ret0, _ := ret[0].(string) return ret0 } // Path indicates an expected call of Path. func (mr *MockWebsocketListenerForWebsocketMockRecorder) Path() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Path", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).Path)) } // SetPath mocks base method. func (m *MockWebsocketListenerForWebsocket) SetPath(path string) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetPath", path) } // SetPath indicates an expected call of SetPath. func (mr *MockWebsocketListenerForWebsocketMockRecorder) SetPath(path any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPath", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).SetPath), path) } // Start mocks base method. func (m *MockWebsocketListenerForWebsocket) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start", ctx, wg) ret0, _ := ret[0].(chan netceptor.BackendSession) ret1, _ := ret[1].(error) return ret0, ret1 } // Start indicates an expected call of Start. func (mr *MockWebsocketListenerForWebsocketMockRecorder) Start(ctx, wg any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockWebsocketListenerForWebsocket)(nil).Start), ctx, wg) } // MockGorillaWebsocketUpgraderForListener is a mock of GorillaWebsocketUpgraderForListener interface. type MockGorillaWebsocketUpgraderForListener struct { ctrl *gomock.Controller recorder *MockGorillaWebsocketUpgraderForListenerMockRecorder isgomock struct{} } // MockGorillaWebsocketUpgraderForListenerMockRecorder is the mock recorder for MockGorillaWebsocketUpgraderForListener. type MockGorillaWebsocketUpgraderForListenerMockRecorder struct { mock *MockGorillaWebsocketUpgraderForListener } // NewMockGorillaWebsocketUpgraderForListener creates a new mock instance. func NewMockGorillaWebsocketUpgraderForListener(ctrl *gomock.Controller) *MockGorillaWebsocketUpgraderForListener { mock := &MockGorillaWebsocketUpgraderForListener{ctrl: ctrl} mock.recorder = &MockGorillaWebsocketUpgraderForListenerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockGorillaWebsocketUpgraderForListener) EXPECT() *MockGorillaWebsocketUpgraderForListenerMockRecorder { return m.recorder } // Upgrade mocks base method. 
func (m *MockGorillaWebsocketUpgraderForListener) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (backends.Conner, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Upgrade", w, r, responseHeader) ret0, _ := ret[0].(backends.Conner) ret1, _ := ret[1].(error) return ret0, ret1 } // Upgrade indicates an expected call of Upgrade. func (mr *MockGorillaWebsocketUpgraderForListenerMockRecorder) Upgrade(w, r, responseHeader any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upgrade", reflect.TypeOf((*MockGorillaWebsocketUpgraderForListener)(nil).Upgrade), w, r, responseHeader) } // MockHTTPServerForListener is a mock of HTTPServerForListener interface. type MockHTTPServerForListener struct { ctrl *gomock.Controller recorder *MockHTTPServerForListenerMockRecorder isgomock struct{} } // MockHTTPServerForListenerMockRecorder is the mock recorder for MockHTTPServerForListener. type MockHTTPServerForListenerMockRecorder struct { mock *MockHTTPServerForListener } // NewMockHTTPServerForListener creates a new mock instance. func NewMockHTTPServerForListener(ctrl *gomock.Controller) *MockHTTPServerForListener { mock := &MockHTTPServerForListener{ctrl: ctrl} mock.recorder = &MockHTTPServerForListenerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockHTTPServerForListener) EXPECT() *MockHTTPServerForListenerMockRecorder { return m.recorder } // Close mocks base method. func (m *MockHTTPServerForListener) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockHTTPServerForListenerMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockHTTPServerForListener)(nil).Close)) } // Serve mocks base method. func (m *MockHTTPServerForListener) Serve(l net.Listener) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Serve", l) ret0, _ := ret[0].(error) return ret0 } // Serve indicates an expected call of Serve. func (mr *MockHTTPServerForListenerMockRecorder) Serve(l any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Serve", reflect.TypeOf((*MockHTTPServerForListener)(nil).Serve), l) } // ServeTLS mocks base method. func (m *MockHTTPServerForListener) ServeTLS(l net.Listener, certFile, keyFile string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ServeTLS", l, certFile, keyFile) ret0, _ := ret[0].(error) return ret0 } // ServeTLS indicates an expected call of ServeTLS. func (mr *MockHTTPServerForListenerMockRecorder) ServeTLS(l, certFile, keyFile any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ServeTLS", reflect.TypeOf((*MockHTTPServerForListener)(nil).ServeTLS), l, certFile, keyFile) } // SetHandeler mocks base method. func (m *MockHTTPServerForListener) SetHandeler(mux *http.ServeMux) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetHandeler", mux) } // SetHandeler indicates an expected call of SetHandeler. func (mr *MockHTTPServerForListenerMockRecorder) SetHandeler(mux any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHandeler", reflect.TypeOf((*MockHTTPServerForListener)(nil).SetHandeler), mux) } // SetTLSConfig mocks base method. 
func (m *MockHTTPServerForListener) SetTLSConfig(tlscfg *tls.Config) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetTLSConfig", tlscfg) } // SetTLSConfig indicates an expected call of SetTLSConfig. func (mr *MockHTTPServerForListenerMockRecorder) SetTLSConfig(tlscfg any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTLSConfig", reflect.TypeOf((*MockHTTPServerForListener)(nil).SetTLSConfig), tlscfg) } // MockConner is a mock of Conner interface. type MockConner struct { ctrl *gomock.Controller recorder *MockConnerMockRecorder isgomock struct{} } // MockConnerMockRecorder is the mock recorder for MockConner. type MockConnerMockRecorder struct { mock *MockConner } // NewMockConner creates a new mock instance. func NewMockConner(ctrl *gomock.Controller) *MockConner { mock := &MockConner{ctrl: ctrl} mock.recorder = &MockConnerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockConner) EXPECT() *MockConnerMockRecorder { return m.recorder } // Close mocks base method. func (m *MockConner) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockConnerMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConner)(nil).Close)) } // ReadMessage mocks base method. func (m *MockConner) ReadMessage() (int, []byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadMessage") ret0, _ := ret[0].(int) ret1, _ := ret[1].([]byte) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ReadMessage indicates an expected call of ReadMessage. func (mr *MockConnerMockRecorder) ReadMessage() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMessage", reflect.TypeOf((*MockConner)(nil).ReadMessage)) } // WriteMessage mocks base method. func (m *MockConner) WriteMessage(messageType int, data []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteMessage", messageType, data) ret0, _ := ret[0].(error) return ret0 } // WriteMessage indicates an expected call of WriteMessage. func (mr *MockConnerMockRecorder) WriteMessage(messageType, data any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteMessage", reflect.TypeOf((*MockConner)(nil).WriteMessage), messageType, data) } receptor-1.5.3/pkg/backends/null.go000066400000000000000000000014541475465740700172260ustar00rootroot00000000000000package backends import ( "context" "crypto/tls" "sync" "github.com/ansible/receptor/pkg/netceptor" ) type NullBackendCfg struct { Local bool } // Start makes the NullBackendCfg object usable as a do-nothing Backend. func (cfg NullBackendCfg) Start(_ context.Context, _ *sync.WaitGroup) (chan netceptor.BackendSession, error) { return make(chan netceptor.BackendSession), nil } // Run runs the action, in this case adding a null backend to keep the wait group alive.
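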
func (cfg NullBackendCfg) Run() error { err := netceptor.MainInstance.AddBackend(&NullBackendCfg{}) if err != nil { return err } return nil } func (cfg *NullBackendCfg) GetAddr() string { return "" } func (cfg *NullBackendCfg) GetTLS() *tls.Config { return nil } func (cfg NullBackendCfg) Reload() error { return cfg.Run() } receptor-1.5.3/pkg/backends/null_test.go000066400000000000000000000071541475465740700202700ustar00rootroot00000000000000package backends_test import ( "context" "crypto/tls" "reflect" "sync" "testing" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/netceptor" ) func TestNullBackendCfgGetAddr(t *testing.T) { type fields struct { Local bool } tests := []struct { name string fields fields want string }{ { name: "Positive", fields: fields{ Local: true, }, want: "", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := &backends.NullBackendCfg{ Local: tt.fields.Local, } if got := cfg.GetAddr(); got != tt.want { t.Errorf("NullBackendCfg.GetAddr() = %v, want %v", got, tt.want) } }) } } func TestNullBackendCfg_Start(t *testing.T) { type fields struct { Local bool } type args struct { in0 context.Context in1 *sync.WaitGroup } tests := []struct { name string fields fields args args wantKind reflect.Kind wantType string wantErr bool }{ { name: "Positive", fields: fields{ Local: true, }, args: args{ in0: nil, in1: nil, }, wantKind: reflect.Chan, wantType: "netceptor.BackendSession", wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := backends.NullBackendCfg{ Local: tt.fields.Local, } got, err := cfg.Start(tt.args.in0, tt.args.in1) defer close(got) if (err != nil) != tt.wantErr { t.Errorf("%s: NullBackendCfg.Start() error = %+v, wantErr %+v", tt.name, err, tt.wantErr) return } if reflect.ValueOf(got).Kind() == tt.wantKind { if reflect.ValueOf(got).Type().Elem().String() != tt.wantType { t.Errorf("%s: NullBackendCfg.Start() gotType = %+v, wantType = %+v", tt.name, reflect.ValueOf(got).Type().Elem(), tt.wantType) } } else { t.Errorf("%s: NullBackendCfg.Start() gotKind = %+v, wantKind = %+v", tt.name, reflect.ValueOf(got).Kind(), tt.wantKind) } }) } } func TestNullBackendCfgGetTLS(t *testing.T) { type fields struct { Local bool } tests := []struct { name string fields fields want *tls.Config }{ { name: "Positive", fields: fields{ Local: true, }, want: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := &backends.NullBackendCfg{ Local: tt.fields.Local, } if got := cfg.GetTLS(); !reflect.DeepEqual(got, tt.want) { t.Errorf("NullBackendCfg.GetTLS() = %v, want %v", got, tt.want) } }) } } func TestNullBackendRun(t *testing.T) { type fields struct { Local bool } tests := []struct { name string fields fields want error }{ { name: "Positive", fields: fields{ Local: true, }, want: nil, }, } netceptor.MainInstance = netceptor.New(context.Background(), "null_backends_test") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := &backends.NullBackendCfg{ Local: tt.fields.Local, } if got := cfg.Run(); !reflect.DeepEqual(got, tt.want) { t.Errorf("NullBackendCfg.Run() = %v, want %v", got, tt.want) } }) } } func TestNullBackendReload(t *testing.T) { type fields struct { Local bool } tests := []struct { name string fields fields want error }{ { name: "Positive", fields: fields{ Local: true, }, want: nil, }, } netceptor.MainInstance = netceptor.New(context.Background(), "null_backends_test_reload") for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := 
&backends.NullBackendCfg{ Local: tt.fields.Local, } if got := cfg.Reload(); !reflect.DeepEqual(got, tt.want) { t.Errorf("NullBackendCfg.Reload() = %v, want %v", got, tt.want) } }) } } receptor-1.5.3/pkg/backends/tcp.go000066400000000000000000000224041475465740700170400ustar00rootroot00000000000000package backends import ( "context" "crypto/tls" "fmt" "io" "net" "sync" "time" "github.com/ansible/receptor/pkg/framer" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // TCPDialer implements Backend for outbound TCP. type TCPDialer struct { address string redial bool tls *tls.Config logger *logger.ReceptorLogger } // NewTCPDialer instantiates a new TCP backend. func NewTCPDialer(address string, redial bool, tls *tls.Config, logger *logger.ReceptorLogger) (*TCPDialer, error) { td := TCPDialer{ address: address, redial: redial, tls: tls, logger: logger, } return &td, nil } func (b *TCPDialer) GetAddr() string { return b.address } func (b *TCPDialer) GetTLS() *tls.Config { return b.tls } // Start runs the given session function over this backend service. func (b *TCPDialer) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { return dialerSession(ctx, wg, b.redial, 5*time.Second, b.logger, func(closeChan chan struct{}) (netceptor.BackendSession, error) { var conn net.Conn var err error dialer := &net.Dialer{} if b.tls == nil { conn, err = dialer.DialContext(ctx, "tcp", b.address) } else { dialer.Timeout = 15 * time.Second // tls library does not have a DialContext equivalent conn, err = tls.DialWithDialer(dialer, "tcp", b.address, b.tls) } if err != nil { return nil, err } return newTCPSession(conn, closeChan), nil }) } // TCPListener implements Backend for inbound TCP. type TCPListener struct { address string TLS *tls.Config li net.Listener innerLi *net.TCPListener logger *logger.ReceptorLogger } // NewTCPListener instantiates a new TCPListener backend. func NewTCPListener(address string, tls *tls.Config, logger *logger.ReceptorLogger) (*TCPListener, error) { tl := TCPListener{ address: address, TLS: tls, li: nil, logger: logger, } return &tl, nil } // GetAddr returns the network address the listener is listening on. func (b *TCPListener) GetAddr() string { return b.li.Addr().String() } func (b *TCPListener) GetCost() string { return b.li.Addr().String() } func (b *TCPListener) GetTLS() *tls.Config { return b.TLS } // Start runs the given session function over the TCPListener backend.
func (b *TCPListener) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { sessChan, err := listenerSession(ctx, wg, b.logger, func() error { var err error lc := net.ListenConfig{} li, err := lc.Listen(ctx, "tcp", b.address) if err != nil { return err } var ok bool tli, ok := li.(*net.TCPListener) if !ok { return fmt.Errorf("listen returned a non-TCP listener") } if b.TLS == nil { b.li = li b.innerLi = tli } else { tlsLi := tls.NewListener(tli, b.TLS) b.li = tlsLi b.innerLi = tli } return nil }, func() (netceptor.BackendSession, error) { var c net.Conn for { err := b.innerLi.SetDeadline(time.Now().Add(1 * time.Second)) if err != nil { return nil, err } c, err = b.li.Accept() select { case <-ctx.Done(): return nil, io.EOF default: } if nerr, ok := err.(net.Error); ok && nerr.Timeout() { continue } if err != nil { return nil, err } break } return newTCPSession(c, nil), nil }, func() { _ = b.li.Close() }) if err == nil { b.logger.Debug("Listening on TCP %s\n", b.GetAddr()) } return sessChan, err } // TCPSession implements BackendSession for TCP backend. type TCPSession struct { conn net.Conn framer framer.Framer closeChan chan struct{} closeChanCloser sync.Once } // newTCPSession allocates a new TCPSession. func newTCPSession(conn net.Conn, closeChan chan struct{}) *TCPSession { ts := &TCPSession{ conn: conn, framer: framer.New(), closeChan: closeChan, closeChanCloser: sync.Once{}, } return ts } // Send sends data over the session. func (ns *TCPSession) Send(data []byte) error { buf := ns.framer.SendData(data) n, err := ns.conn.Write(buf) if err != nil { return err } if n != len(buf) { return fmt.Errorf("partial data sent") } return nil } // Recv receives data via the session. func (ns *TCPSession) Recv(timeout time.Duration) ([]byte, error) { buf := make([]byte, utils.NormalBufferSize) for { if ns.framer.MessageReady() { break } err := ns.conn.SetReadDeadline(time.Now().Add(timeout)) if err != nil { return nil, err } n, err := ns.conn.Read(buf) if nerr, ok := err.(net.Error); ok && nerr.Timeout() { return nil, netceptor.ErrTimeout } if err != nil { return nil, err } ns.framer.RecvData(buf[:n]) } buf, err := ns.framer.GetMessage() if err != nil { return nil, err } return buf, nil } // Close closes the session. func (ns *TCPSession) Close() error { if ns.closeChan != nil { ns.closeChanCloser.Do(func() { close(ns.closeChan) ns.closeChan = nil }) } return ns.conn.Close() } // ************************************************************************** // Command line // ************************************************************************** // TODO make these fields private // TCPListenerCfg is the cmdline configuration object for a TCP listener. type TCPListenerCfg struct { BindAddr string `description:"Local address to bind to" default:"0.0.0.0"` Port int `description:"Local TCP port to listen on" barevalue:"yes" required:"yes"` TLS string `description:"Name of TLS server config"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` NodeCost map[string]float64 `description:"Per-node costs"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } func (cfg TCPListenerCfg) GetCost() float64 { return cfg.Cost } func (cfg TCPListenerCfg) GetNodeCost() map[string]float64 { return cfg.NodeCost } func (cfg TCPListenerCfg) GetAddr() string { return cfg.BindAddr } func (cfg TCPListenerCfg) GetTLS() string { return cfg.TLS } // Prepare verifies the parameters are correct. 
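// Example sketch: consuming sessions from a TCPListener directly. Receptor
// itself registers the backend with netceptor via AddBackend, so a loop like
// this is only a hypothetical illustration of the BackendSession contract
// (Send, Recv with a timeout, Close).
func exampleConsumeTCPSessions(ctx context.Context, b *TCPListener) error {
	wg := &sync.WaitGroup{}
	sessChan, err := b.Start(ctx, wg)
	if err != nil {
		return err
	}
	for sess := range sessChan {
		// Echo one frame back on each inbound session, then close it.
		if data, err := sess.Recv(5 * time.Second); err == nil {
			_ = sess.Send(data)
		}
		_ = sess.Close()
	}
	wg.Wait()

	return nil
}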
func (cfg TCPListenerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } for node, cost := range cfg.NodeCost { if cost <= 0.0 { return fmt.Errorf("connection cost must be positive for %s", node) } } return nil } // Run runs the action. func (cfg TCPListenerCfg) Run() error { address := fmt.Sprintf("%s:%d", cfg.BindAddr, cfg.Port) tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS) if err != nil { return err } b, err := NewTCPListener(address, tlscfg, netceptor.MainInstance.Logger) if err != nil { netceptor.MainInstance.Logger.Error("Error creating listener %s: %s\n", address, err) return err } err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendNodeCost(cfg.NodeCost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { return err } return nil } // TODO make these fields private // TCPDialerCfg is the cmdline configuration object for a TCP dialer. type TCPDialerCfg struct { Address string `description:"Remote address (Host:Port) to connect to" barevalue:"yes" required:"yes"` Redial bool `description:"Keep redialing on lost connection" default:"true"` TLS string `description:"Name of TLS client config"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } // Prepare verifies the parameters are correct. func (cfg TCPDialerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } return nil } // Run runs the action. func (cfg TCPDialerCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running TCP peer connection %s\n", cfg.Address) host, _, err := net.SplitHostPort(cfg.Address) if err != nil { return err } tlscfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLS, host, netceptor.ExpectedHostnameTypeDNS) if err != nil { return err } b, err := NewTCPDialer(cfg.Address, cfg.Redial, tlscfg, netceptor.MainInstance.Logger) if err != nil { netceptor.MainInstance.Logger.Error("Error creating peer %s: %s\n", cfg.Address, err) return err } err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { return err } return nil } func (cfg TCPDialerCfg) PreReload() error { return cfg.Prepare() } func (cfg TCPListenerCfg) PreReload() error { return cfg.Prepare() } func (cfg TCPDialerCfg) Reload() error { return cfg.Run() } func (cfg TCPListenerCfg) Reload() error { return cfg.Run() } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-backends", "tcp-listener", "Run a backend listener on a TCP port", TCPListenerCfg{}, cmdline.Section(backendSection)) cmdline.RegisterConfigTypeForApp("receptor-backends", "tcp-peer", "Make an outbound backend connection to a TCP peer", TCPDialerCfg{}, cmdline.Section(backendSection)) } receptor-1.5.3/pkg/backends/tcp_test.go000066400000000000000000000064151475465740700201030ustar00rootroot00000000000000package backends import ( "context" "crypto/tls" "net" "reflect" "sync" "testing" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" ) func TestNewTCPListener(t *testing.T) { type args struct { address string tls *tls.Config logger *logger.ReceptorLogger } tests := []struct { name string args args want *TCPListener wantErr bool }{ { name: "Positive", args: args{ address: "127.0.0.1:9999", tls: nil, logger: nil, }, 
want: &TCPListener{ address: "127.0.0.1:9999", TLS: nil, li: nil, innerLi: nil, logger: nil, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewTCPListener(tt.args.address, tt.args.tls, tt.args.logger) if (err != nil) != tt.wantErr { t.Errorf("NewTCPListener() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("NewTCPListener() = %+v, want %+v", got, tt.want) } }) } } func TestTCPListenerStart(t *testing.T) { type fields struct { address string TLS *tls.Config li net.Listener innerLi *net.TCPListener logger *logger.ReceptorLogger } type args struct { ctx context.Context wg *sync.WaitGroup } tests := []struct { name string fields fields args args want chan netceptor.BackendSession wantErr bool }{ { name: "Positive", fields: fields{ address: "127.0.0.1:9998", TLS: nil, li: nil, innerLi: nil, logger: logger.NewReceptorLogger("TCPtest"), }, args: args{ ctx: context.Background(), wg: &sync.WaitGroup{}, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &TCPListener{ address: tt.fields.address, TLS: tt.fields.TLS, li: tt.fields.li, innerLi: tt.fields.innerLi, logger: tt.fields.logger, } got, err := b.Start(tt.args.ctx, tt.args.wg) if (err != nil) != tt.wantErr { t.Errorf("TCPListener.Start() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("TCPListener.Start() returned nil") } }) } } func TestTCPDialerStart(t *testing.T) { type fields struct { address string redial bool tls *tls.Config logger *logger.ReceptorLogger } type args struct { ctx context.Context wg *sync.WaitGroup } tests := []struct { name string fields fields args args want chan netceptor.BackendSession wantErr bool }{ { name: "Positive", fields: fields{ address: "127.0.0.1:9998", redial: true, tls: nil, logger: logger.NewReceptorLogger("TCPtest"), }, args: args{ ctx: context.Background(), wg: &sync.WaitGroup{}, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &TCPDialer{ address: tt.fields.address, redial: tt.fields.redial, tls: tt.fields.tls, logger: tt.fields.logger, } got, err := b.Start(tt.args.ctx, tt.args.wg) if (err != nil) != tt.wantErr { t.Errorf("TCPDialer.Start() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("TCPDialer.Start() got = nil") } }) } } receptor-1.5.3/pkg/backends/udp.go000066400000000000000000000242531475465740700170460ustar00rootroot00000000000000package backends import ( "context" "crypto/tls" "fmt" "net" "sync" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // UDPMaxPacketLen is the maximum size of a message that can be sent over UDP. const UDPMaxPacketLen = 65507 // UDPDialer implements Backend for outbound UDP. type UDPDialer struct { address string redial bool logger *logger.ReceptorLogger } func (b *UDPDialer) GetAddr() string { return b.address } func (b *UDPDialer) GetTLS() *tls.Config { return nil } // NewUDPDialer instantiates a new UDPDialer backend. func NewUDPDialer(address string, redial bool, logger *logger.ReceptorLogger) (*UDPDialer, error) { _, err := net.ResolveUDPAddr("udp", address) if err != nil { return nil, err } nd := UDPDialer{ address: address, redial: redial, logger: logger, } return &nd, nil } // Start runs the given session function over this backend service. 
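// Example sketch: creating a UDP dialer backend and registering it with a
// netceptor instance. The node ID and peer address are placeholder values.
// Because UDP is datagram-based, a single Send on the resulting session is
// limited to UDPMaxPacketLen bytes.
func exampleUDPDialerUsage(ctx context.Context) error {
	n := netceptor.New(ctx, "example-udp-node")
	b, err := NewUDPDialer("127.0.0.1:2224", true, n.Logger)
	if err != nil {
		return err
	}

	return n.AddBackend(b)
}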
func (b *UDPDialer) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { return dialerSession(ctx, wg, b.redial, 5*time.Second, b.logger, func(closeChan chan struct{}) (netceptor.BackendSession, error) { dialer := net.Dialer{} conn, err := dialer.DialContext(ctx, "udp", b.address) if err != nil { return nil, err } udpconn, ok := conn.(*net.UDPConn) if !ok { return nil, fmt.Errorf("DialContext returned a non-UDP connection") } ns := &UDPDialerSession{ conn: udpconn, closeChan: closeChan, closeChanCloser: sync.Once{}, } return ns, nil }) } // UDPDialerSession implements BackendSession for UDPDialer. type UDPDialerSession struct { conn *net.UDPConn closeChan chan struct{} closeChanCloser sync.Once } // Send sends data over the session. func (ns *UDPDialerSession) Send(data []byte) error { if len(data) > UDPMaxPacketLen { return fmt.Errorf("data too large") } n, err := ns.conn.Write(data) if err != nil { return err } if n != len(data) { return fmt.Errorf("partial data sent") } return nil } // Recv receives data via the session. func (ns *UDPDialerSession) Recv(timeout time.Duration) ([]byte, error) { err := ns.conn.SetReadDeadline(time.Now().Add(timeout)) if err != nil { return nil, err } buf := make([]byte, utils.NormalBufferSize) n, err := ns.conn.Read(buf) if nerr, ok := err.(net.Error); ok && nerr.Timeout() { return nil, netceptor.ErrTimeout } if err != nil { return nil, err } return buf[:n], nil } // Close closes the session. func (ns *UDPDialerSession) Close() error { if ns.closeChan != nil { ns.closeChanCloser.Do(func() { close(ns.closeChan) ns.closeChan = nil }) } return ns.conn.Close() } // UDPListener implements Backend for inbound UDP. type UDPListener struct { laddr *net.UDPAddr conn *net.UDPConn sessChan chan *UDPListenerSession sessRegLock sync.RWMutex sessionRegistry map[string]*UDPListenerSession logger *logger.ReceptorLogger } func (b *UDPListener) GetAddr() string { return b.LocalAddr().String() } func (b *UDPListener) GetTLS() *tls.Config { return &tls.Config{} } // NewUDPListener instantiates a new UDPListener backend. func NewUDPListener(address string, logger *logger.ReceptorLogger) (*UDPListener, error) { addr, err := net.ResolveUDPAddr("udp", address) if err != nil { return nil, err } uc, err := net.ListenUDP("udp", addr) if err != nil { return nil, err } ul := UDPListener{ laddr: addr, conn: uc, sessChan: make(chan *UDPListenerSession), sessRegLock: sync.RWMutex{}, sessionRegistry: make(map[string]*UDPListenerSession), logger: logger, } return &ul, nil } // LocalAddr returns the local address the listener is listening on. func (b *UDPListener) LocalAddr() net.Addr { if b.conn == nil { return nil } return b.conn.LocalAddr() } // Start runs the given session function over the UDPListener backend. 
func (b *UDPListener) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { sessChan := make(chan netceptor.BackendSession) wg.Add(1) go func() { defer wg.Done() buf := make([]byte, utils.NormalBufferSize) for { select { case <-ctx.Done(): _ = b.conn.Close() return default: } err := b.conn.SetReadDeadline(time.Now().Add(1 * time.Second)) if err != nil { b.logger.Error("Error setting UDP timeout: %s\n", err) return } n, addr, err := b.conn.ReadFromUDP(buf) if ne, ok := err.(net.Error); ok && ne.Timeout() { continue } if err != nil { b.logger.Error("UDP read error: %s\n", err) return } data := make([]byte, n) copy(data, buf) addrStr := addr.String() b.sessRegLock.RLock() sess, ok := b.sessionRegistry[addrStr] b.sessRegLock.RUnlock() if !ok { b.sessRegLock.Lock() sess = &UDPListenerSession{ li: b, raddr: addr, recvChan: make(chan []byte), } b.sessionRegistry[addrStr] = sess b.sessRegLock.Unlock() select { case <-ctx.Done(): _ = b.conn.Close() return case sessChan <- sess: } } select { case <-ctx.Done(): _ = b.conn.Close() return case sess.recvChan <- data: } } }() if b.conn != nil { b.logger.Debug("Listening on UDP %s\n", b.LocalAddr().String()) } return sessChan, nil } // UDPListenerSession implements BackendSession for UDPListener. type UDPListenerSession struct { li *UDPListener raddr *net.UDPAddr recvChan chan []byte } // Send sends data over the session. func (ns *UDPListenerSession) Send(data []byte) error { n, err := ns.li.conn.WriteToUDP(data, ns.raddr) if err != nil { return err } else if n != len(data) { return fmt.Errorf("partial data sent") } return nil } // Recv receives data from the session. func (ns *UDPListenerSession) Recv(timeout time.Duration) ([]byte, error) { select { case data := <-ns.recvChan: return data, nil case <-time.After(timeout): return nil, netceptor.ErrTimeout } } // Close closes the session. func (ns *UDPListenerSession) Close() error { ns.li.sessRegLock.Lock() defer ns.li.sessRegLock.Unlock() delete(ns.li.sessionRegistry, ns.raddr.String()) return nil } // ************************************************************************** // Command line // ************************************************************************** // TODO make these fields private // UDPListenerCfg is the cmdline configuration object for a UDP listener. type UDPListenerCfg struct { BindAddr string `description:"Local address to bind to" default:"0.0.0.0"` Port int `description:"Local UDP port to listen on" barevalue:"yes" required:"yes"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` NodeCost map[string]float64 `description:"Per-node costs"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } func (cfg UDPListenerCfg) GetCost() float64 { return cfg.Cost } func (cfg UDPListenerCfg) GetNodeCost() map[string]float64 { return cfg.NodeCost } func (cfg UDPListenerCfg) GetAddr() string { return cfg.BindAddr } func (cfg UDPListenerCfg) GetPort() int { return cfg.Port } func (cfg UDPListenerCfg) GetTLS() string { return "" } // Prepare verifies the parameters are correct. func (cfg UDPListenerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } for node, cost := range cfg.NodeCost { if cost <= 0.0 { return fmt.Errorf("connection cost must be positive for %s", node) } } return nil } // Run runs the action. 
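// Example sketch: a UDPListenerCfg as the config loader might populate it,
// validated with Prepare before Run is called. The port and peer name are
// placeholder values; Prepare rejects any non-positive connection or per-node
// cost.
func exampleValidateUDPListenerCfg() error {
	cfg := UDPListenerCfg{
		BindAddr: "0.0.0.0",
		Port:     2225,
		Cost:     1.0,
		NodeCost: map[string]float64{"example-peer": 2.0},
	}

	return cfg.Prepare()
}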
func (cfg UDPListenerCfg) Run() error { address := fmt.Sprintf("%s:%d", cfg.BindAddr, cfg.Port) b, err := NewUDPListener(address, netceptor.MainInstance.Logger) if err != nil { netceptor.MainInstance.Logger.Error("Error creating listener %s: %s\n", address, err) return err } err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendNodeCost(cfg.NodeCost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { netceptor.MainInstance.Logger.Error("Error creating backend for %s: %s\n", address, err) return err } return nil } // udpDialerCfg is the cmdline configuration object for a UDP listener. type UDPDialerCfg struct { Address string `description:"Host:Port to connect to" barevalue:"yes" required:"yes"` Redial bool `description:"Keep redialing on lost connection" default:"true"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } // Prepare verifies the parameters are correct. func (cfg UDPDialerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } return nil } // Run runs the action. func (cfg UDPDialerCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running UDP peer connection %s\n", cfg.Address) b, err := NewUDPDialer(cfg.Address, cfg.Redial, netceptor.MainInstance.Logger) if err != nil { netceptor.MainInstance.Logger.Error("Error creating peer %s: %s\n", cfg.Address, err) return err } err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { netceptor.MainInstance.Logger.Error("Error creating backend for %s: %s\n", cfg.Address, err) return err } return nil } func (cfg UDPDialerCfg) PreReload() error { return cfg.Prepare() } func (cfg UDPListenerCfg) PreReload() error { return cfg.Prepare() } func (cfg UDPDialerCfg) Reload() error { return cfg.Run() } func (cfg UDPListenerCfg) Reload() error { return cfg.Run() } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-backends", "UDP-listener", "Run a backend listener on a UDP port", UDPListenerCfg{}, cmdline.Section(backendSection)) cmdline.RegisterConfigTypeForApp("receptor-backends", "UDP-peer", "Make an outbound backend connection to a UDP peer", UDPDialerCfg{}, cmdline.Section(backendSection)) } receptor-1.5.3/pkg/backends/udp_test.go000066400000000000000000000103241475465740700200770ustar00rootroot00000000000000package backends import ( "context" "net" "sync" "testing" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" ) func TestNewUDPListener(t *testing.T) { type args struct { address string logger *logger.ReceptorLogger } tests := []struct { name string args args want *UDPListener wantErr bool }{ { name: "Positive", args: args{ address: "127.0.0.1:9997", logger: logger.NewReceptorLogger("UDPtest"), }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewUDPListener(tt.args.address, tt.args.logger) if (err != nil) != tt.wantErr { t.Errorf("NewUDPListener() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("NewUDPListener(): want UDP Listener, got nil") } }) } } func TestUDPListenerStart(t *testing.T) { type fields struct { laddr *net.UDPAddr conn *net.UDPConn sessChan chan *UDPListenerSession sessionRegistry map[string]*UDPListenerSession logger *logger.ReceptorLogger 
} type args struct { ctx context.Context wg *sync.WaitGroup } tests := []struct { name string fields fields args args want chan netceptor.BackendSession wantErr bool }{ { name: "Positive", fields: fields{ laddr: &net.UDPAddr{ IP: net.IPv4(127, 0, 0, 1), Port: 9999, Zone: "", }, conn: &net.UDPConn{}, sessChan: make(chan *UDPListenerSession), sessionRegistry: make(map[string]*UDPListenerSession), logger: logger.NewReceptorLogger("UDPtest"), }, args: args{ ctx: context.Background(), wg: &sync.WaitGroup{}, }, want: make(chan netceptor.BackendSession), wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { uc, err := net.ListenUDP("udp", tt.fields.laddr) if err != nil { t.Errorf("ListenUDP error = %+v", err) } b := &UDPListener{ laddr: tt.fields.laddr, conn: uc, sessChan: tt.fields.sessChan, sessRegLock: sync.RWMutex{}, sessionRegistry: tt.fields.sessionRegistry, logger: tt.fields.logger, } got, err := b.Start(tt.args.ctx, tt.args.wg) if (err != nil) != tt.wantErr { t.Errorf("UDPListener.Start() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("UDPListener.Start() returned nil") } }) } } func TestUDPDialerStart(t *testing.T) { type fields struct { address string redial bool logger *logger.ReceptorLogger } type args struct { ctx context.Context wg *sync.WaitGroup } tests := []struct { name string fields fields args args want chan netceptor.BackendSession wantErr bool }{ { name: "Positive", fields: fields{ address: "127.0.0.1:9998", redial: true, logger: logger.NewReceptorLogger("UDPtest"), }, args: args{ ctx: context.Background(), wg: &sync.WaitGroup{}, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &UDPDialer{ address: tt.fields.address, redial: tt.fields.redial, logger: tt.fields.logger, } got, err := b.Start(tt.args.ctx, tt.args.wg) if (err != nil) != tt.wantErr { t.Errorf("UDPDialer.Start() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("UDPDialer.Start() returned nil") } }) } } func TestNewUDPDialer(t *testing.T) { type args struct { address string redial bool logger *logger.ReceptorLogger } tests := []struct { name string args args want *UDPDialer wantErr bool }{ { name: "Positive", args: args{ address: "127.0.0.1:9995", redial: true, logger: logger.NewReceptorLogger("UDPtest"), }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewUDPDialer(tt.args.address, tt.args.redial, tt.args.logger) if (err != nil) != tt.wantErr { t.Errorf("NewUDPDialer() error = %+v, wantErr %+v", err, tt.wantErr) return } if got == nil { t.Errorf("NewUDPDialer() returned nil") } }) } } receptor-1.5.3/pkg/backends/utils.go000066400000000000000000000047701475465740700174200ustar00rootroot00000000000000package backends import ( "context" "sync" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" ) const ( maxRedialDelay = 20 * time.Second ) type dialerFunc func(chan struct{}) (netceptor.BackendSession, error) // dialerSession is a convenience function for backends that use dial/retry logic. 
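// Example sketch: how a backend's Start method typically delegates to
// dialerSession. The dialerFunc closure dials the peer, wraps the connection
// in a netceptor.BackendSession, and arranges for closeChan to be closed when
// that session ends so the redial logic can reconnect. The wrapper name and
// the 5-second initial delay here are illustrative only.
func exampleDialerSessionUsage(ctx context.Context, wg *sync.WaitGroup, log *logger.ReceptorLogger, dial dialerFunc) (chan netceptor.BackendSession, error) {
	return dialerSession(ctx, wg, true, 5*time.Second, log, dial)
}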
func dialerSession( ctx context.Context, wg *sync.WaitGroup, redial bool, redialDelay time.Duration, logger *logger.ReceptorLogger, df dialerFunc, ) (chan netceptor.BackendSession, error) { sessChan := make(chan netceptor.BackendSession) wg.Add(1) go func() { defer func() { wg.Done() close(sessChan) }() redialDelayInc := utils.NewIncrementalDuration(redialDelay, maxRedialDelay, 1.5) for { closeChan := make(chan struct{}) sess, err := df(closeChan) if err == nil { redialDelayInc.Reset() select { case sessChan <- sess: // continue case <-ctx.Done(): return } select { case <-closeChan: // continue case <-ctx.Done(): return } } if redial && ctx.Err() == nil { if err != nil { logger.Warning("Backend connection failed (will retry): %s\n", err) } else { logger.Warning("Backend connection exited (will retry)\n") } select { case <-redialDelayInc.NextTimeout(): continue case <-ctx.Done(): return } } else { if err != nil { logger.Error("Backend connection failed: %s\n", err) } else if ctx.Err() != nil { logger.Error("Backend connection exited\n") } return } } }() return sessChan, nil } type ( listenFunc func() error acceptFunc func() (netceptor.BackendSession, error) listenerCancelFunc func() ) // listenerSession is a convenience function for backends that use listen/accept logic. func listenerSession( ctx context.Context, wg *sync.WaitGroup, logger *logger.ReceptorLogger, lf listenFunc, af acceptFunc, lcf listenerCancelFunc, ) (chan netceptor.BackendSession, error) { if err := lf(); err != nil { return nil, err } sessChan := make(chan netceptor.BackendSession) wg.Add(1) go func() { defer func() { wg.Done() lcf() close(sessChan) }() for { c, err := af() select { case <-ctx.Done(): return default: } if err != nil { logger.Error("Error accepting connection: %s\n", err) return } select { case sessChan <- c: case <-ctx.Done(): return } } }() return sessChan, nil } receptor-1.5.3/pkg/backends/websocket_interop_test.go000066400000000000000000000076361475465740700230510ustar00rootroot00000000000000package backends import ( "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "math/big" "net" "net/http" "testing" "time" "github.com/ansible/receptor/pkg/netceptor" "github.com/gorilla/websocket" ) // This test verifies that a websockets backend client can connect to an // external backend running the websockets protocol. 
func TestWebsocketExternalInterop(t *testing.T) { // Create a Netceptor with an external backend n1 := netceptor.New(context.Background(), "node1") b1, err := netceptor.NewExternalBackend() if err != nil { t.Fatal(err) } err = n1.AddBackend(b1) if err != nil { t.Fatal(err) } // Create a server TLS certificate for "localhost" key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ CommonName: "localhost", }, DNSNames: []string{"localhost"}, NotBefore: time.Now().Add(-1 * time.Minute), NotAfter: time.Now().Add(24 * time.Hour), } certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) if err != nil { t.Fatal(err) } keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}) certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) tlsCert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { t.Fatal(err) } // Create a websocket server mux := http.NewServeMux() mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { extra := r.Header.Get("X-Extra-Data") if extra != "SomeData" { t.Fatal("Extra header not passed through") } upgrader := websocket.Upgrader{} conn, err := upgrader.Upgrade(w, r, nil) if err != nil { t.Fatalf("Error upgrading websocket connection: %s", err) return } b1.NewConnection(netceptor.MessageConnFromWebsocketConn(conn), true) }) li, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { t.Fatalf("Error listening for TCP: %s", err) } server := &http.Server{ Addr: li.Addr().String(), Handler: mux, ReadHeaderTimeout: 5 * time.Second, TLSConfig: &tls.Config{ Certificates: []tls.Certificate{tlsCert}, MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, }, } go func() { err := server.ServeTLS(li, "", "") if err != nil { t.Fatalf("Error in web server: %s", err) } }() // Create a Netceptor websocket client that connects to our server n2 := netceptor.New(context.Background(), "node2") CAcerts := x509.NewCertPool() CAcerts.AppendCertsFromPEM(certPEM) tls2 := &tls.Config{ RootCAs: CAcerts, ServerName: "localhost", MinVersion: tls.VersionTLS12, CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, } b2, err := NewWebsocketDialer("wss://"+li.Addr().String(), tls2, "X-Extra-Data: SomeData", true, n1.Logger, nil) if err != nil { t.Fatal(err) } err = n2.AddBackend(b2) if err != nil { t.Fatal(err) } // Wait for the nodes to establish routing to each other timeout, _ := context.WithTimeout(context.Background(), 2*time.Second) for { if timeout.Err() != nil { t.Fatal(timeout.Err()) } _, ok := n1.Status().RoutingTable["node2"] if ok { _, ok := n2.Status().RoutingTable["node1"] if ok { break } } time.Sleep(100 * time.Millisecond) } // Send a packet between nodes pc1, err := n1.ListenPacket("test") if err != nil { t.Fatal(err) } pc2, err := n2.ListenPacket("test") if err != nil { t.Fatal(err) } _, err = pc1.WriteTo([]byte("hello"), n1.NewAddr("node2", "test")) if err != nil { t.Fatal(err) } buf := make([]byte, 16) n, _, err := pc2.ReadFrom(buf) if err != nil { t.Fatal(err) } if string(buf[:n]) != "hello" { t.Fatal("Wrong message received") } // Shut down the nodes n1.Shutdown() n2.Shutdown() n1.BackendWait() n2.BackendWait() } receptor-1.5.3/pkg/backends/websockets.go000066400000000000000000000334701475465740700204300ustar00rootroot00000000000000package backends import ( "context" "crypto/tls" "fmt" "net" "net/http" "net/url" "strings" "sync" 
"time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ghjm/cmdline" "github.com/gorilla/websocket" "github.com/spf13/viper" ) // WebsocketDialer implements Backend for outbound Websocket. type WebsocketDialer struct { address string origin string redial bool tlscfg *tls.Config extraHeader string logger *logger.ReceptorLogger dialer GorillaWebsocketDialerForDialer } type GorillaWebsocketDialerForDialer interface { DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (Conner, *http.Response, error) } // GorillaDialWrapper represents the real library. type GorillaDialWrapper struct { dialer *websocket.Dialer } func (g GorillaDialWrapper) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (Conner, *http.Response, error) { return g.dialer.DialContext(ctx, urlStr, requestHeader) } func (b *WebsocketDialer) GetAddr() string { return b.address } func (b *WebsocketDialer) GetTLS() *tls.Config { return b.tlscfg } // NewWebsocketDialer instantiates a new WebsocketDialer backend. func NewWebsocketDialer(address string, tlscfg *tls.Config, extraHeader string, redial bool, logger *logger.ReceptorLogger, dialer GorillaWebsocketDialerForDialer) (*WebsocketDialer, error) { addrURL, err := url.Parse(address) if err != nil { return nil, err } httpScheme := "http" if addrURL.Scheme == "wss" { httpScheme = "https" } wd := WebsocketDialer{ address: address, origin: fmt.Sprintf("%s://%s", httpScheme, addrURL.Host), redial: redial, tlscfg: tlscfg, extraHeader: extraHeader, logger: logger, } if dialer != nil { wd.dialer = dialer } else { d := &websocket.Dialer{ TLSClientConfig: tlscfg, Proxy: http.ProxyFromEnvironment, } wd.dialer = GorillaDialWrapper{dialer: d} } return &wd, nil } func (b *WebsocketDialer) Dialer(dialer GorillaWebsocketDialerForDialer) GorillaWebsocketDialerForDialer { return dialer } // Start runs the given session function over this backend service. func (b *WebsocketDialer) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { return dialerSession(ctx, wg, b.redial, 5*time.Second, b.logger, func(closeChan chan struct{}) (netceptor.BackendSession, error) { header := make(http.Header) if b.extraHeader != "" { extraHeaderParts := strings.SplitN(b.extraHeader, ":", 2) header.Add(extraHeaderParts[0], extraHeaderParts[1]) } header.Add("origin", b.origin) conn, resp, err := b.dialer.DialContext(ctx, b.address, header) if err != nil { return nil, err } if resp.Body.Close(); err != nil { return nil, err } ns := newWebsocketSession(ctx, conn, closeChan) return ns, nil }) } type WebsocketListenerForWebsocket interface { Addr() net.Addr GetAddr() string GetTLS() *tls.Config Path() string SetPath(path string) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) } type GorillaWebsocketUpgraderForListener interface { Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (Conner, error) } // GorillaDialWrapper represents the real library. 
type GorillaUpgradeWrapper struct { upgrader *websocket.Upgrader } func (g GorillaUpgradeWrapper) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (Conner, error) { return g.upgrader.Upgrade(w, r, responseHeader) } type HTTPServerForListener interface { Serve(l net.Listener) error ServeTLS(l net.Listener, certFile string, keyFile string) error Close() error SetTLSConfig(tlscfg *tls.Config) SetHandeler(mux *http.ServeMux) } type HTTPServerWrapper struct { server *http.Server } func (s HTTPServerWrapper) Serve(l net.Listener) error { return s.server.Serve(l) } func (s HTTPServerWrapper) ServeTLS(l net.Listener, certFile string, keyFile string) error { return s.server.ServeTLS(l, certFile, keyFile) } func (s HTTPServerWrapper) Close() error { return s.server.Close() } func (s HTTPServerWrapper) SetTLSConfig(tlscfg *tls.Config) { s.server.TLSConfig = tlscfg } func (s HTTPServerWrapper) SetHandeler(mux *http.ServeMux) { s.server.Handler = mux } // WebsocketListener implements Backend for inbound Websocket. type WebsocketListener struct { address string path string tlscfg *tls.Config li net.Listener server HTTPServerForListener logger *logger.ReceptorLogger upgrader GorillaWebsocketUpgraderForListener } func (b *WebsocketListener) GetAddr() string { return b.Addr().String() } func (b *WebsocketListener) GetTLS() *tls.Config { return b.tlscfg } // NewWebsocketListener instantiates a new WebsocketListener backend. func NewWebsocketListener(address string, tlscfg *tls.Config, logger *logger.ReceptorLogger, upgrader GorillaWebsocketUpgraderForListener, server HTTPServerForListener) (*WebsocketListener, error) { ul := WebsocketListener{ address: address, path: "/", tlscfg: tlscfg, li: nil, logger: logger, } if upgrader != nil { ul.upgrader = upgrader } else { u := &websocket.Upgrader{} ul.upgrader = GorillaUpgradeWrapper{upgrader: u} } if server != nil { ul.server = server } else { ser := &http.Server{ Addr: address, ReadHeaderTimeout: 5 * time.Second, } ul.server = HTTPServerWrapper{server: ser} } return &ul, nil } // SetPath sets the URI path that the listener will be hosted on. // It is only effective if used prior to calling Start. func (b *WebsocketListener) SetPath(path string) { b.path = path } // Addr returns the network address the listener is listening on. func (b *WebsocketListener) Addr() net.Addr { if b.li == nil { return nil } return b.li.Addr() } // Path returns the URI path the websocket is configured on. func (b *WebsocketListener) Path() string { return b.path } // Start runs the given session function over the WebsocketListener backend. 
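// Example sketch: creating a websocket listener that serves the backend on a
// custom URI path. The bind address and path are placeholder values; passing
// nil for the upgrader and server arguments selects the real gorilla and
// net/http implementations.
func exampleWebsocketListenerUsage(ctx context.Context) error {
	n := netceptor.New(ctx, "example-ws-listener")
	b, err := NewWebsocketListener("0.0.0.0:27198", nil, n.Logger, nil, nil)
	if err != nil {
		return err
	}
	b.SetPath("/receptor")

	return n.AddBackend(b)
}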
func (b *WebsocketListener) Start(ctx context.Context, wg *sync.WaitGroup) (chan netceptor.BackendSession, error) { var err error sessChan := make(chan netceptor.BackendSession) mux := http.NewServeMux() mux.HandleFunc(b.path, func(w http.ResponseWriter, r *http.Request) { conn, err := b.upgrader.Upgrade(w, r, nil) if err != nil { b.logger.Error("Error upgrading websocket connection: %s\n", err) return } ws := newWebsocketSession(ctx, conn, nil) sessChan <- ws }) b.li, err = net.Listen("tcp", b.address) if err != nil { return nil, err } wg.Add(1) b.server.SetHandeler(mux) go func() { defer wg.Done() var err error if b.tlscfg == nil { err = b.server.Serve(b.li) } else { b.server.SetTLSConfig(b.tlscfg) err = b.server.ServeTLS(b.li, "", "") } if err != nil && err != http.ErrServerClosed { b.logger.Error("HTTP server error: %s\n", err) } }() go func() { <-ctx.Done() _ = b.server.Close() }() b.logger.Debug("Listening on Websocket %s path %s\n", b.Addr().String(), b.Path()) return sessChan, nil } // WebsocketSession implements BackendSession for WebsocketDialer and WebsocketListener. type WebsocketSession struct { conn Conner context context.Context recvChan chan *recvResult closeChan chan struct{} closeChanCloser sync.Once } type recvResult struct { data []byte err error } type Conner interface { Close() error ReadMessage() (messageType int, p []byte, err error) WriteMessage(messageType int, data []byte) error } func newWebsocketSession(ctx context.Context, conn Conner, closeChan chan struct{}) *WebsocketSession { ws := &WebsocketSession{ conn: conn, context: ctx, recvChan: make(chan *recvResult), closeChan: closeChan, closeChanCloser: sync.Once{}, } go ws.recvChannelizer() return ws } // recvChannelizer receives messages and pushes them to a channel. func (ns *WebsocketSession) recvChannelizer() { for { _, data, err := ns.conn.ReadMessage() select { case <-ns.context.Done(): return case ns.recvChan <- &recvResult{ data: data, err: err, }: } if err != nil { return } } } // Send sends data over the session. func (ns *WebsocketSession) Send(data []byte) error { err := ns.conn.WriteMessage(websocket.BinaryMessage, data) if err != nil { return err } return nil } // Recv receives data via the session. func (ns *WebsocketSession) Recv(timeout time.Duration) ([]byte, error) { select { case rr := <-ns.recvChan: return rr.data, rr.err case <-time.After(timeout): return nil, netceptor.ErrTimeout } } // Close closes the session. func (ns *WebsocketSession) Close() error { if ns.closeChan != nil { ns.closeChanCloser.Do(func() { close(ns.closeChan) ns.closeChan = nil }) } return ns.conn.Close() } // ************************************************************************** // Command line // ************************************************************************** // TODO make fields private // WebsocketListenerCfg is the cmdline configuration object for a websocket listener. 
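// Example sketch: a hypothetical receive loop over a WebsocketSession that
// treats netceptor.ErrTimeout as retryable and any other error as fatal for
// the session. The 1-second poll interval is illustrative only.
func exampleWebsocketRecvLoop(ctx context.Context, ns *WebsocketSession, handle func([]byte)) error {
	for ctx.Err() == nil {
		data, err := ns.Recv(1 * time.Second)
		if err == netceptor.ErrTimeout {
			continue
		}
		if err != nil {
			return err
		}
		handle(data)
	}

	return ctx.Err()
}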
type WebsocketListenerCfg struct { BindAddr string `description:"Local address to bind to" default:"0.0.0.0"` Port int `description:"Local TCP port to run http server on" barevalue:"yes" required:"yes"` Path string `description:"URI path to the websocket server" default:"/"` TLS string `description:"Name of TLS server config"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` NodeCost map[string]float64 `description:"Per-node costs"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } func (cfg WebsocketListenerCfg) GetCost() float64 { return cfg.Cost } func (cfg WebsocketListenerCfg) GetNodeCost() map[string]float64 { return cfg.NodeCost } func (cfg WebsocketListenerCfg) GetAddr() string { return cfg.BindAddr } func (cfg WebsocketListenerCfg) GetTLS() string { return cfg.TLS } // Prepare verifies the parameters are correct. func (cfg WebsocketListenerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } for node, cost := range cfg.NodeCost { if cost <= 0.0 { return fmt.Errorf("connection cost must be positive for %s", node) } } return nil } // Run runs the action. func (cfg WebsocketListenerCfg) Run() error { address := fmt.Sprintf("%s:%d", cfg.BindAddr, cfg.Port) tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS) if err != nil { return err } // websockets requires at least the following cipher at the top of the list if tlscfg != nil && len(tlscfg.CipherSuites) > 0 { tlscfg.CipherSuites = append([]uint16{tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256}, tlscfg.CipherSuites...) } b, err := NewWebsocketListener(address, tlscfg, netceptor.MainInstance.Logger, nil, nil) if err != nil { b.logger.Error("Error creating listener %s: %s\n", address, err) return err } b.SetPath(cfg.Path) err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendNodeCost(cfg.NodeCost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { return err } return nil } // websocketDialerCfg is the cmdline configuration object for a Websocket listener. type WebsocketDialerCfg struct { Address string `description:"URL to connect to" barevalue:"yes" required:"yes"` Redial bool `description:"Keep redialing on lost connection" default:"true"` ExtraHeader string `description:"Sends extra HTTP header on initial connection"` TLS string `description:"Name of TLS client config"` Cost float64 `description:"Connection cost (weight)" default:"1.0"` AllowedPeers []string `description:"Peer node IDs to allow via this connection"` } // Prepare verifies that we are reasonably ready to go. func (cfg WebsocketDialerCfg) Prepare() error { if cfg.Cost <= 0.0 { return fmt.Errorf("connection cost must be positive") } if _, err := url.Parse(cfg.Address); err != nil { return fmt.Errorf("address %s is not a valid URL: %s", cfg.Address, err) } if cfg.ExtraHeader != "" && !strings.Contains(cfg.ExtraHeader, ":") { return fmt.Errorf("extra header must be in the form key:value") } return nil } // Run runs the action. 
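// Example sketch: a WebsocketDialerCfg as the config loader might populate
// it, validated with Prepare before Run. The address and header are
// placeholder values; Prepare requires a positive cost, a parseable URL, and
// a "key:value" form for ExtraHeader.
func exampleValidateWebsocketDialerCfg() error {
	cfg := WebsocketDialerCfg{
		Address:     "wss://proxy.example.invalid:443/",
		Redial:      true,
		ExtraHeader: "X-Extra-Data: SomeData",
		Cost:        1.0,
	}

	return cfg.Prepare()
}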
func (cfg WebsocketDialerCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running Websocket peer connection %s\n", cfg.Address) u, err := url.Parse(cfg.Address) if err != nil { return err } tlsCfgName := cfg.TLS if u.Scheme == "wss" && tlsCfgName == "" { tlsCfgName = "default" } tlscfg, err := netceptor.MainInstance.GetClientTLSConfig(tlsCfgName, u.Hostname(), netceptor.ExpectedHostnameTypeDNS) if err != nil { return err } b, err := NewWebsocketDialer(cfg.Address, tlscfg, cfg.ExtraHeader, cfg.Redial, netceptor.MainInstance.Logger, nil) if err != nil { b.logger.Error("Error creating peer %s: %s\n", cfg.Address, err) return err } err = netceptor.MainInstance.AddBackend(b, netceptor.BackendConnectionCost(cfg.Cost), netceptor.BackendAllowedPeers(cfg.AllowedPeers)) if err != nil { return err } return nil } func (cfg WebsocketDialerCfg) PreReload() error { return cfg.Prepare() } func (cfg WebsocketListenerCfg) PreReload() error { return cfg.Prepare() } func (cfg WebsocketDialerCfg) Reload() error { return cfg.Run() } func (cfg WebsocketListenerCfg) Reload() error { return cfg.Run() } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-backends", "ws-listener", "Run an http server that accepts websocket connections", WebsocketListenerCfg{}, cmdline.Section(backendSection)) cmdline.RegisterConfigTypeForApp("receptor-backends", "ws-peer", "Connect outbound to a websocket peer", WebsocketDialerCfg{}, cmdline.Section(backendSection)) } receptor-1.5.3/pkg/backends/websockets_test.go000066400000000000000000000331251475465740700214640ustar00rootroot00000000000000package backends_test import ( "bytes" "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "errors" "io" "math/big" "net/http" "strings" "sync" "testing" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/backends/mock_backends" "github.com/ansible/receptor/pkg/logger" "go.uber.org/mock/gomock" ) const ( expectedWebsocketListenerError = "Expected Websocket Listener, got nil" newWebsocketDialerError = "NewWebsockerDialer return error: %+v" websocketListenerExpectedError = "Websocket listener expected, nil returned" websocketsTestNewWebsocketDialer = "websockets_test.go>TestNewWebsocketDialer" wssTestTesting = "wss://test.testing" ) func setupTLSCfg(t *testing.T) tls.Certificate { // Create a server TLS certificate for "localhost" key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { t.Fatal(err) } template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ CommonName: "localhost", }, DNSNames: []string{"localhost"}, NotBefore: time.Now().Add(-1 * time.Minute), NotAfter: time.Now().Add(24 * time.Hour), } certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) if err != nil { t.Fatal(err) } keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}) certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) tlsCert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { t.Fatal(err) } return tlsCert } func TestNewWebsocketDialer(t *testing.T) { NewWebsocketDialerTestCases := []struct { name string address string redial bool tlscfg *tls.Config extraHeader string logger *logger.ReceptorLogger expectedErr string failedTestString string }{ { name: "NewWebsocketDialer wss Success ", address: wssTestTesting, redial: false, tlscfg: &tls.Config{ Certificates: 
[]tls.Certificate{setupTLSCfg(t)}, MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, }, extraHeader: "", logger: logger.NewReceptorLogger(websocketsTestNewWebsocketDialer), expectedErr: "", failedTestString: "Expected no error, but got: %v", }, { name: "NewWebsocketDialer non-wss Success ", address: "test.testing", redial: false, tlscfg: &tls.Config{ Certificates: []tls.Certificate{setupTLSCfg(t)}, MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, }, extraHeader: "", logger: logger.NewReceptorLogger(websocketsTestNewWebsocketDialer), expectedErr: "", failedTestString: "Expected no error, but got: %v", }, } for _, testCase := range NewWebsocketDialerTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := backends.NewWebsocketDialer(testCase.address, testCase.tlscfg, testCase.extraHeader, testCase.redial, testCase.logger, nil) if testCase.expectedErr == "" && err != nil { t.Errorf(testCase.failedTestString, err) } if testCase.expectedErr != "" && err != nil && err.Error() != testCase.expectedErr { t.Errorf(testCase.failedTestString, err) } if testCase.expectedErr != "" && err == nil { t.Errorf(testCase.failedTestString, err) } }) } } func setUpDialer(t *testing.T) (*gomock.Controller, *mock_backends.MockGorillaWebsocketDialerForDialer) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockWebsocketDialer := mock_backends.NewMockGorillaWebsocketDialerForDialer(ctrl) return ctrl, mockWebsocketDialer } func setUpDialerWithConner(t *testing.T) (*gomock.Controller, *mock_backends.MockGorillaWebsocketDialerForDialer, *mock_backends.MockConner) { ctrl, mockWebsocketDialer := setUpDialer(t) mockWebsocketConner := mock_backends.NewMockConner(ctrl) return ctrl, mockWebsocketDialer, mockWebsocketConner } func TestWebsocketDialerStart(t *testing.T) { _, mockWebsocketDialer, mockWebsocketConner := setUpDialerWithConner(t) ctx := context.Background() defer ctx.Done() wd, wdErr := backends.NewWebsocketDialer(wssTestTesting, &tls.Config{}, "", false, logger.NewReceptorLogger(websocketsTestNewWebsocketDialer), mockWebsocketDialer) if wdErr != nil { t.Errorf(newWebsocketDialerError, wdErr) } resp := &http.Response{ Body: io.NopCloser(bytes.NewBufferString("Hello World")), } websocketDialerTestCases := []struct { name string err error }{ { name: "Start ok", err: nil, }, { name: "Start Error", err: errors.New("Websocket Start error"), }, } for _, testCase := range websocketDialerTestCases { t.Run(testCase.name, func(t *testing.T) { mockWebsocketDialer.EXPECT().DialContext(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockWebsocketConner, resp, testCase.err) mockWebsocketConner.EXPECT().ReadMessage().Return(0, []byte{}, nil).AnyTimes() sess, err := wd.Start(ctx, &sync.WaitGroup{}) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } s := <-sess if testCase.err == nil && s == nil { t.Errorf("session should not be nil") } if testCase.err != nil && s != nil { t.Errorf("session should be nil") } }) } } func TestWebsocketDialerGetAddr(t *testing.T) { _, mockWebsocketDialer := setUpDialer(t) address := wssTestTesting wd, wdErr := backends.NewWebsocketDialer(address, &tls.Config{}, "", false, logger.NewReceptorLogger(websocketsTestNewWebsocketDialer), mockWebsocketDialer) if wdErr != nil { t.Errorf(newWebsocketDialerError, wdErr) } add := wd.GetAddr() if add != address { t.Errorf("Expected Dialer Address to be %s, got %s instead", address, add) } } func TestWebsocketDialerGetTLS(t *testing.T) { _, mockWebsocketDialer := setUpDialer(t) blankTLS := &tls.Config{} wd, 
wdErr := backends.NewWebsocketDialer(wssTestTesting, blankTLS, "", false, logger.NewReceptorLogger(websocketsTestNewWebsocketDialer), mockWebsocketDialer) if wdErr != nil { t.Errorf(newWebsocketDialerError, wdErr) } TLS := wd.GetTLS() if TLS != blankTLS { t.Errorf("Expected Dialer TLS to be %v, got %v instead", blankTLS, TLS) } } func setUpListener(t *testing.T) (*gomock.Controller, *mock_backends.MockGorillaWebsocketUpgraderForListener, *mock_backends.MockHTTPServerForListener) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockWebsocketUpgrader := mock_backends.NewMockGorillaWebsocketUpgraderForListener(ctrl) mockServer := mock_backends.NewMockHTTPServerForListener(ctrl) return ctrl, mockWebsocketUpgrader, mockServer } func TestNewWebsocketListener(t *testing.T) { _, mockWebsocketUpgrader, mockServer := setUpListener(t) wi, err := backends.NewWebsocketListener("address", &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } } func TestWebsocketListenerSetandGetPath(t *testing.T) { _, mockWebsocketUpgrader, mockServer := setUpListener(t) wi, err := backends.NewWebsocketListener("address", &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } pathName := "Test Path" wi.SetPath(pathName) p := wi.Path() if p != pathName { t.Errorf("Expected path to be %s got %s instead", pathName, p) } } func TestWebsocketListenerStart(t *testing.T) { ctrl, mockWebsocketUpgrader, mockServer := setUpListener(t) ctx := context.Background() defer ctx.Done() mockWebsocketConner := mock_backends.NewMockConner(ctrl) wi, err := backends.NewWebsocketListener("localhost:21700", &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } mockWebsocketUpgrader.EXPECT().Upgrade(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockWebsocketConner, nil).AnyTimes() mockServer.EXPECT().SetHandeler(gomock.Any()) mockServer.EXPECT().SetTLSConfig(gomock.Any()).AnyTimes() mockServer.EXPECT().ServeTLS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() bs, err := wi.Start(ctx, &sync.WaitGroup{}) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if bs == nil { t.Error(expectedWebsocketListenerError) } } func TestWebsocketListenerStartUpgradeError(t *testing.T) { _, mockWebsocketUpgrader, mockServer := setUpListener(t) ctx := context.Background() defer ctx.Done() wi, err := backends.NewWebsocketListener("localhost:21701", &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } returnError := errors.New("Upgrade Error") mockWebsocketUpgrader.EXPECT().Upgrade(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, returnError).AnyTimes() mockServer.EXPECT().SetHandeler(gomock.Any()) mockServer.EXPECT().SetTLSConfig(gomock.Any()).AnyTimes() mockServer.EXPECT().ServeTLS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() bs, err := wi.Start(ctx, &sync.WaitGroup{}) if err != nil { t.Errorf("Expected error %v, got %v instead", nil, err) } if bs == nil { t.Error(expectedWebsocketListenerError) 
} } func TestWebsocketListenerStartNetError(t *testing.T) { ctrl, mockWebsocketUpgrader, mockServer := setUpListener(t) ctx := context.Background() defer ctx.Done() mockWebsocketConner := mock_backends.NewMockConner(ctrl) badAddress := "127.0.0.1:80" wi, err := backends.NewWebsocketListener(badAddress, &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } mockWebsocketUpgrader.EXPECT().Upgrade(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockWebsocketConner, nil).AnyTimes() bs, err := wi.Start(ctx, &sync.WaitGroup{}) if !strings.Contains(err.Error(), "listen tcp 127.0.0.1:80: bind: permission denied") { t.Errorf(err.Error()) //nolint:govet,staticcheck } if bs != nil { t.Errorf("Expected Websocket Listener to be nil") } } func TestWebsocketListenerStartTLSNil(t *testing.T) { ctrl, mockWebsocketUpgrader, mockServer := setUpListener(t) ctx := context.Background() defer ctx.Done() mockWebsocketConner := mock_backends.NewMockConner(ctrl) wi, err := backends.NewWebsocketListener("localhost:21702", nil, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } mockWebsocketUpgrader.EXPECT().Upgrade(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockWebsocketConner, nil).AnyTimes() mockServer.EXPECT().SetHandeler(gomock.Any()) mockServer.EXPECT().Serve(gomock.Any()).Return(errors.New("Test Error")).AnyTimes() bs, err := wi.Start(ctx, &sync.WaitGroup{}) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if bs == nil { t.Errorf("Expected Websocket Listener not be nil") } } func TestWebsocketListenerGetAddr(t *testing.T) { ctrl, mockWebsocketUpgrader, mockServer := setUpListener(t) ctx := context.Background() defer ctx.Done() mockWebsocketConner := mock_backends.NewMockConner(ctrl) address := "127.0.0.1:21703" wi, err := backends.NewWebsocketListener(address, &tls.Config{}, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } mockWebsocketUpgrader.EXPECT().Upgrade(gomock.Any(), gomock.Any(), gomock.Any()).Return(mockWebsocketConner, nil).AnyTimes() mockServer.EXPECT().SetHandeler(gomock.Any()) mockServer.EXPECT().SetTLSConfig(gomock.Any()).AnyTimes() mockServer.EXPECT().ServeTLS(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() bs, err := wi.Start(ctx, &sync.WaitGroup{}) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if bs == nil { t.Error(expectedWebsocketListenerError) } add := wi.GetAddr() if add != address { t.Errorf("Expected Listener Address to be %s, got %s instead", address, add) } } func TestWebsocketListenerGetTLS(t *testing.T) { _, mockWebsocketUpgrader, mockServer := setUpListener(t) blankTLS := &tls.Config{} wi, err := backends.NewWebsocketListener("127.0.0.1:21704", blankTLS, logger.NewReceptorLogger("test"), mockWebsocketUpgrader, mockServer) if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } if wi == nil { t.Error(websocketListenerExpectedError) } TLS := wi.GetTLS() if TLS != blankTLS { t.Errorf("Expected Dialer TLS to be %v, got %v instead", blankTLS, TLS) } } func TestWebsocketListenerCfg(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() baseBindAddr := "127.0.0.1" 
basePort := 21710 basePath := "" baseTLS := "" baseCost := 1.0 wlc := backends.WebsocketListenerCfg{ BindAddr: baseBindAddr, Port: basePort, Path: basePath, TLS: baseTLS, Cost: baseCost, } c := wlc.GetCost() if c != baseCost { t.Errorf("Expected %v, got %v", baseCost, c) } a := wlc.GetAddr() if a != baseBindAddr { t.Errorf("Expected %s, got %s", baseBindAddr, a) } tls := wlc.GetTLS() if tls != baseTLS { t.Errorf("Expected %s, got %s", baseTLS, tls) } err := wlc.Prepare() if err != nil { t.Errorf(err.Error()) //nolint:govet,staticcheck } } receptor-1.5.3/pkg/certificates/000077500000000000000000000000001475465740700166145ustar00rootroot00000000000000receptor-1.5.3/pkg/certificates/ca.go000066400000000000000000000264641475465740700175420ustar00rootroot00000000000000//go:build !no_cert_auth // +build !no_cert_auth package certificates import ( "bytes" "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "io" "io/fs" "math/big" "net" "os" "strings" "time" "github.com/ansible/receptor/pkg/utils" ) // Oser is the function calls interfaces for mocking os. type Oser interface { ReadFile(name string) ([]byte, error) WriteFile(name string, data []byte, perm fs.FileMode) error } // OsWrapper is the Wrapper structure for Oser. type OsWrapper struct{} // ReadFile for Oser defaults to os library call. func (ow *OsWrapper) ReadFile(name string) ([]byte, error) { return os.ReadFile(name) } // WriteFile for Oser defaults to os library call. func (ow *OsWrapper) WriteFile(name string, data []byte, perm fs.FileMode) error { return os.WriteFile(name, data, perm) } // Rsaer is the function calls interface for mocking rsa. type Rsaer interface { GenerateKey(random io.Reader, bits int) (*rsa.PrivateKey, error) } // RsaWrapper is the Wrapper structure for Rsaer. type RsaWrapper struct{} // GenerateKey for RsaWrapper defaults to rsa library call. func (rw *RsaWrapper) GenerateKey(random io.Reader, bits int) (*rsa.PrivateKey, error) { return rsa.GenerateKey(random, bits) } // CertNames lists the subjectAltNames that can be assigned to a certificate or request. type CertNames struct { DNSNames []string NodeIDs []string IPAddresses []net.IP } // CertOptions are the parameters used to initialize a new certificate or request. type CertOptions struct { CertNames CommonName string Bits int NotBefore time.Time NotAfter time.Time } // LoadFromPEMFile loads certificate data from a PEM file. 
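// Example sketch: CertOptions for a per-node certificate request such as
// CreateCertReqWithKey builds. The names, node ID, and IP address are
// placeholder values. Note that CreateCA rejects options carrying DNSNames,
// NodeIDs, or IPAddresses, so an options block like this is only for leaf
// certificates, not for the CA itself.
func exampleNodeCertOptions() *CertOptions {
	return &CertOptions{
		CertNames: CertNames{
			DNSNames:    []string{"localhost"},
			NodeIDs:     []string{"example-node"},
			IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
		},
		CommonName: "example-node",
		Bits:       2048,
	}
}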
func LoadFromPEMFile(filename string, osWrapper Oser) ([]interface{}, error) { content, err := Oser.ReadFile(osWrapper, filename) if err != nil { return nil, err } results := make([]interface{}, 0) var block *pem.Block for len(content) > 0 { block, content = pem.Decode(content) if block == nil { return nil, fmt.Errorf("failed to decode PEM block") } switch block.Type { case "CERTIFICATE": var cert *x509.Certificate cert, err = x509.ParseCertificate(block.Bytes) if err != nil { return nil, err } results = append(results, cert) case "CERTIFICATE REQUEST": var req *x509.CertificateRequest req, err = x509.ParseCertificateRequest(block.Bytes) if err != nil { return nil, err } results = append(results, req) case "RSA PRIVATE KEY": var key *rsa.PrivateKey key, err = x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } results = append(results, key) case "PRIVATE KEY": key, err := x509.ParsePKCS8PrivateKey(block.Bytes) if err != nil { return nil, err } results = append(results, key) case "PUBLIC KEY": key, err := x509.ParsePKIXPublicKey(block.Bytes) if err != nil { return nil, err } results = append(results, key) default: return nil, fmt.Errorf("unknown block type %s", block.Type) } } return results, nil } // SaveToPEMFile saves certificate data to a PEM file. func SaveToPEMFile(filename string, data []interface{}, osWrapper Oser) error { var err error var ok bool content := make([]string, 0) for _, elem := range data { var cert *x509.Certificate cert, ok = elem.(*x509.Certificate) if ok { certPEM := new(bytes.Buffer) err = pem.Encode(certPEM, &pem.Block{ Type: "CERTIFICATE", Bytes: cert.Raw, }) if err != nil { return err } content = append(content, certPEM.String()) continue } var req *x509.CertificateRequest req, ok = elem.(*x509.CertificateRequest) if ok { reqPEM := new(bytes.Buffer) err = pem.Encode(reqPEM, &pem.Block{ Type: "CERTIFICATE REQUEST", Bytes: req.Raw, }) if err != nil { return err } content = append(content, reqPEM.String()) continue } var keyPrivate *rsa.PrivateKey keyPrivate, ok = elem.(*rsa.PrivateKey) if ok { keyPEM := new(bytes.Buffer) err = pem.Encode(keyPEM, &pem.Block{ Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(keyPrivate), }) if err != nil { return err } content = append(content, keyPEM.String()) continue } var keyPublic *rsa.PublicKey keyPublic, ok = elem.(*rsa.PublicKey) if ok { keyPEM := new(bytes.Buffer) keyPublicBytes, err := x509.MarshalPKIXPublicKey(keyPublic) if err != nil { return err } err = pem.Encode(keyPEM, &pem.Block{ Type: "PUBLIC KEY", Bytes: keyPublicBytes, }) if err != nil { return err } content = append(content, keyPEM.String()) continue } return fmt.Errorf("unknown block type %s", elem) } return Oser.WriteFile(osWrapper, filename, []byte(strings.Join(content, "\n")), 0o600) } // LoadCertificate loads a single certificate from a file. func LoadCertificate(filename string, osWrapper Oser) (*x509.Certificate, error) { data, err := LoadFromPEMFile(filename, osWrapper) if err != nil { return nil, err } if len(data) != 1 { return nil, fmt.Errorf("certificate file should contain exactly one item") } cert, ok := data[0].(*x509.Certificate) if !ok { return nil, fmt.Errorf("certificate file does not contain certificate data") } return cert, nil } // LoadRequest loads a single certificate request from a file. 
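//
// A minimal round-trip sketch, assuming a *CertOptions named opts built by the
// caller; the file name is illustrative. The loaded file must contain exactly
// one PEM block, so only the request is written here:
//
//	req, key, _ := CreateCertReqWithKey(opts)
//	if err := SaveToPEMFile("node.csr", []interface{}{req}, &OsWrapper{}); err != nil {
//		return err
//	}
//	loaded, err := LoadRequest("node.csr", &OsWrapper{})
//	_ = key // the private key is kept separately, e.g. in its own PEM file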
func LoadRequest(filename string, osWrapper Oser) (*x509.CertificateRequest, error) { data, err := LoadFromPEMFile(filename, osWrapper) if err != nil { return nil, err } if len(data) != 1 { return nil, fmt.Errorf("certificate request file should contain exactly one item") } req, ok := data[0].(*x509.CertificateRequest) if !ok { return nil, fmt.Errorf("certificate request file does not contain certificate request data") } return req, nil } // LoadPrivateKey loads a single RSA private key from a file. func LoadPrivateKey(filename string, osWrapper Oser) (*rsa.PrivateKey, error) { data, err := LoadFromPEMFile(filename, osWrapper) if err != nil { return nil, err } if len(data) != 1 { return nil, fmt.Errorf("private key file should contain exactly one item") } key, ok := data[0].(*rsa.PrivateKey) if !ok { return nil, fmt.Errorf("private key file does not contain private key data") } return key, nil } // LoadPublicKey loads a single RSA public key from a file. func LoadPublicKey(filename string, osWrapper Oser) (*rsa.PublicKey, error) { data, err := LoadFromPEMFile(filename, osWrapper) if err != nil { return nil, err } if len(data) != 1 { return nil, fmt.Errorf("public key file should contain exactly one item") } key, ok := data[0].(*rsa.PublicKey) if !ok { return nil, fmt.Errorf("public key file does not contain public key data") } return key, nil } // CA contains internal data for a certificate authority. type CA struct { Certificate *x509.Certificate PrivateKey *rsa.PrivateKey } // CreateCA initializes a new CertKeyPair from given parameters. func CreateCA(opts *CertOptions, rsaWrapper Rsaer) (*CA, error) { if opts.CommonName == "" { return nil, fmt.Errorf("must provide CommonName") } if opts.Bits == 0 { opts.Bits = 2048 } if opts.NotBefore.IsZero() { opts.NotBefore = time.Now() } if opts.NotAfter.IsZero() { opts.NotAfter = time.Now().AddDate(10, 0, 0) } if opts.DNSNames != nil || opts.NodeIDs != nil || opts.IPAddresses != nil { return nil, fmt.Errorf("CertKeyPair certificate cannot have DNSNames, NodeIDs or IPAddresses") } var err error ca := &CA{} ca.PrivateKey, err = rsaWrapper.GenerateKey(rand.Reader, opts.Bits) if err != nil { return nil, err } caTemplate := &x509.Certificate{ SerialNumber: big.NewInt(time.Now().Unix()), Subject: pkix.Name{ CommonName: opts.CommonName, }, NotBefore: opts.NotBefore, NotAfter: opts.NotAfter, IsCA: true, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, } caBytes, err := x509.CreateCertificate(rand.Reader, caTemplate, caTemplate, &ca.PrivateKey.PublicKey, ca.PrivateKey) if err != nil { return nil, err } ca.Certificate, err = x509.ParseCertificate(caBytes) if err != nil { return nil, err } return ca, nil } // CreateCertReqWithKey creates a new x.509 certificate request with a newly generated private key. func CreateCertReqWithKey(opts *CertOptions) (*x509.CertificateRequest, *rsa.PrivateKey, error) { key, err := rsa.GenerateKey(rand.Reader, opts.Bits) if err != nil { return nil, nil, err } req, err := CreateCertReq(opts, key) if err != nil { return nil, nil, err } return req, key, nil } // CreateCertReq creates a new x.509 certificate request for an existing private key. 
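//
// A hedged end-to-end sketch of issuing a node certificate with this package
// (option values are illustrative; SignCertReq is defined later in this file):
//
//	ca, _ := CreateCA(&CertOptions{CommonName: "example mesh CA"}, &RsaWrapper{})
//	nodeKey, _ := rsa.GenerateKey(rand.Reader, 2048)
//	req, _ := CreateCertReq(&CertOptions{
//		CommonName: "node1",
//		CertNames:  CertNames{NodeIDs: []string{"node1"}},
//	}, nodeKey)
//	cert, err := SignCertReq(req, ca, &CertOptions{})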
func CreateCertReq(opts *CertOptions, privateKey *rsa.PrivateKey) (*x509.CertificateRequest, error) { if opts.CommonName == "" { return nil, fmt.Errorf("must provide CommonName") } if opts.Bits == 0 { opts.Bits = 2048 } var err error var san *pkix.Extension san, err = utils.MakeReceptorSAN(opts.DNSNames, opts.IPAddresses, opts.NodeIDs) if err != nil { return nil, err } reqTemplate := &x509.CertificateRequest{ Subject: pkix.Name{ CommonName: opts.CommonName, }, ExtraExtensions: []pkix.Extension{*san}, } var reqBytes []byte reqBytes, err = x509.CreateCertificateRequest(rand.Reader, reqTemplate, privateKey) if err != nil { return nil, err } var req *x509.CertificateRequest req, err = x509.ParseCertificateRequest(reqBytes) if err != nil { return nil, err } return req, nil } // GetReqNames returns the names coded into a certificate request, including Receptor node IDs. func GetReqNames(request *x509.CertificateRequest) (*CertNames, error) { nodeIDs, err := utils.ReceptorNames(request.Extensions) if err != nil { return nil, err } cn := &CertNames{ DNSNames: request.DNSNames, NodeIDs: nodeIDs, IPAddresses: request.IPAddresses, } return cn, nil } // SignCertReq signs a certificate request using a CA key. func SignCertReq(req *x509.CertificateRequest, ca *CA, opts *CertOptions) (*x509.Certificate, error) { if opts.NotBefore.IsZero() { opts.NotBefore = time.Now() } if opts.NotAfter.IsZero() { opts.NotAfter = time.Now().AddDate(1, 0, 0) } certTemplate := &x509.Certificate{ SerialNumber: big.NewInt(time.Now().Unix()), Signature: req.Signature, SignatureAlgorithm: req.SignatureAlgorithm, PublicKey: req.PublicKey, PublicKeyAlgorithm: req.PublicKeyAlgorithm, Issuer: ca.Certificate.Subject, Subject: req.Subject, NotBefore: opts.NotBefore, NotAfter: opts.NotAfter, IsCA: false, KeyUsage: x509.KeyUsageDigitalSignature, ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, } found := false for _, ext := range req.Extensions { if ext.Id.Equal(utils.OIDSubjectAltName) { certTemplate.ExtraExtensions = []pkix.Extension{ext} found = true break } } if !found { certTemplate.DNSNames = req.DNSNames certTemplate.IPAddresses = req.IPAddresses } certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, ca.Certificate, req.PublicKey, ca.PrivateKey) if err != nil { return nil, err } var cert *x509.Certificate cert, err = x509.ParseCertificate(certBytes) if err != nil { return nil, err } return cert, nil } receptor-1.5.3/pkg/certificates/ca_test.go000066400000000000000000002113331475465740700205700ustar00rootroot00000000000000//go:build !no_cert_auth // +build !no_cert_auth package certificates_test import ( "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "io" "net" "reflect" "testing" "time" "github.com/ansible/receptor/pkg/certificates" "github.com/ansible/receptor/pkg/certificates/mock_certificates" "github.com/ansible/receptor/pkg/utils" "go.uber.org/mock/gomock" ) var excessPEMDataFormatString string = "Excess PEM Data: %v" var wrongPEMBlockTypeFormatString string = "PEM Block is not a %s: %v" func setupGoodCA(certOpts *certificates.CertOptions, rsaer certificates.Rsaer) (*certificates.CA, error) { certOpts.Bits = 4096 certOpts.CommonName = "Ansible Automation Controller Nodes Mesh Certificate Authority" certOpts.DNSNames = nil certOpts.NodeIDs = nil certOpts.IPAddresses = nil goodCaTimeAfterString := "2032-01-07T00:03:51Z" goodCaTimeAfter, err := time.Parse(time.RFC3339, goodCaTimeAfterString) if err != nil { return nil, err } 
certOpts.NotAfter = goodCaTimeAfter goodCaTimeBeforeString := "2022-01-07T00:03:51Z" goodCaTimeBefore, err := time.Parse(time.RFC3339, goodCaTimeBeforeString) if err != nil { return nil, err } certOpts.NotBefore = goodCaTimeBefore goodCA, err := certificates.CreateCA(certOpts, rsaer) if err != nil { return nil, err } return goodCA, nil } func setupGoodCaRsaPrivateKey() (*rsa.PrivateKey, error) { goodCaRsaPrivateKeyPEMData := setupGoodCaRsaPrivateKeyPEMData() goodCaRsaPrivateKeyBlock, rest := pem.Decode(goodCaRsaPrivateKeyPEMData) if len(rest) != 0 { return &rsa.PrivateKey{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCaRsaPrivateKeyBlock.Type != "RSA PRIVATE KEY" { return &rsa.PrivateKey{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "RSA Private Key", goodCaRsaPrivateKeyBlock.Type) } goodCaRsaPrivateKey, err := x509.ParsePKCS1PrivateKey(goodCaRsaPrivateKeyBlock.Bytes) if err != nil { return &rsa.PrivateKey{}, err } return goodCaRsaPrivateKey, nil } func setupGoodCaCertificate() (*x509.Certificate, error) { goodCaCertificatePEMData := setupGoodCaCertificatePEMData() goodCaCertificateBlock, rest := pem.Decode(goodCaCertificatePEMData) if len(rest) != 0 { return &x509.Certificate{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCaCertificateBlock.Type != "CERTIFICATE" { return &x509.Certificate{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "certificate", goodCaCertificateBlock.Type) } goodCaCertificate, err := x509.ParseCertificate(goodCaCertificateBlock.Bytes) if err != nil { return &x509.Certificate{}, err } return goodCaCertificate, nil } func setupGoodCaCertificatePEMData() []byte { return []byte(`-----BEGIN CERTIFICATE----- MIIFcTCCA1mgAwIBAgIEZmwvHzANBgkqhkiG9w0BAQsFADBJMUcwRQYDVQQDEz5B bnNpYmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIENlcnRpZmlj YXRlIEF1dGhvcml0eTAeFw0yMjAxMDcwMDAzNTFaFw0zMjAxMDcwMDAzNTFaMEkx RzBFBgNVBAMTPkFuc2libGUgQXV0b21hdGlvbiBDb250cm9sbGVyIE5vZGVzIE1l c2ggQ2VydGlmaWNhdGUgQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A MIICCgKCAgEA85o/C8zq1u1H+Cv+1o8tVk4scAQpwisifG0/fEXyoxliV5bmb5fo VXSxPAehBYHDu1w9ncT05AFHwe8gX4Iz8xCkjtiGDls9D+JDuZgxFif0RRh7bgwK VagmSIMUKaueqWJoIxtfq7n77UysKmby28V3GvIlF+H4nWzrH+RjuYLH9MeKptHK 4RW3FU0fLAPszD8erfgyl66Ka0vi6nX3ey0O15RPo/+rnBivdM9md6H6HXAGpbu6 n+Zs3AaMy4EyCSz0kPNY7A8qYCm4ZtmmQh7IHvtTQFUad/KWXYquSijKGkEJEpsP jDjPLkX4jJvwA8SVYv4wVD3Ou4eVWcXxMQfPucIb20cAgy/6JMWyWQSgvh6E+Wit 2c4PT6ivk5DwVLF9qTFvsZzVFDuFRs5sITxtFENh573DCbRM6wLkDMsLcGqDLQ3N KE4rD7yTq8Akg8ag28ktAtyCkss/8/AViVgo0EsDIpTU1+FKu6tu+jy9PQXJPgjm xae8g2KdozuO9gaAfTznze8olZCdbQSCMaSdrbV69N8dmJkh046d7OSad5+zILxW uo+K62p37wwEjrSD5dXynkHfEdQv7RZB5DvhswRVz2rMrJKyLYwj7ligUj7+ukMJ rwm+IRiGL1e/KRt1wbBOPFaDFqAr9oGBIRe/9f4RTB+d4h6TL3G9ukcCAwEAAaNh MF8wDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcD ATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRisPLfHjVNCLRBsbFPatzvlYGn rTANBgkqhkiG9w0BAQsFAAOCAgEANE65T3tLEXzq2w0dxEorqi14ctXg/coooG6A x5z1tEY6nQhj9/W4CIg+fQ0z82MXOIr29xDyRHmK3BX4FQ4mCYmJROL3sOkJtpwE dXJXt8MwsbtLF+8+rq98f0jnBR0AlnSzZMKNj7mfLWGnDr/DaCnjkmqzI4hA59m2 L1Jp8krAhfALkUaulQr/ceFRkmsW9sJ7HfhokxuBOXELJKB0w7IVo6oyOUwGICVd YMKKss638nPCYBEf/AlDgHlpBAbhsegllWQ5WKh6MYZNOecOaSD58CIyDmngUhPW djmbF9Gv85gZpQAcsGNPrh9VbxWMB52zdcOAD9YUHaaLYQtJ/v+tqRv61v381rtH 5Dm8LiKbkQOgmQS1YaWR707uyjHvDPoE0lO7lg66dW4zSspfVfqZ/DIHTqj+iA2N SMDcTfs6YLxrOEm8ZM7wF8LTSWTSoIYuMsTatMi/V7VwS3qlMY8wARmpduv1qz57 unWHC+aT6XA4CnrhZql0NVcfKoDZTDX8uMp1iPF1hdlPImBgpEpHjBMOxdiJ3KyY lsHyE+lNowsx+F2Ogy3sjivsGiyRHq5/gC37H9HSUDdqTTruX5219TPNQw58KAuE tUxX07xI1GjHRXMS60VKYLeVkFRoYGNAF8q6VOZQgY52412ujEbH5C7lYXXWROeR 
GbTkczs= -----END CERTIFICATE-----`) } func setupGoodCaRsaPrivateKeyPEMData() []byte { return []byte(`-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEA85o/C8zq1u1H+Cv+1o8tVk4scAQpwisifG0/fEXyoxliV5bm b5foVXSxPAehBYHDu1w9ncT05AFHwe8gX4Iz8xCkjtiGDls9D+JDuZgxFif0RRh7 bgwKVagmSIMUKaueqWJoIxtfq7n77UysKmby28V3GvIlF+H4nWzrH+RjuYLH9MeK ptHK4RW3FU0fLAPszD8erfgyl66Ka0vi6nX3ey0O15RPo/+rnBivdM9md6H6HXAG pbu6n+Zs3AaMy4EyCSz0kPNY7A8qYCm4ZtmmQh7IHvtTQFUad/KWXYquSijKGkEJ EpsPjDjPLkX4jJvwA8SVYv4wVD3Ou4eVWcXxMQfPucIb20cAgy/6JMWyWQSgvh6E +Wit2c4PT6ivk5DwVLF9qTFvsZzVFDuFRs5sITxtFENh573DCbRM6wLkDMsLcGqD LQ3NKE4rD7yTq8Akg8ag28ktAtyCkss/8/AViVgo0EsDIpTU1+FKu6tu+jy9PQXJ Pgjmxae8g2KdozuO9gaAfTznze8olZCdbQSCMaSdrbV69N8dmJkh046d7OSad5+z ILxWuo+K62p37wwEjrSD5dXynkHfEdQv7RZB5DvhswRVz2rMrJKyLYwj7ligUj7+ ukMJrwm+IRiGL1e/KRt1wbBOPFaDFqAr9oGBIRe/9f4RTB+d4h6TL3G9ukcCAwEA AQKCAgA/oMRi8q1MYkHwIp23sUIoTtbwk0XM/7NMSMTSUvrjexfixPl/oHABEi/Q /DUk2RYk9Bzr/qvweh0iHLGaAMpM7MXuwcOSMGaspW8HJSd9IAZ/nbtvrRlt8jO9 sO8tSzRwDdIhiV7d3gBdpS87DBTXLqbDlQf3SwDDLdXkSKV7qRtNZmli1V1mmWNu 3uwLdSL+3mXHDxM351EkYXftwc5YkBZWXNjIf1l1tkTt/9ZF3TsVQVObKoO+jFNW zSctaJuHJgTjb4yEbBmncH5H7wWsM5oV7ZN7ND9roSOBX13S5QCVBvO1tmbDO3ty HLilEX4JnelfKAG7n0FGXk22Wyt6HpIK2zcD5IDdmufk/dJM7wYkqnozmCs8k6xo /c5LijewuJwVHsd7PPUOfhRws+1tNpgZw++IEiFok8SkqkjV4LMsw3dFMvsAr1+u zu8or2YLbtOzzFxe4mMb4tGNO8KEdqXEsYf0Mbcoy26YO95iPiAwGLxqKELpxKjV 3jekB7d6CiLf3cgMYAK46dutWe3NZ92BSOhfDC1QsaobnqJq+8hWy62w50ZTBI8j 7P0S2O/lOGl3LmtrHz4u9OBJZSQbn4Y6Jyf2N68/KTN+KU7glIX/NO1dj6EXa+MP Kbo6zcZHCeG1I+U3WWwKDKBD3VBqRwJa/MA5z3jUMgKHFssW4QKCAQEA/wz5ghJ9 cSA6eGxINqs6phMYvV7fvksNOL7h9g+qEQYfvM45dIkxybdM2v2AzU438BYZFjvK v2M6kqmE8BSbgOR76djoQoiS6mqNZBWKRbM5pIOAtYugBo/jMkMuBlviQ6prafcp ZsrRRD3pf7ozYROtRVNCD40E8smhOpzvbk0DSVZUjHFD9A6zTMDr2s93ytL4Yhjy dhlpStHJpD0gV3ycvfDLf6696hL3XQSbbkb1h2hBt0uTCrjvb/8yg3QmTpXW478Z WZtbGi/8ZxN5A7wdboZKXMbLTtWv8o6zzKU1WoAl7NC3niYokkrWMPdzA7mMZjPb UdyvR9D2iDocOQKCAQEA9IJc/WYhg5y3NUYE6V3gWJXJQyLWx/wEYbwI7Xfm+sRT o5ZQ6uxFjakq9eMbZFs9YAA8ls04dMQWPWgM/M3TMTEDTGlp8EN7XL9erwXeQxka NFAaj05Oqj/hMIUh1jpDMWnLwAvbmo/8OdFVvPCnryZXxrv9hOy68MorAFlxFQTn zDIdbu5yK+2DcQvsfGl8evahArh64TB/mYY/FaCh/HtYWq2Yur0RsJe9jfV/X9h8 MFP3v/pwmb4qAKk7duAJEQLas2pVhdmMr5fTkv+AQfrK2/KFx1q9+BEam1fBSyM2 TRtPSiPUXG2/S31HeR8d3FK2iQLwfoNweI+obUuKfwKCAQEA+n5Vt2hNh4OtqYQ/ 0GGPsnhi/epmOVEg5TCkYLS1xQ6MsTShvVDZc1Hxy8raa8l8qvIpJWsID7x3VH/l rp2utIKzKNL+GgpksJKB12BIc3g5DtiQ2r0NKCS9cSoF0/z+VpLT+Djlkq1zpmBg KjBIMfnPXZ0N7pQD3iglj5l+lohQyABf/adopBSkCKgZfMfy0WMuy7nQuCjw/qjY L9RRJVuf3fTXCMoof/Ksu9DAjyhmEN1WXZ1+BzPnBcpQZEb5MIl9iiSv0xAAA9JJ RTscyYxCI8EE2+Xq27yHl+SLtlwz8HSXyuSa0lqL0lJp16HQdkIQIHih2N6z0Abt epIIEQKCAQBugwFukpuxNcLktDoCFCkTQNgPnQ6AoxqD5g4O7rOaQeQfuw/lrlad eYluS3CVAlwSKOk5q5XJyNrYdE/yb0D2KdX40mhMbOOz/tbXuopDv3PFkzL99IG/ l1G7sWCiyACYs92gdnuSN2Pj6gabeBYCJw1jSJKEYs73iBi5drOSsX3nH/uHqQQ3 cImJLb98V90oYpJRfXokj9wMUs3ug4TTbhh4G6A9PrlhHKRTJlOkyV8QJNOElLgR 9Q5c8CBtUUMnyid5EiWHctWQg8nf1dVtfOH1WX83pH7aQTjKX3aA9HBVl3NV6i3Q fNYKU5xbVUIU09mmwYpbVCMgiqWwiIUXAoIBABiL8tTtycbGQA66dSnHFaAkiUN8 v9J5xWm8Z2tkiQsSuTxE6+WRgXGfVZ+vvSjo5EsiH8u3ojjfdvMhczmsIGINImiN bthJgcouf2P1Ml9dTVYuSFXjGYNVFXsJOHTbF/j4HpPt5Za6s6KH+zgM7JlBadZF 7IbNFHszGCW2O5bNI9gu3qW/yRbuSkvRc5NwtKnW/y+UUqdektGBGlnjGkM8EXtc eLgrywVN2tooH1P2K7ryVYuboIlYMRGGBZ4bLDnNXurcVyBmxaqri+votdQhuJGb 0I8I18vUuJoB325MOJXQIwXyedwkxt8XFUNGhXvZ9vIxh+vH8RWDwpG6s4k= -----END RSA PRIVATE KEY-----`) } func setupGoodCertificate() (*x509.Certificate, error) { goodCertificatePEMData := setupGoodCertificatePEMData() goodCertificateBlock, rest := pem.Decode(goodCertificatePEMData) if len(rest) != 0 { return 
&x509.Certificate{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCertificateBlock.Type != "CERTIFICATE" { return &x509.Certificate{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "certificate", goodCertificateBlock.Type) } goodCertificate, err := x509.ParseCertificate(goodCertificateBlock.Bytes) if err != nil { return &x509.Certificate{}, err } return goodCertificate, nil } func setupGoodCertificatePEMData() []byte { return []byte(`-----BEGIN CERTIFICATE----- MIIFijCCA3KgAwIBAgIEZmww8zANBgkqhkiG9w0BAQsFADBJMUcwRQYDVQQDEz5B bnNpYmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIENlcnRpZmlj YXRlIEF1dGhvcml0eTAeFw0yMjAxMDcwMDAzNTFaFw0zMjAxMDcwMDAzNTFaMDMx MTAvBgNVBAMMKEFuc2libGUgQXV0b21hdGlvbiBQbGF0Zm9ybSBoeWJyaWRfbm9k ZTEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9aBV0TmDicwokCFx7 CL5oTsSxPU5CEffc3D+xPJ1Ymi0aWyRkNMKGJPRBvYz+PX/d1Sqd4XZ6pW/FESFr zt5filtgY/zSxGlHobIw0JPHr7atLcw8KbuB5vHpe4GewF/87SZjOlKPyG2dMJTG s0msYa6cNzbAIjPFFYtc5BzmlDVTqQ+0TtlcpM/u5RiIc6hQvYgNvRIlinw+pA9R tIH0RLUo8E/jlUIlFNhW1J73zPWUxn7RppeK6hJjQmkByS4cd8m/v/of42XcXQHf xg+tvuxw7TR+WP3fYvKoh4NpCdLc1IbusMJ0zf8l2r31jUkgm895j/Z57GlQFYxV WqPM2QJjcZMg2PvsFpVPrMaTj+cGXrsWufR39SNfXbVznmSdJ5pcCPdhXQ6P1ynb goX5BcniBunPTq70OEgN2999GycUAc3i9X1+Q4GX9PR2o8spqtI2OC3Aaxy7K+VJ Hhw0ikfPX2Ek68YFeyTXXr+7Sh7M/zg42pIJsIXzsT/L65N5DcsHJqaCA72deRHg ROzQIclg+HC5mrCFg7QS16+7IB4U5of+OYnxJwupNwaCfPtQq0UqE/SSvR/xJZcz pB3qO6Kz9euC+0t41+mY4IMyCA9muGSrteBh04d+ra4h1DpE7Alrs4aPDXracnp2 Nb35AuRKw8kOtB/HEaamZGVZ0wIDAQABo4GPMIGMMA4GA1UdDwEB/wQEAwIHgDAd BgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwHwYDVR0jBBgwFoAUYrDy3x41 TQi0QbGxT2rc75WBp60wOgYDVR0RBDMwMYIMaHlicmlkX25vZGUxhwTAqAQBoBsG CSsGAQQBkggTAaAODAxoeWJyaWRfbm9kZTEwDQYJKoZIhvcNAQELBQADggIBALYs YoxuimM9E2I2x3FyUNEqtQ7lPEuANSJJfPWAIk5ApccLYWGfvIzQjCqJ76JBWug2 ACrFy2xqa8JuMSFswATBI6SsGE0/q2QH/Zw3seiciwOtsAgAr2I8MRTtzuSS0fd9 NjLRUSSc338cbeUKooWBCqMGJ1O2EzaTP114V4NNb2naO1xmmFqEjhd3yIPKpc+G A1RJm3bqyrvwpSDbhRSGLRe65WPD4Q8roCa7E0TCsZalhhmYdFOOVjPEsSrsgpj/ uf8dkSk+mWv3/Xc8TIBTyxtzhZsYATHpFcKm3Xc2TZkxnsAehOBVGwsQE0iv7ejz vglty4bLda0Fv5kTqBHwFNwjEIEwBTvrOITjImofIz+NxSQ9NdRydBrdQFc2rwpX T58l96LeiqfpI9H2neIWmEjV41hdL8u1jZVDrq/6nnnYy6itR/WPBmUGxsCIPDNb XvgSwTEn/WkRO5eq0lVp8zzRSB2MZuhDagqV0Whifi2Eh0xrapC3IaFv02Uba/fh vl5i2UFRbZhWQIOTGkbUmk1ePFycKctAeBFneZE8RtT70hDcxgNxq2m0wVPIGDBL afbTv3SDMV2eDorOKPfDI30/C9692GlkS/k7Y8sOvQ4Fgs/T0cZ91uNzqs5GNvkI 59ynl0jGocRDVl5tBNlgBC8VdisrMhPEmeXGIfSN -----END CERTIFICATE-----`) } func setupGoodCertificateRequest() (*x509.CertificateRequest, error) { goodCertificateRequestPEMData := setupGoodCertificateRequestPEMData() goodCertificateRequestBlock, rest := pem.Decode(goodCertificateRequestPEMData) if len(rest) != 0 { return &x509.CertificateRequest{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCertificateRequestBlock.Type != "CERTIFICATE REQUEST" { return &x509.CertificateRequest{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "certificate request", goodCertificateRequestBlock.Type) } goodCertificateRequest, err := x509.ParseCertificateRequest(goodCertificateRequestBlock.Bytes) if err != nil { return &x509.CertificateRequest{}, err } goodCertificateRequestPrivateKeyPEMData := setupGoodCertificateRequestRSAPrivateKeyPEMData() goodCertificateRequestPrivateKeyBlock, rest := pem.Decode(goodCertificateRequestPrivateKeyPEMData) if len(rest) != 0 { return &x509.CertificateRequest{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCertificateRequestPrivateKeyBlock.Type != "RSA PRIVATE KEY" { return &x509.CertificateRequest{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "rsa private 
key", goodCertificateRequestPrivateKeyBlock.Type) } return goodCertificateRequest, nil } func setupGoodCertificateRequestRSAPrivateKeyPEMData() []byte { return []byte(`-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAvWgVdE5g4nMKJAhcewi+aE7EsT1OQhH33Nw/sTydWJotGlsk ZDTChiT0Qb2M/j1/3dUqneF2eqVvxREha87eX4pbYGP80sRpR6GyMNCTx6+2rS3M PCm7gebx6XuBnsBf/O0mYzpSj8htnTCUxrNJrGGunDc2wCIzxRWLXOQc5pQ1U6kP tE7ZXKTP7uUYiHOoUL2IDb0SJYp8PqQPUbSB9ES1KPBP45VCJRTYVtSe98z1lMZ+ 0aaXiuoSY0JpAckuHHfJv7/6H+Nl3F0B38YPrb7scO00flj932LyqIeDaQnS3NSG 7rDCdM3/Jdq99Y1JIJvPeY/2eexpUBWMVVqjzNkCY3GTINj77BaVT6zGk4/nBl67 Frn0d/UjX121c55knSeaXAj3YV0Oj9cp24KF+QXJ4gbpz06u9DhIDdvffRsnFAHN 4vV9fkOBl/T0dqPLKarSNjgtwGscuyvlSR4cNIpHz19hJOvGBXsk116/u0oezP84 ONqSCbCF87E/y+uTeQ3LByamggO9nXkR4ETs0CHJYPhwuZqwhYO0EtevuyAeFOaH /jmJ8ScLqTcGgnz7UKtFKhP0kr0f8SWXM6Qd6juis/XrgvtLeNfpmOCDMggPZrhk q7XgYdOHfq2uIdQ6ROwJa7OGjw162nJ6djW9+QLkSsPJDrQfxxGmpmRlWdMCAwEA AQKCAgAYpMU918DVVeB45bmarH2RtsXyxaY8BHh6fRGwbtGOKKk7p6Ypf4/bwk0h jJcesUPEyip8VkyDEqljO6Jck5LtXWQcO4Y2qiWEk25K2aiQr7UV/UJ4hXmHIlRR a6dmz2Lt1rFK8diyKoLQk/SXkdZBHoiIfMqb3xdbSj1Svb9g5OG3W+TWMfgoDK2d SKOThXDd6HseRn5o8gbOY6w2lkTeUl470GCknNP5XSf0zD9OwJUnYVg40PMguOnh jk95UwKz44YVc9YIdh7XuCY5Rlzd31yGKObOlaeafBZUhSU88nFEGEbiIY97gR30 f+x9WX+tRXiOOYP5QyBEsjkLFfr/v+MNVewJXmZnYNt7XUpE7woGznqFFSCgu1lB I8piwmFuJuAeW5ykN0mHJKaXohqh8fM0HvwQTpMPbZb5Pxy7+0JXAEdlBOMbc0zr vjBeZuu3Q6uV3E2F65DDP5Mgjl3Jjv/Ln/kbR/i7PrvbbXvw52jeuflmIuqr1Vty OcLSW/kJZMCHlWV4ydg91szgBoFLdP4pjFKK8jxfWrcrnpKFK5olM5zW/SlgDaPl KQJpK0jgLKN9FSt4fjjM7OpWVL8evaQpt9IPD6CVfX8OYofju/4U95UB2VgZ6WED NNfYfXTc4SykJ3yCIWBJvRDCVaKLyuxvhhrqYFnCPYsA5axDwQKCAQEA3cEASfKb fHOKqo+x0NotcXE20A9QiDQNziXYSqIWSYVVnKeRcXKeQcrsbZ0WeiLIU2PHNfqA gPZj4vYbDh+ir3qKL0F4KT0NM2OtkT088Mwt0biM4DAhDnEFVggroexwNqdzkQ5V 0D80xWVjyabrIMZZ+CDrGTxbcsdNbflNc+mFWudSdYXJTdkjyJmZQ+DP6/4ZLzWD xA8elyqAelEqpnZ1C4OMK/hlczhYjRe82jlVHU7LYG9WBUcBdDFoEZN30fqWwAev VuDk0i2iFnU8SSQMIkd92Xd7Vcm4/S8CWGO8UOWBtWOs5Yi2GWx3h4No32u8991t HwAh7Aw6IaNAcwKCAQEA2qg8qsDQ8cfDpaOOpQYPrPJ3yXmPYl3LsgYhHhP6gVRZ snMZf57yXRxbNA7gsUE7yaGyZJvd/aAgJoiIrCADAsvnYiIZbdPuVW1abeuBp2dl TgLy3yO2zoZWkIfwIvrmYNsIj6UfWtAu5+l6U8HjEKTaGU+P6jNnFmg6U5N3sEYl midGoeekxCb0RODpBd7wQnmS08QxpFVl3tyLE7h1c5Svrz5e1rT3LocvQHowrKI8 PLPCQ/DoPqN+AdHHKBkogUmAy39n3yaLUENeOnrQEkRH89/Up3ti60t9Zm4XIOPS yjrH+UC1AYjpEsHvt7gp1qrEin0azfOlDDog0xIJIQKCAQBXGg4Q/bhRI9kdsZ94 l5ot6vhKtRk+xqN9i6PvfMcgTb0Y8UDRvOGomhpP29oOU0c4agK/kwuSnDUT2vM5 kveF0a4hbafTGuXqf4aXe+RJ+QkuTT0Yeg8h6wbbig4JZiOVdCSSn3zZdVo3BuIx PN0yXHbOXa+6k9iftuUVA02G7/yvfhkHjGtLprTFgH+RB+bjnUUrI+wDwKSVX95j LSqBhBnpUdxnEQRYQ5OUp/RICykBgAemc1ToccGCcD2eWbxAwBsihmI6z89RNxdp ltb9K6mQdWBbYPES+08sM6DKMzKu29cc9m/dINvWFKLGmblBuMHa4iCQgq7QVuUm fUc9AoIBAAK+FXS20mgV71hGguWC+/NZXzNBuRCsyH9FIGk2KvAwVKeuVeWJ4tRU JLi0bxEKFmvjAU/NvK0PsxeIot410JPaZt6OOu9rtxOPOZEjYQ7udxDRbKZr1kEk 4jUR32sw5m7UkHjsw0XqUoxKG2Sti+GLj8i9fLqdRm+7gu/Fa7zkRFh4oaQqOaDU NEAg6gtOzoim6baKQW0kPRDch0oKCD8Y4FM8XwgmYOtOesM+RRoFNug0AP3FKcSP XGc9cFgKaL2mZF0pDe54l0q8bZQiMjXocVaXuh0XqRf2bG46d0PMV7p9nkB8FUkd pH69u/n771pn7esmwo1OvwU2PW8oTMECggEBANVncO8a3+z80LEhF5cp59nKp/bT sdwMzsBoCdPxwPAVF0oxtXWmhkHKypgnGmb7E2wT/cN9QhIrPlU08QVfGHb1eMaF 8E6pH4i+TxpgmwjwMkQxehMm7X9Jp0gSVBmOMNYrEgxHJLiyZb8DUiAcOt/0Jn1E lAlvptr8845GB25Pxjh8fFjaxLvhu9wfN8hVDm5HBUS85DbqWM2lmJv6YrfjoSP3 FY6jNzXZS8r2LVv399BSRG1BRG4OptajTMXAf33P4/b2KJfZ+UVq6UnxjEM5JUO8 +/DkhSrWkbU2peD/gkIhH+rgGN9aFA39XEcO3pA5L7qHBURMu2lO0NsmG24= -----END RSA PRIVATE KEY-----`) } func setupGoodCertificateRequestPEMData() []byte { return []byte(`-----BEGIN CERTIFICATE REQUEST----- 
MIIExTCCAq0CAQAwMzExMC8GA1UEAwwoQW5zaWJsZSBBdXRvbWF0aW9uIFBsYXRm b3JtIGh5YnJpZF9ub2RlMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB AL1oFXROYOJzCiQIXHsIvmhOxLE9TkIR99zcP7E8nViaLRpbJGQ0woYk9EG9jP49 f93VKp3hdnqlb8URIWvO3l+KW2Bj/NLEaUehsjDQk8evtq0tzDwpu4Hm8el7gZ7A X/ztJmM6Uo/IbZ0wlMazSaxhrpw3NsAiM8UVi1zkHOaUNVOpD7RO2Vykz+7lGIhz qFC9iA29EiWKfD6kD1G0gfREtSjwT+OVQiUU2FbUnvfM9ZTGftGml4rqEmNCaQHJ Lhx3yb+/+h/jZdxdAd/GD62+7HDtNH5Y/d9i8qiHg2kJ0tzUhu6wwnTN/yXavfWN SSCbz3mP9nnsaVAVjFVao8zZAmNxkyDY++wWlU+sxpOP5wZeuxa59Hf1I19dtXOe ZJ0nmlwI92FdDo/XKduChfkFyeIG6c9OrvQ4SA3b330bJxQBzeL1fX5DgZf09Haj yymq0jY4LcBrHLsr5UkeHDSKR89fYSTrxgV7JNdev7tKHsz/ODjakgmwhfOxP8vr k3kNywcmpoIDvZ15EeBE7NAhyWD4cLmasIWDtBLXr7sgHhTmh/45ifEnC6k3BoJ8 +1CrRSoT9JK9H/EllzOkHeo7orP164L7S3jX6ZjggzIID2a4ZKu14GHTh36triHU OkTsCWuzho8NetpyenY1vfkC5ErDyQ60H8cRpqZkZVnTAgMBAAGgTTBLBgkqhkiG 9w0BCQ4xPjA8MDoGA1UdEQQzMDGCDGh5YnJpZF9ub2RlMYcEwKgEAaAbBgkrBgEE AZIIEwGgDgwMaHlicmlkX25vZGUxMA0GCSqGSIb3DQEBCwUAA4ICAQAhxpmJItCN qn/FRV8p0PpkgtgXb4BKgmKCIKl8sLV+4oaxVVPBV4fb8qD6cm2/BhlSTGiAIy0i HYoL7yZafyICl+9fVF3hqaN7LwAVs2K/ijD4XSW/24UG56YW55CmIKj/PqPL/dRy 1mrfoDnUKS28zEjBlv1n/87RwULjjGwbUHc3PQxZGecfhfVzLsM5/eSn1Mc38Xdo QEsq7ByJLKZC6sgLwk9hzvEq9d4dWLvpQKXgvQfsTPIN0aezi/gA/Ga41FxhPqwf D1dHKH6Btt74AlZIRNVocY7MmMgWWRD+y4Rhf2LKs0dd5Id21JGGdrZ3GUMnn0i/ k1lhXhT/V49vEGcpQVp1ltzq16A/i3HZD8cclf7vO7i3Vt5KMmYlq2ulvJv7ufho kEoxUm4vffX2XMbBXBWgXrA8zaz06X9mwqglCwMw/2dGvKnXUmdK8Q63EZRlM92d FD2xRVgnG0qdElOI5Df162X+TikKNy6ud0ATRY/PcNqRcPfJBxviRdpkpOcgAv2e YiV449ZkiDxWyQ3A/x88YZ4e34Ca9fs20e7iflz0ezUcc8so9I5K0jbm8Cdmyj7R y6WbCHkbpqxRV/05sHzG0apj07qTNyv8RJSXqw8FSbd+4g4c7u9xaUaLr2OSg8hn IemSZj8QaR2JNPwXEbBEh8uPDhNvPBQFrw== -----END CERTIFICATE REQUEST-----`) } func setupGoodCertificateRequestRsaPrivateKey() (*rsa.PrivateKey, error) { goodCertificateRequestRsaPrivateKeyPEMData := setupGoodCertificateRequestRsaPrivateKeyPEMData() goodCertificateRequestRsaPrivateKeyBlock, rest := pem.Decode(goodCertificateRequestRsaPrivateKeyPEMData) if len(rest) != 0 { return &rsa.PrivateKey{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodCertificateRequestRsaPrivateKeyBlock.Type != "RSA PRIVATE KEY" { return &rsa.PrivateKey{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "RSA Private Key", goodCertificateRequestRsaPrivateKeyBlock.Type) } goodCertificateRequestRsaPrivateKey, err := x509.ParsePKCS1PrivateKey(goodCertificateRequestRsaPrivateKeyBlock.Bytes) if err != nil { return &rsa.PrivateKey{}, err } return goodCertificateRequestRsaPrivateKey, nil } func setupGoodCertificateRequestRsaPrivateKeyPEMData() []byte { return []byte(`-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAp17EU0yKFdjqoec7zSc0AWDmT6cZpys8C74HqKeOArJPvswE b4OVyKZj20hFNj2N6TRry0x+pw+eP2XziEc0jdIqb33K6SbZKyezmKNYF+0TlzN9 Md249inCf3DIDImTEC6j3oCobTByxs9E1/tDHyeY6k5aMFY0gMlISuqTLX9iEqR6 jgOrr5i4VIZK7lK1JBzJ28FjE86zvEAzGnS71foYlmTWRWn+l7d5TQUWPsq17khu 2TnP+lLFg2+DVQCy9ZidCI30noiufEn/FR1GODBI8vFVtpXwEVP5nDZMa1GNQwTa ec3BzIcKC5CyHfdD8hcs1zAwr2cR6xhMLWdt1AGGP8AL8NV1puVyQYi82i9dnUrb h3mYLQFDrnEB7xDoJbz4pVOryn+TxXcaqChDsF7YC1E5cOKLZtm1seadiz5cZDwK WwL+1GsYk23KbiDIfFk00pPxFIKchFI6YYjdLqp6dnx/TJsp/IYEgfyn+hYSGRZd 1TDTesfFU5Ki5M1RvFHePIBR362lUF72i3Awwi8U3nWuk4erk8Nswonwc121sWSo 5Yp8fDBDP5CANcHv8JcLGMKUDYZGzqK0d3iehMXZdQK/Jd4x6dvd4Qr8VDbsxuWf aDwzEOjEpvMcawTdqWGTS9wwlmidJ47jY2HjUe5e7PvYm1+UQ/rgEoguoTsCAwEA AQKCAgApCj3Nxyjc7pGqHY82YPSJmf8fbPQHX7ybjH9IRb22v456VICJ75Qc3WAC 9xexkypnEqmT8i/kOxospY0vz3X9iJlLOWc2AIaj5FpPhU4mn8V7/+4k+h9OjTLa GQeUu29KOoWIG7gw/f5G7bAN3di5nPYMDiZjT+AT7EdDx31LXL7pn1dF13ST3Djm 
0P8yrSkpr713m1X2F2tPL9bYF+OvNmItDpDT+IerIBwoXKT1xLMTuMMllN2Anic8 cW2cvE0ll8R5woVHEnDmnSZlQQZk5MIegDrqSJ3TQeok+dOHRToEQv5ne6KXyk0W RObIHkeU50XhhjmJ6RYltZGIWKI/QohWBECINhjmBxqGKBz5ultIOmeLPd5IlC+Y ow+zQk8WuYaUIX2PAzhFnhRfxUsv2Zoljt2J4YC3oKsB9cynrhonozvwEJy9MJJF a48+meJ6Wkm6LtcREPgbjFtfhrPKQlD+/kfHR6mxhjR977lgZAvrGhlBTZPKx/MF r0ZOP34+Cw2ZDrHO1L7GQVEjY0JM2B6lCEYtI8Mxy04gqa+kRIjL+04WhjT1w2Lk 71tOBNNB2AqxK+aptqxLG2By4mlW7WliGZI0j/6caXkg02olL/WqeBWTKSoUXLd6 LD523A02VHQgBDhTdIjezKI1FpAVKCXdHuwgqSWPQiQx6FkdAQKCAQEA1YinOp0U 1/9nq5f9Oet5uOLLGNG5lpzvCY9tPk9gWjTlAes5aQ8Pftg+P6dGgAsVqGxT2NvS uNSqYIBdm7Uy7jUG9m6PjQeQ7+oQ1vJqbryqr4QDwnAtHdWFfXak17YZs9YuhesP l5h4Oxi43Q2tZalMUY/rAmn+URqI5jlSWYiH6D9p2j9mEzvFrPQLvsbDb6zbxlAv 8oaqOiOrQa+q3T+loeRX0ErN9qf84Vw7tc7Qp5a4siWyWIHKGHHVveB+ITcHJ2+7 KJf7saRAjcRyHxX3tsPyRVSfg37nIMoPHilnN8bbhgBs0eMq1zcQgEYVceWx4pcZ GonabS85TBsqwQKCAQEAyKfZoot+oOOfWXMVBD761o4msd3fxRJlyS9RsPzRx7VO rQNTw9fCmurcFnF444fCfnEVJ/bCh/rWETyt1wVQhuy+th16hq4NEwGOD87WBXCn b3K8ZNbFDB9WL30q7bLe9UBw4j1ciHGKqpkjEACBrrdBF3HxVjBCQiHUKci3KK7E j6rtmR97UJj3XtTU0XiFm2FNKRa+aw0OQ3rr5Bw9ZURd9aXoDCXUMoXgfFnUxLWd y8Mdh5/PWmf8/o7/WqWpwejRJqfcGR1576QJXZjbduXG5zviDjwe5VKjgH5XRe8x ytCa5Z6APGWA4hhuZYfERcCsirEPO4ruew+iE0c2+wKCAQAA7o28Rb83ihfLuegS /qITWnoEa7XhoGGyqvuREAudmSl+rqYbfUNWDF+JK5O1L1cy2vYqthrfT55GuYiv C0VjoLudC7J4rRXG1kCoj3pDbXNZPLw/dvnbbXkdqQzjHBpUnJSrZPE2eiXcLCly XYLqNKjumjAuXIQNmo4KYymm1l+xdcVifHBXmSUtsgrzFC76J8j1vpfW+Rt5EXrH 2JpoSMTSRgrUD9+COg1ydlKUYoiqko/PxzZWCIr3PFfwcjBauMDBPU2VycQBbHQT qk3NMO1Z0NUX1Fy12DHuBLO4L/oRVj7TAOF4sQMY2VarGKMzUgtKr9oeMYfQfipD 2MKBAoIBAQCyCFuNYP+FePDVyMoI7mhZHd8vSZFVpbEyBA4TXv4yl6eq0pzr0vAT y/Zi42NDXh0vWt5Oix6mz+RHfvMvKMP+MugzZYxlGuD20BZf6ED0qrOkqsSFJBnJ W7R4hjIknOQ97mM6GP+VAEjsfNsjQ4/MmUPjrXFX65GeY61/NVtteUNlxV7y0X/0 TwSM24HIKYtCBd8Uad2h1f+l19acmoHO7A4B+qYcwSO5gBdhvcKOliXfuMrmnuC3 cjSDGBVxNDOenReVmLIshn6+JWk55noy0ETevb8gqi8vgVcYlwCQSF6BeP02Zp+Y 9uaXtN2esAtxaDavB9JgHjDid0hymmkpAoIBABmtcLim8rEIo82NERAUvVHR7MxR hXKx9g3bm1O0w7kJ16jyf5uyJ85JNi1XF2/AomSNWH6ikHuX5Xj6vOdL4Ki9jPDq TOlmvys2LtCAMOM3e3NvzIfTnrQEurGusCQKxCbnlRk2W13j3uc2gVFgB3T1+w2H lSEhzuFpDrxKrsE9QcCf7/Cju+2ir9h3FsPDRKoxfRJ2/onsgQ/Q7NODRRQGjwxw P/Hli/j17jC7TdgC26JhtVHH7K5xC6iNL03Pf3GTSvwN1vK1BY2reoz1FtQrGZvM rydzkVNNVeMVX2TER9yc8AdFqkRlaBWHmO61rYmV+N1quLM0uMVsu55ZNCY= -----END RSA PRIVATE KEY-----`) } func setupGoodPrivateKey() (*rsa.PrivateKey, error) { goodPrivateKeyPEMData := setupGoodPrivateKeyPEMData() goodPrivateKeyBlock, rest := pem.Decode(goodPrivateKeyPEMData) if len(rest) != 0 { return &rsa.PrivateKey{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodPrivateKeyBlock.Type != "PRIVATE KEY" { return &rsa.PrivateKey{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "private key", goodPrivateKeyBlock.Type) } goodResult, err := x509.ParsePKCS8PrivateKey(goodPrivateKeyBlock.Bytes) if err != nil { return &rsa.PrivateKey{}, err } goodPrivateKey := goodResult.(*rsa.PrivateKey) return goodPrivateKey, nil } func setupGoodPrivateKeyPEMData() []byte { return []byte(`-----BEGIN PRIVATE KEY----- MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCnXsRTTIoV2Oqh 5zvNJzQBYOZPpxmnKzwLvgeop44Csk++zARvg5XIpmPbSEU2PY3pNGvLTH6nD54/ ZfOIRzSN0ipvfcrpJtkrJ7OYo1gX7ROXM30x3bj2KcJ/cMgMiZMQLqPegKhtMHLG z0TX+0MfJ5jqTlowVjSAyUhK6pMtf2ISpHqOA6uvmLhUhkruUrUkHMnbwWMTzrO8 QDMadLvV+hiWZNZFaf6Xt3lNBRY+yrXuSG7ZOc/6UsWDb4NVALL1mJ0IjfSeiK58 Sf8VHUY4MEjy8VW2lfARU/mcNkxrUY1DBNp5zcHMhwoLkLId90PyFyzXMDCvZxHr GEwtZ23UAYY/wAvw1XWm5XJBiLzaL12dStuHeZgtAUOucQHvEOglvPilU6vKf5PF dxqoKEOwXtgLUTlw4otm2bWx5p2LPlxkPApbAv7UaxiTbcpuIMh8WTTSk/EUgpyE 
UjphiN0uqnp2fH9Mmyn8hgSB/Kf6FhIZFl3VMNN6x8VTkqLkzVG8Ud48gFHfraVQ XvaLcDDCLxTeda6Th6uTw2zCifBzXbWxZKjlinx8MEM/kIA1we/wlwsYwpQNhkbO orR3eJ6Exdl1Ar8l3jHp293hCvxUNuzG5Z9oPDMQ6MSm8xxrBN2pYZNL3DCWaJ0n juNjYeNR7l7s+9ibX5RD+uASiC6hOwIDAQABAoICACkKPc3HKNzukaodjzZg9ImZ /x9s9AdfvJuMf0hFvba/jnpUgInvlBzdYAL3F7GTKmcSqZPyL+Q7GiyljS/Pdf2I mUs5ZzYAhqPkWk+FTiafxXv/7iT6H06NMtoZB5S7b0o6hYgbuDD9/kbtsA3d2Lmc 9gwOJmNP4BPsR0PHfUtcvumfV0XXdJPcOObQ/zKtKSmvvXebVfYXa08v1tgX4682 Yi0OkNP4h6sgHChcpPXEsxO4wyWU3YCeJzxxbZy8TSWXxHnChUcScOadJmVBBmTk wh6AOupIndNB6iT504dFOgRC/md7opfKTRZE5sgeR5TnReGGOYnpFiW1kYhYoj9C iFYEQIg2GOYHGoYoHPm6W0g6Z4s93kiUL5ijD7NCTxa5hpQhfY8DOEWeFF/FSy/Z miWO3YnhgLegqwH1zKeuGiejO/AQnL0wkkVrjz6Z4npaSbou1xEQ+BuMW1+Gs8pC UP7+R8dHqbGGNH3vuWBkC+saGUFNk8rH8wWvRk4/fj4LDZkOsc7UvsZBUSNjQkzY HqUIRi0jwzHLTiCpr6REiMv7ThaGNPXDYuTvW04E00HYCrEr5qm2rEsbYHLiaVbt aWIZkjSP/pxpeSDTaiUv9ap4FZMpKhRct3osPnbcDTZUdCAEOFN0iN7MojUWkBUo Jd0e7CCpJY9CJDHoWR0BAoIBAQDViKc6nRTX/2erl/0563m44ssY0bmWnO8Jj20+ T2BaNOUB6zlpDw9+2D4/p0aACxWobFPY29K41KpggF2btTLuNQb2bo+NB5Dv6hDW 8mpuvKqvhAPCcC0d1YV9dqTXthmz1i6F6w+XmHg7GLjdDa1lqUxRj+sCaf5RGojm OVJZiIfoP2naP2YTO8Ws9Au+xsNvrNvGUC/yhqo6I6tBr6rdP6Wh5FfQSs32p/zh XDu1ztCnlriyJbJYgcoYcdW94H4hNwcnb7sol/uxpECNxHIfFfe2w/JFVJ+Dfucg yg8eKWc3xtuGAGzR4yrXNxCARhVx5bHilxkaidptLzlMGyrBAoIBAQDIp9mii36g 459ZcxUEPvrWjiax3d/FEmXJL1Gw/NHHtU6tA1PD18Ka6twWcXjjh8J+cRUn9sKH +tYRPK3XBVCG7L62HXqGrg0TAY4PztYFcKdvcrxk1sUMH1YvfSrtst71QHDiPVyI cYqqmSMQAIGut0EXcfFWMEJCIdQpyLcorsSPqu2ZH3tQmPde1NTReIWbYU0pFr5r DQ5DeuvkHD1lRF31pegMJdQyheB8WdTEtZ3Lwx2Hn89aZ/z+jv9apanB6NEmp9wZ HXnvpAldmNt25cbnO+IOPB7lUqOAfldF7zHK0JrlnoA8ZYDiGG5lh8RFwKyKsQ87 iu57D6ITRzb7AoIBAADujbxFvzeKF8u56BL+ohNaegRrteGgYbKq+5EQC52ZKX6u pht9Q1YMX4krk7UvVzLa9iq2Gt9Pnka5iK8LRWOgu50LsnitFcbWQKiPekNtc1k8 vD92+dtteR2pDOMcGlSclKtk8TZ6JdwsKXJdguo0qO6aMC5chA2ajgpjKabWX7F1 xWJ8cFeZJS2yCvMULvonyPW+l9b5G3kResfYmmhIxNJGCtQP34I6DXJ2UpRiiKqS j8/HNlYIivc8V/ByMFq4wME9TZXJxAFsdBOqTc0w7VnQ1RfUXLXYMe4Es7gv+hFW PtMA4XixAxjZVqsYozNSC0qv2h4xh9B+KkPYwoECggEBALIIW41g/4V48NXIygju aFkd3y9JkVWlsTIEDhNe/jKXp6rSnOvS8BPL9mLjY0NeHS9a3k6LHqbP5Ed+8y8o w/4y6DNljGUa4PbQFl/oQPSqs6SqxIUkGclbtHiGMiSc5D3uYzoY/5UASOx82yND j8yZQ+OtcVfrkZ5jrX81W215Q2XFXvLRf/RPBIzbgcgpi0IF3xRp3aHV/6XX1pya gc7sDgH6phzBI7mAF2G9wo6WJd+4yuae4LdyNIMYFXE0M56dF5WYsiyGfr4laTnm ejLQRN69vyCqLy+BVxiXAJBIXoF4/TZmn5j25pe03Z6wC3FoNq8H0mAeMOJ3SHKa aSkCggEAGa1wuKbysQijzY0REBS9UdHszFGFcrH2DdubU7TDuQnXqPJ/m7Inzkk2 LVcXb8CiZI1YfqKQe5flePq850vgqL2M8OpM6Wa/KzYu0IAw4zd7c2/Mh9OetAS6 sa6wJArEJueVGTZbXePe5zaBUWAHdPX7DYeVISHO4WkOvEquwT1BwJ/v8KO77aKv 2HcWw8NEqjF9Enb+ieyBD9Ds04NFFAaPDHA/8eWL+PXuMLtN2ALbomG1UcfsrnEL qI0vTc9/cZNK/A3W8rUFjat6jPUW1CsZm8yvJ3ORU01V4xVfZMRH3JzwB0WqRGVo FYeY7rWtiZX43Wq4szS4xWy7nlk0Jg== -----END PRIVATE KEY-----`) } func setupGoodPublicKey() (*rsa.PublicKey, error) { goodPublicKeyPEMData := setupGoodPublicKeyPEMData() goodPublicKeyBlock, rest := pem.Decode(goodPublicKeyPEMData) if len(rest) != 0 { return &rsa.PublicKey{}, fmt.Errorf(excessPEMDataFormatString, rest) } if goodPublicKeyBlock.Type != "PUBLIC KEY" { return &rsa.PublicKey{}, fmt.Errorf(wrongPEMBlockTypeFormatString, "public key", goodPublicKeyBlock.Type) } goodResult, err := x509.ParsePKIXPublicKey(goodPublicKeyBlock.Bytes) if err != nil { return &rsa.PublicKey{}, err } goodPublicKey := goodResult.(*rsa.PublicKey) return goodPublicKey, nil } func setupGoodPublicKeyPEMData() []byte { return []byte(`-----BEGIN PUBLIC KEY----- MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp17EU0yKFdjqoec7zSc0 AWDmT6cZpys8C74HqKeOArJPvswEb4OVyKZj20hFNj2N6TRry0x+pw+eP2XziEc0 
jdIqb33K6SbZKyezmKNYF+0TlzN9Md249inCf3DIDImTEC6j3oCobTByxs9E1/tD HyeY6k5aMFY0gMlISuqTLX9iEqR6jgOrr5i4VIZK7lK1JBzJ28FjE86zvEAzGnS7 1foYlmTWRWn+l7d5TQUWPsq17khu2TnP+lLFg2+DVQCy9ZidCI30noiufEn/FR1G ODBI8vFVtpXwEVP5nDZMa1GNQwTaec3BzIcKC5CyHfdD8hcs1zAwr2cR6xhMLWdt 1AGGP8AL8NV1puVyQYi82i9dnUrbh3mYLQFDrnEB7xDoJbz4pVOryn+TxXcaqChD sF7YC1E5cOKLZtm1seadiz5cZDwKWwL+1GsYk23KbiDIfFk00pPxFIKchFI6YYjd Lqp6dnx/TJsp/IYEgfyn+hYSGRZd1TDTesfFU5Ki5M1RvFHePIBR362lUF72i3Aw wi8U3nWuk4erk8Nswonwc121sWSo5Yp8fDBDP5CANcHv8JcLGMKUDYZGzqK0d3ie hMXZdQK/Jd4x6dvd4Qr8VDbsxuWfaDwzEOjEpvMcawTdqWGTS9wwlmidJ47jY2Hj Ue5e7PvYm1+UQ/rgEoguoTsCAwEAAQ== -----END PUBLIC KEY-----`) } func TestCreateCAValid(t *testing.T) { type args struct { opts *certificates.CertOptions } goodCaCertificate, err := setupGoodCaCertificate() if err != nil { t.Errorf("Error setting up certificate: %v", err) } goodCaCertificate.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign goodCaCertificate.ExtKeyUsage = []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth, } goodCaTimeAfterString := "2032-01-07T00:03:51Z" goodCaTimeAfter, err := time.Parse(time.RFC3339, goodCaTimeAfterString) if err != nil { t.Errorf("Error parsing time %s: %v", goodCaTimeAfterString, err) } goodCaTimeBeforeString := "2022-01-07T00:03:51Z" goodCaTimeBefore, err := time.Parse(time.RFC3339, goodCaTimeBeforeString) if err != nil { t.Errorf("Error parsing time %s: %v", goodCaTimeBeforeString, err) } goodCertOptions := certificates.CertOptions{ Bits: 4096, CommonName: "Ansible Automation Controller Nodes Mesh Certificate Authority", NotAfter: goodCaTimeAfter, NotBefore: goodCaTimeBefore, } goodCaPrivateKey, err := setupGoodCaRsaPrivateKey() if err != nil { t.Fatal(err) } tests := []struct { name string args args want *certificates.CA }{ { name: "Positive test", args: args{ opts: &goodCertOptions, }, want: &certificates.CA{ Certificate: goodCaCertificate, PrivateKey: goodCaPrivateKey, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockRsa := mock_certificates.NewMockRsaer(ctrl) mockRsa. EXPECT(). GenerateKey(gomock.Any(), gomock.Any()). 
DoAndReturn( func(random io.Reader, bits int) (*rsa.PrivateKey, error) { return goodCaPrivateKey, nil }, ) got, _ := certificates.CreateCA(tt.args.opts, mockRsa) if !reflect.DeepEqual(got.PrivateKey, tt.want.PrivateKey) { t.Errorf("CreateCA() Private Key got = %+v, want = %+v", got.PrivateKey, tt.want.PrivateKey) return } certWant := tt.want.Certificate certGot := got.Certificate if certGot.BasicConstraintsValid != true { t.Errorf("CreateCA() Certificate BasicConstraintsValid got = %+v, want = %+v", certGot.BasicConstraintsValid, true) return } if !reflect.DeepEqual(certGot.ExtraExtensions, certWant.ExtraExtensions) { t.Errorf("CreateCA() Certificate ExtraExtensions got = %+v, want = %+v", certGot.ExtraExtensions, certWant.ExtraExtensions) return } if !reflect.DeepEqual(certGot.ExtKeyUsage, certWant.ExtKeyUsage) { t.Errorf("CreateCA() Certificate ExtKeyUsage got = %+v, want = %+v", certGot.ExtKeyUsage, certWant.ExtKeyUsage) return } if certGot.IsCA != true { t.Errorf("CreateCA() Certificate IsCA got = %+v, want = %+v", certGot.IsCA, true) return } if !reflect.DeepEqual(certGot.Issuer, certWant.Issuer) { t.Errorf("CreateCA() Certificate Issuer got = %+v, want = %+v", certGot.Issuer, certWant.Issuer) return } if !reflect.DeepEqual(certGot.KeyUsage, certWant.KeyUsage) { t.Errorf("CreateCA() Certificate KeyUsage got = %+v, want = %+v", certGot.KeyUsage, certWant.KeyUsage) return } if !reflect.DeepEqual(certGot.NotAfter, certWant.NotAfter) { t.Errorf("CreateCA() Certificate NotAfter got = %+v, want = %+v", certGot.NotAfter, certWant.NotAfter) return } if !reflect.DeepEqual(certGot.NotBefore, certWant.NotBefore) { t.Errorf("CreateCA() Certificate NotBefore got = %+v, want = %+v", certGot.NotBefore, certWant.NotBefore) return } if !reflect.DeepEqual(certGot.PublicKeyAlgorithm, certWant.PublicKeyAlgorithm) { t.Errorf("CreateCA() Certificate PublicKeyAlgorithm got = %+v, want = %+v", certGot.PublicKeyAlgorithm, certWant.PublicKeyAlgorithm) return } if !reflect.DeepEqual(certGot.SignatureAlgorithm, certWant.SignatureAlgorithm) { t.Errorf("CreateCA() Certificate SignatureAlgorithm got = %+v, want = %+v", certGot.SignatureAlgorithm, certWant.SignatureAlgorithm) return } if !reflect.DeepEqual(certGot.Subject, certWant.Subject) { t.Errorf("CreateCA() Certificate Subject got = %+v, want = %+v", certGot.Subject, certWant.Subject) return } if !reflect.DeepEqual(certGot.Version, certWant.Version) { t.Errorf("CreateCA() Certificate Version got = %+v, want = %+v", certGot.Version, certWant.Version) return } }) } } func TestCreateCANegative(t *testing.T) { type args struct { opts *certificates.CertOptions } badCertOptions := certificates.CertOptions{ Bits: -1, } tests := []struct { name string args args want *certificates.CA wantErr error }{ { name: "Negative test for Common Name", args: args{ opts: &badCertOptions, }, want: nil, wantErr: fmt.Errorf("must provide CommonName"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, gotErr := certificates.CreateCA(tt.args.opts, &certificates.RsaWrapper{}) if gotErr == nil || gotErr.Error() != tt.wantErr.Error() { t.Errorf("CreateCA() error = %v, wantErr = %v", gotErr, tt.wantErr) return } }) } } func setupBadCertRequest() (certificates.CertOptions, *x509.CertificateRequest, error) { badCaTimeAfterString := "2021-01-07T00:03:51Z" badCaTimeAfter, err := time.Parse(time.RFC3339, badCaTimeAfterString) if err != nil { return certificates.CertOptions{}, &x509.CertificateRequest{}, err } badCaTimeBeforeString := "2022-01-07T00:03:51Z" badCaTimeBefore, 
err := time.Parse(time.RFC3339, badCaTimeBeforeString) if err != nil { return certificates.CertOptions{}, &x509.CertificateRequest{}, err } badDNSName := "receptor.TEST.BAD" badIPAddress := net.ParseIP("127.0.0.1").To4() badNodeIDs := badDNSName badCertOptions := certificates.CertOptions{ Bits: -1, CommonName: "Ansible Automation Controller Nodes Mesh Bad", CertNames: certificates.CertNames{ DNSNames: []string{badDNSName}, IPAddresses: []net.IP{badIPAddress}, NodeIDs: []string{badNodeIDs}, }, NotAfter: badCaTimeAfter, NotBefore: badCaTimeBefore, } badSubjectAltNamesExtension, err := utils.MakeReceptorSAN(badCertOptions.CertNames.DNSNames, badCertOptions.IPAddresses, badCertOptions.NodeIDs) if err != nil { return badCertOptions, &x509.CertificateRequest{}, err } badCertificateRequest := &x509.CertificateRequest{ Attributes: nil, DNSNames: []string{badDNSName}, EmailAddresses: nil, Extensions: []pkix.Extension{ *badSubjectAltNamesExtension, }, ExtraExtensions: nil, IPAddresses: []net.IP{badIPAddress}, PublicKeyAlgorithm: x509.RSA, SignatureAlgorithm: x509.SHA256WithRSA, Subject: pkix.Name{ CommonName: badCertOptions.CommonName, Country: nil, ExtraNames: []pkix.AttributeTypeAndValue{}, Locality: nil, Names: []pkix.AttributeTypeAndValue{}, Organization: nil, OrganizationalUnit: nil, PostalCode: nil, Province: nil, SerialNumber: "", StreetAddress: nil, }, URIs: nil, Version: 0, } return badCertOptions, badCertificateRequest, nil } func setupGoodCertRequest() (certificates.CertOptions, *x509.CertificateRequest, error) { goodCaTimeAfterString := "2032-01-07T00:03:51Z" goodCaTimeAfter, err := time.Parse(time.RFC3339, goodCaTimeAfterString) if err != nil { return certificates.CertOptions{}, &x509.CertificateRequest{}, err } goodCaTimeBeforeString := "2022-01-07T00:03:51Z" goodCaTimeBefore, err := time.Parse(time.RFC3339, goodCaTimeBeforeString) if err != nil { return certificates.CertOptions{}, &x509.CertificateRequest{}, err } goodDNSName := "receptor.TEST" goodIPAddress := net.ParseIP("127.0.0.1").To4() goodNodeIDs := goodDNSName goodCertOptions := certificates.CertOptions{ Bits: 4096, CommonName: "Ansible Automation Controller Nodes Mesh", CertNames: certificates.CertNames{ DNSNames: []string{goodDNSName}, IPAddresses: []net.IP{goodIPAddress}, NodeIDs: []string{goodNodeIDs}, }, NotAfter: goodCaTimeAfter, NotBefore: goodCaTimeBefore, } goodSubjectAltNamesExtension, err := utils.MakeReceptorSAN(goodCertOptions.CertNames.DNSNames, goodCertOptions.IPAddresses, goodCertOptions.NodeIDs) if err != nil { return goodCertOptions, &x509.CertificateRequest{}, err } goodCertificateRequest := &x509.CertificateRequest{ Attributes: nil, DNSNames: []string{goodDNSName}, EmailAddresses: nil, Extensions: []pkix.Extension{ *goodSubjectAltNamesExtension, }, ExtraExtensions: nil, IPAddresses: []net.IP{goodIPAddress}, PublicKeyAlgorithm: x509.RSA, SignatureAlgorithm: x509.SHA256WithRSA, Subject: pkix.Name{ CommonName: goodCertOptions.CommonName, Country: nil, ExtraNames: []pkix.AttributeTypeAndValue{}, Locality: nil, Names: []pkix.AttributeTypeAndValue{}, Organization: nil, OrganizationalUnit: nil, PostalCode: nil, Province: nil, SerialNumber: "", StreetAddress: nil, }, URIs: nil, Version: 0, } return goodCertOptions, goodCertificateRequest, nil } func TestCreateCertReqValid(t *testing.T) { type args struct { opts *certificates.CertOptions privateKey *rsa.PrivateKey } goodCertOptions, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodRSAPrivateKey, err := 
setupGoodCertificateRequestRsaPrivateKey() if err != nil { t.Fatal(err) } tests := []struct { name string args args want *x509.CertificateRequest wantErr bool }{ { name: "Positive test", args: args{ opts: &goodCertOptions, privateKey: goodRSAPrivateKey, }, want: goodCertificateRequest, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := certificates.CreateCertReq(tt.args.opts, tt.args.privateKey) if (err != nil) != tt.wantErr { t.Errorf("CreateCertReq() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got.DNSNames, tt.want.DNSNames) { t.Errorf("CreateCertReq() DNSNames got = %+v, want = %+v", got.DNSNames, tt.want.DNSNames) return } if !reflect.DeepEqual(got.EmailAddresses, tt.want.EmailAddresses) { t.Errorf("CreateCertReq() EmailAddresses got = %+v, want = %+v", got.EmailAddresses, tt.want.EmailAddresses) return } if !reflect.DeepEqual(got.ExtraExtensions, tt.want.ExtraExtensions) { t.Errorf("CreateCertReq() ExtraExtensions got = %+v, want = %+v", got.ExtraExtensions, tt.want.ExtraExtensions) return } if !reflect.DeepEqual(got.IPAddresses, tt.want.IPAddresses) { t.Errorf("CreateCertReq() IPAddresses got = %+v, want = %+v", got.IPAddresses, tt.want.IPAddresses) return } if !reflect.DeepEqual(got.PublicKeyAlgorithm, tt.want.PublicKeyAlgorithm) { t.Errorf("CreateCertReq() PublicKeyAlgorithm = %+v, want = %+v", got.PublicKeyAlgorithm, tt.want.PublicKeyAlgorithm) return } if !reflect.DeepEqual(got.SignatureAlgorithm, tt.want.SignatureAlgorithm) { t.Errorf("CreateCertReq() SignatureAlgorithm got = %+v, want = %+v", got.SignatureAlgorithm, tt.want.SignatureAlgorithm) return } if !reflect.DeepEqual(got.URIs, tt.want.URIs) { t.Errorf("CreateCertReq() URIs got = %+v, want = %+v", got.URIs, tt.want.URIs) return } if !reflect.DeepEqual(got.Version, tt.want.Version) { t.Errorf("CreateCertReq() Version got = %+v, want = %+v", got.Version, tt.want.Version) return } }) } } func TestCreateCertReqNegative(t *testing.T) { type args struct { opts *certificates.CertOptions privateKey *rsa.PrivateKey } badCertOptions := certificates.CertOptions{ Bits: -1, } tests := []struct { name string args args want *x509.CertificateRequest wantErr error }{ { name: "Negative test for Common Name", args: args{ opts: &badCertOptions, privateKey: nil, }, want: nil, wantErr: fmt.Errorf("must provide CommonName"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, gotErr := certificates.CreateCertReq(tt.args.opts, tt.args.privateKey) if gotErr == nil || gotErr.Error() != tt.wantErr.Error() { t.Errorf("CreateCertReq() error = %v, wantErr = %v", gotErr, tt.wantErr) } }) } } func TestCreateCertReqWithKey(t *testing.T) { type args struct { opts *certificates.CertOptions } goodCertOptions, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } tests := []struct { name string args args want *x509.CertificateRequest want1 *rsa.PrivateKey wantErr bool }{ { name: "Positive test", args: args{ opts: &goodCertOptions, }, want: goodCertificateRequest, want1: nil, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, _, err := certificates.CreateCertReqWithKey(tt.args.opts) if (err != nil) != tt.wantErr { t.Errorf("CreateCertReqWithKey() error = %v, wantErr %v", err, tt.wantErr) return } }) } } func TestCreateCertReqWithKeyNegative(t *testing.T) { type args struct { opts *certificates.CertOptions } badCertOptions := certificates.CertOptions{ Bits: -1, } tests := []struct { name string args args want 
*x509.CertificateRequest want1 *rsa.PrivateKey wantErr error }{ { name: "Negative test for Bits", args: args{ opts: &badCertOptions, }, want: nil, want1: nil, wantErr: fmt.Errorf("crypto/rsa: too few primes of given length to generate an RSA key"), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { _, _, gotErr := certificates.CreateCertReqWithKey(tt.args.opts) if gotErr == nil || gotErr.Error() != tt.wantErr.Error() { t.Errorf("CreateCertReqWithKey() error = %v, wantErr = %v", gotErr, tt.wantErr) } }) } } func TestGetReqNames(t *testing.T) { type args struct { request *x509.CertificateRequest } _, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodDNSNames := goodCertificateRequest.DNSNames goodIPAddresses := goodCertificateRequest.IPAddresses goodNodeIDs := goodDNSNames tests := []struct { name string args args want *certificates.CertNames wantErr bool }{ { name: "Positive test", args: args{ request: goodCertificateRequest, }, want: &certificates.CertNames{ DNSNames: goodDNSNames, NodeIDs: goodNodeIDs, IPAddresses: goodIPAddresses, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := certificates.GetReqNames(tt.args.request) if (err != nil) != tt.wantErr { t.Errorf("GetReqNames() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("GetReqNames() = %v, want %v", got, tt.want) } }) } } func TestGetReqNamesNegative(t *testing.T) { type args struct { request *x509.CertificateRequest } _, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodCertificateRequest.Extensions = []pkix.Extension{ { Id: utils.OIDSubjectAltName, Critical: true, Value: nil, }, } tests := []struct { name string args args want *certificates.CertNames wantErr bool }{ { name: "Negative test", args: args{ request: goodCertificateRequest, }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := certificates.GetReqNames(tt.args.request) if (err != nil) != tt.wantErr { t.Errorf("GetReqNames() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("GetReqNames() = %v, want %v", got, tt.want) } }) } } func TestLoadCertificate(t *testing.T) { type args struct { filename string } goodCertificate, err := setupGoodCertificate() if err != nil { t.Fatal(err) } positiveTestFilename := "positive_test_filename" tests := []struct { name string args args want *x509.Certificate wantErr bool }{ { name: "Positive test", args: args{ filename: positiveTestFilename, }, want: goodCertificate, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) switch tt.args.filename { case positiveTestFilename: o. EXPECT(). ReadFile(gomock.Eq(positiveTestFilename)). Return(setupGoodCertificatePEMData(), nil). 
Times(1) default: t.Errorf("Unexpected filename: %s", tt.args.filename) } got, err := certificates.LoadCertificate(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadCertificate() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("LoadCertificate() = %v, want %v", got, tt.want) } }) } } func TestLoadCertificatesNegative(t *testing.T) { type args struct { filename string } _, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodCertificateRequest.Extensions = []pkix.Extension{ { Id: utils.OIDSubjectAltName, Critical: true, Value: nil, }, } negativeMultipleItemTest := "negative_multiple_item_test" multipleCertificates := setupGoodCertificatePEMData() multipleCertificates = append(multipleCertificates, multipleCertificates[0]) negativeNoCertificateTest := "negative_no_certificate_test" noCertificates := []byte{ 0, 0, 0, 0, } tests := []struct { name string args args want *x509.Certificate wantErr bool }{ { name: "Negative multiple item test", args: args{ filename: negativeMultipleItemTest, }, want: nil, wantErr: true, }, { name: "Negative no certificate test", args: args{ filename: negativeNoCertificateTest, }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) switch tt.args.filename { case negativeMultipleItemTest: o. EXPECT(). ReadFile(gomock.Eq(negativeMultipleItemTest)). Return(multipleCertificates, nil). Times(1) case negativeNoCertificateTest: o. EXPECT(). ReadFile(gomock.Eq(negativeNoCertificateTest)). Return(noCertificates, nil). Times(1) default: t.Errorf("Unexpected filename: %s", tt.args.filename) } got, err := certificates.LoadCertificate(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadCertificate() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("GetReqNames() = %v, want %v", got, tt.want) } }) } } func TestLoadFromPEMFile(t *testing.T) { type args struct { filename string } ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) errorSettingUpTypeFormatString := "Error setting up %s: %v" certificateTestFilename := "certificate_test_filename" goodCertificate, err := setupGoodCertificate() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "certificate", err) } certificateRequestTestFilename := "certificate_request_test_filename" goodCertificateRequest, err := setupGoodCertificateRequest() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "certificate request", err) } certificateRequestRsaPrivateKeyTestFilename := "rsa_private_key_test_filename" goodCertificateRequestRsaPrivateKey, err := setupGoodCertificateRequestRsaPrivateKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "rsa private key", err) } privateKeyTestFilename := "private_key_test_filename" goodPrivateKey, err := setupGoodPrivateKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "private key", err) } publicKeyTestFilename := "public_key_test_filename" goodPublicKey, err := setupGoodPublicKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "public key", err) } failedToDecodeTestFilename := "failed_to_decode_test_filename" failedToDecodeFileContents := []byte{ 0, 0, 0, 0, } unknownBlockTypeTestFilename := "unknown_block_type_test_filename" tests := []struct { name string args args wantOserReadfileCalls func() want []interface{} wantErr 
bool }{ { name: "Certificate", args: args{ filename: certificateTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupGoodCertificatePEMData()), nil). Times(1) }, want: []interface{}{ goodCertificate, }, wantErr: false, }, { name: "Certificate Request", args: args{ filename: certificateRequestTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupGoodCertificateRequestPEMData()), nil). Times(1) }, want: []interface{}{ goodCertificateRequest, }, wantErr: false, }, { name: "RSA Private Key", args: args{ filename: certificateRequestRsaPrivateKeyTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupGoodCertificateRequestRsaPrivateKeyPEMData()), nil). Times(1) }, want: []interface{}{ goodCertificateRequestRsaPrivateKey, }, wantErr: false, }, { name: "Private Key", args: args{ filename: privateKeyTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupGoodPrivateKeyPEMData()), nil). Times(1) }, want: []interface{}{ goodPrivateKey, }, wantErr: false, }, { name: "Public Key", args: args{ filename: publicKeyTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupGoodPublicKeyPEMData()), nil). Times(1) }, want: []interface{}{ goodPublicKey, }, wantErr: false, }, { name: "Failed to decode", args: args{ filename: failedToDecodeTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(failedToDecodeFileContents), nil). Times(1) }, want: nil, wantErr: true, }, { name: "Unknown block type", args: args{ filename: unknownBlockTypeTestFilename, }, wantOserReadfileCalls: func() { o. EXPECT(). ReadFile(gomock.Any()). Return(interface{}(setupUnknownBlockTypeFileContents()), nil). 
Times(1) }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.wantOserReadfileCalls() got, err := certificates.LoadFromPEMFile(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadFromPEMFile() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("LoadFromPEMFile() = %v, want %v", got, tt.want) } }) } } func setupUnknownBlockTypeFileContents() []byte { return []byte(`-----BEGIN PKCS7----- MIIFhAYJKoZIhvcNAQcCoIIFdTCCBXECAQExADALBgkqhkiG9w0BBwGgggVZMIIF VTCCAz2gAwIBAgIEYdeDaTANBgkqhkiG9w0BAQsFADA7MTkwNwYDVQQDEzBBbnNp YmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0EwHhcN MjIwMTA3MDAwMzUxWhcNMzIwMTA3MDAwMzUxWjA7MTkwNwYDVQQDEzBBbnNpYmxl IEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0EwggIiMA0G CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCxAErOWvVDU8mfZgtE6BSygTWUMkPx xIEQSYs/UesRAHaB+QXa7/0Foa0VUJKcWwUE+2yYkNRrg8MmE8VWMSewcaNIAs40 7stFXP+A2anPEglwemTskpO72sigiYDKShC5n5ciyPsHckwVlOCTtac5TwFeeTmG nHWRcd4uBGvaEXx98fw/wLgYtr9vmKTdnOQjriX9EaAWrjlrlzm54Bs3uVUjGSL7 zY381EuUVV4AjbqQyThbY9cVfsK0nmzLUqpiHG2IhGZDZA9+jxtz2wJWFkNQnWA3 afCUjcWV+4FpP3p1U1myCeh2yR2uCHs9pkUK3ts9uD/Wd5j9M1oBMlymbN/C5Fah d+cTXrPAjsoRqCso9TBP4mIlNl1Jq8MRUWTL5HOuwn+KnufBtuQ1hIb71Esokj90 eWeo/P+temYAEquUVWiej7lnHyZVW647lE+o+xJEOmW+tY5H4jgA/twP4s7UBgR5 45usWF9/utvnhsGSkg1EYcdzaM01pkrWrw1GvHT++HshsrG6Tse8gY7JrTdsLDtU 8LPhPUEnSfVBcgcMg2Dg8lEbaODtdp2xtCJwZHy9CiAx3CKcogVEfecrKSSr2iSo cft9/l8J+mhVG2CI6ekG6Cy9hDct/3SV01Dfd4FG7xXJIE3mTDLILpi1AVIWMHxY D3skuQMyLhJDAwIDAQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYwFAYI KwYBBQUHAwIGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFAl C81rH211fPJoWglERKb/7/NfMA0GCSqGSIb3DQEBCwUAA4ICAQCWCP/O6YQ9jhae 2/BeUeeKsnoxf90prg3o6/QHbelF6yL+MvGg5ZbPSlt+ywLDNR2CYvXk/4SD5To7 CPKhSPBwwUJpafvQfAOZijU30fvXvp5yZEFoOzOyvBP58NfzL5qH6Pf5A6i3rHvt R1v7DgS7u2qWWcSimIM0UPoV3JubLTEORjOR6FIyNkIxdjhrP3SxyZ54xxdeG3bc hKaRcGVNoFYSDN4bAA22JAjlD8kXNYKzIS/0cOR/9SnHd1wMIQ2trx0+TfyGFAA1 mW1mjzQd+h5SGBVeCz2W2XttNSIfQDndJCsyACxmIaOK99AQxdhZsWfHtGO13Tjn yoiHjf8rozJbAVYqrIdB6GDf6fUlxwhUXT0qkgOvvAzjNnLoOBUkE4TWqXHl38a+ ITDNVzaUlrTd63eexS69V6kHe7mrqjywNQ9EXF9kaVeoNTzRf/ztT/DEVAl+rKsh Mt4IOKQf1ScE+EJe1njpREHV+fa+kYvQB6cRuxW9a8sOSeQNaSL73Zv54elZxffY hMv6yXvVxVnJHEsG3kM/CsvsU364BBd9kDcZbHpjNcDHMu+XxECJjD2atVtuFdaO LykGKfMCYVBP+xs97IJO8En/5N9QQwc+N4cfCg9/BWoZKHPbRx/V+57VEj0m69Ep JXbL15ZQLCPsaIcqJqpK23VyJKc8fDEA -----END PKCS7-----`) } func TestLoadPrivateKey(t *testing.T) { type args struct { filename string } positivePrivateKeyFilename := "private_key_test_filename" errorSettingUpTypeFormatString := "Error setting up %s: %v" goodPrivateKey, err := setupGoodPrivateKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "private key", err) } negativeMultipleItemFilename := "negative_multiple_item_test" multiplePrivateKeys := setupGoodPrivateKeyPEMData() multiplePrivateKeys = append(multiplePrivateKeys, multiplePrivateKeys[0]) negativeNoPrivateKeyFilename := "negative_no_private_key_test" noPrivateKey := []byte{ 0, 0, 0, 0, 0, } tests := []struct { name string args args wantOserReadfileArg string wantOserReadfileResult []byte want *rsa.PrivateKey wantErr bool }{ { name: "Positive Private Key", args: args{ filename: positivePrivateKeyFilename, }, wantOserReadfileArg: positivePrivateKeyFilename, wantOserReadfileResult: setupGoodPrivateKeyPEMData(), want: goodPrivateKey, wantErr: false, }, { name: "Negative multi item test", args: args{ filename: negativeMultipleItemFilename, }, wantOserReadfileArg: 
negativeMultipleItemFilename, wantOserReadfileResult: multiplePrivateKeys, want: nil, wantErr: true, }, { name: "Negative no private key test", args: args{ filename: negativeNoPrivateKeyFilename, }, wantOserReadfileArg: negativeNoPrivateKeyFilename, wantOserReadfileResult: noPrivateKey, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) o. EXPECT(). ReadFile(gomock.Eq(tt.wantOserReadfileArg)). Return(tt.wantOserReadfileResult, nil). Times(1) got, err := certificates.LoadPrivateKey(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadPrivateKey() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("LoadPrivateKey() = %v, want %v", got, tt.want) } }) } } func TestLoadPublicKey(t *testing.T) { type args struct { filename string } errorSettingUpTypeFormatString := "Error setting up %s: %v" positivePublicKeyFilename := "public_key_test_filename" goodPublicKey, err := setupGoodPublicKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "public key", err) } negativeMultipleItemFilename := "negative_multiple_item_test" multiplePublicKeys := setupGoodPublicKeyPEMData() multiplePublicKeys = append(multiplePublicKeys, multiplePublicKeys[0]) negativeNoPublicKeyFilename := "negative_no_public_key_test" noPublicKey := []byte{ 0, 0, 0, 0, } tests := []struct { name string args args wantOserReadfileArg string wantOserReadfileResult []byte want *rsa.PublicKey wantErr bool }{ { name: "Positive Public Key", args: args{ filename: positivePublicKeyFilename, }, wantOserReadfileArg: positivePublicKeyFilename, wantOserReadfileResult: setupGoodPublicKeyPEMData(), want: goodPublicKey, wantErr: false, }, { name: "Negative multi item test", args: args{ filename: negativeMultipleItemFilename, }, wantOserReadfileArg: negativeMultipleItemFilename, wantOserReadfileResult: multiplePublicKeys, want: nil, wantErr: true, }, { name: "Negative no public key test", args: args{ filename: negativeNoPublicKeyFilename, }, wantOserReadfileArg: negativeNoPublicKeyFilename, wantOserReadfileResult: noPublicKey, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) o. EXPECT(). ReadFile(gomock.Eq(tt.wantOserReadfileArg)). Return(tt.wantOserReadfileResult, nil). 
Times(1) got, err := certificates.LoadPublicKey(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadPublicKey() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("LoadPublicKey() = %v, want %v", got, tt.want) } }) } } func TestLoadRequest(t *testing.T) { type args struct { filename string } errorSettingUpTypeFormatString := "Error setting up %s: %v" positiveRequestFilename := "request_test_filename" goodRequest, err := setupGoodCertificateRequest() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "request", err) } negativeMultipleItemFilename := "negative_multiple_item_test" multipleRequests := setupGoodCertificateRequestPEMData() multipleRequests = append(multipleRequests, multipleRequests[0]) negativeNoRequestFilename := "negative_no_request_test" noRequest := []byte{ 0, 0, 0, 0, } tests := []struct { name string args args wantOserReadfileArg string wantOserReadfileResult []byte want *x509.CertificateRequest wantErr bool }{ { name: "Positive Request", args: args{ filename: positiveRequestFilename, }, wantOserReadfileArg: positiveRequestFilename, wantOserReadfileResult: setupGoodCertificateRequestPEMData(), want: goodRequest, wantErr: false, }, { name: "Negative multi item test", args: args{ filename: negativeMultipleItemFilename, }, wantOserReadfileArg: negativeMultipleItemFilename, wantOserReadfileResult: multipleRequests, want: nil, wantErr: true, }, { name: "Negative no request test", args: args{ filename: negativeNoRequestFilename, }, wantOserReadfileArg: negativeNoRequestFilename, wantOserReadfileResult: noRequest, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) o. EXPECT(). ReadFile(gomock.Eq(tt.wantOserReadfileArg)). Return(tt.wantOserReadfileResult, nil). Times(1) got, err := certificates.LoadRequest(tt.args.filename, o) if (err != nil) != tt.wantErr { t.Errorf("LoadRequest() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("LoadRequest() = %v, want %v", got, tt.want) } }) } } func TestRsaWrapper_GenerateKey(t *testing.T) { type args struct { random io.Reader bits int } errorSettingUpTypeFormatString := "Error setting up %s: %v" goodPrivateKey, err := setupGoodPrivateKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "private key", err) } tests := []struct { name string args args wantGenerateKeyRandomArg io.Reader wantGenerateKeyBitsArg int wantGenerateKeyPrivateKeyResult *rsa.PrivateKey wantGenerateKeyErrorResult error want *rsa.PrivateKey wantErr bool }{ { name: "Positive test", args: args{ random: rand.Reader, bits: 2048, }, wantGenerateKeyRandomArg: nil, wantGenerateKeyBitsArg: 2048, wantGenerateKeyPrivateKeyResult: goodPrivateKey, wantGenerateKeyErrorResult: nil, want: goodPrivateKey, wantErr: false, }, { name: "Negative test", args: args{ random: rand.Reader, bits: -1, }, wantGenerateKeyRandomArg: nil, wantGenerateKeyBitsArg: -1, wantGenerateKeyPrivateKeyResult: nil, wantGenerateKeyErrorResult: fmt.Errorf("Error result"), want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockRsa := mock_certificates.NewMockRsaer(ctrl) mockRsa. EXPECT(). GenerateKey(gomock.Any(), gomock.Eq(tt.wantGenerateKeyBitsArg)). Return(tt.wantGenerateKeyPrivateKeyResult, tt.wantGenerateKeyErrorResult). 
Times(1) got, err := mockRsa.GenerateKey(tt.args.random, tt.args.bits) if (err != nil) != tt.wantErr { t.Errorf("RsaWrapper.GenerateKey() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("RsaWrapper.GenerateKey() = %v, want %v", got, tt.want) t.Errorf("LoadRequest() = %v, want %v", got, tt.want) } }) } } func TestSaveToPEMFile(t *testing.T) { type args struct { filename string data []interface{} } errorSettingUpTypeFormatString := "Error setting up %s: %v" certificateRequestTestFilename := "certificate_request_test_filename" goodRequest, err := setupGoodCertificateRequest() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "request", err) } certificateTestFilename := "certificate_test_filename" goodCaCertificate, err := setupGoodCaCertificate() if err != nil { t.Errorf("Error setting up certificate: %v", err) } failedToEncodeTestFilename := "failed_to_encode_test_filename" privateKeyTestFilename := "private_key_test_filename" goodPrivateKey, err := setupGoodPrivateKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "private key", err) } publicKeyTestFilename := "public_key_test_filename" goodPublicKey, err := setupGoodPublicKey() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "public key", err) } rsaPrivateKeyTestFilename := "rsa_private_key_test_filename" goodCaPrivateKey, err := setupGoodCertificateRequestRsaPrivateKey() if err != nil { t.Fatal(err) } unknownBlockTypeTestFilename := "unknown_block_type_test_filename" ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) tests := []struct { name string args args wantOserWritefileCalls func() want []interface{} wantErr bool }{ { name: "Certificate", args: args{ filename: certificateTestFilename, data: []interface{}{ goodCaCertificate, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(1) }, want: []interface{}{ nil, }, wantErr: false, }, { name: "Certificate Request", args: args{ filename: certificateRequestTestFilename, data: []interface{}{ goodRequest, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(1) }, want: []interface{}{ nil, }, wantErr: false, }, { name: "RSA Private Key", args: args{ filename: rsaPrivateKeyTestFilename, data: []interface{}{ goodCaPrivateKey, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(1) }, want: []interface{}{ nil, }, wantErr: false, }, { name: "Private Key", args: args{ filename: privateKeyTestFilename, data: []interface{}{ goodPrivateKey, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(1) }, want: []interface{}{ nil, }, wantErr: false, }, { name: "Public Key", args: args{ filename: publicKeyTestFilename, data: []interface{}{ goodPublicKey, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(1) }, want: []interface{}{ nil, }, wantErr: false, }, { name: "Failed to encode", args: args{ filename: failedToEncodeTestFilename, data: []interface{}{ []byte{ 0, 0, 0, 0, }, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). 
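// Encoding fails before anything is written, so WriteFile must not be called for this case.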
Times(0) }, want: nil, wantErr: true, }, { name: "Unknown block type", args: args{ filename: unknownBlockTypeTestFilename, data: []interface{}{ nil, }, }, wantOserWritefileCalls: func() { o. EXPECT(). WriteFile(gomock.Any(), gomock.Any(), gomock.Any()). Return(interface{}(nil)). Times(0) }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.wantOserWritefileCalls() if err := certificates.SaveToPEMFile(tt.args.filename, tt.args.data, o); (err != nil) != tt.wantErr { t.Errorf("SaveToPEMFile() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestSignCertReq(t *testing.T) { type args struct { req *x509.CertificateRequest // ca *certificates.CA caOpts *certificates.CertOptions opts *certificates.CertOptions } badCertOptions, badCertificateRequest, err := setupBadCertRequest() if err != nil { t.Fatal(err) } badCertOptions.Bits = -1 badCertificateRequest.SignatureAlgorithm = x509.DSAWithSHA1 goodCaCertOptions, _, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodCaPrivateKey, err := setupGoodCaRsaPrivateKey() if err != nil { t.Fatal(err) } goodCaPublicKey := goodCaPrivateKey.Public() errorSettingUpTypeFormatString := "Error setting up %s: %v" goodCertificate, err := setupGoodCertificate() if err != nil { t.Errorf(errorSettingUpTypeFormatString, "certificate", err) } goodCertificate.KeyUsage = 1 goodCertOptions, goodCertificateRequest, err := setupGoodCertRequest() if err != nil { t.Fatal(err) } goodCertOptions.CommonName = "Ansible Automation Controller Nodes Mesh" goodCertificateRequest.PublicKey = goodCaPublicKey tests := []struct { name string args args want *x509.Certificate wantErr bool }{ { name: "Positive test", args: args{ req: goodCertificateRequest, caOpts: &goodCaCertOptions, opts: &goodCertOptions, }, want: goodCertificate, wantErr: false, }, { name: "Negative test", args: args{ req: badCertificateRequest, caOpts: &goodCaCertOptions, opts: &badCertOptions, }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockRsa := mock_certificates.NewMockRsaer(ctrl) mockRsa. EXPECT(). GenerateKey(gomock.Any(), gomock.Any()). 
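// DoAndReturn hands back a fixed, pre-generated CA key, keeping the signing test deterministic without real RSA key generation.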
DoAndReturn( func(random io.Reader, bits int) (*rsa.PrivateKey, error) { return goodCaPrivateKey, nil }, ) CA, err := setupGoodCA(tt.args.caOpts, mockRsa) if err != nil { t.Fatal(err) } certGot, err := certificates.SignCertReq(tt.args.req, CA, tt.args.opts) if (err != nil) != tt.wantErr { t.Errorf("SignCertReq() error = %v, wantErr %v", err, tt.wantErr) return } if err == nil { if certGot.BasicConstraintsValid != false { t.Errorf("CreateCA() Certificate BasicConstraintsValid got = %+v, want = %+v", certGot.BasicConstraintsValid, false) return } if !reflect.DeepEqual(certGot.ExtraExtensions, tt.want.ExtraExtensions) { t.Errorf("CreateCA() Certificate ExtraExtensions got = %+v, want = %+v", certGot.ExtraExtensions, tt.want.ExtraExtensions) return } if !reflect.DeepEqual(certGot.ExtKeyUsage, tt.want.ExtKeyUsage) { t.Errorf("CreateCA() Certificate ExtKeyUsage got = %+v, want = %+v", certGot.ExtKeyUsage, tt.want.ExtKeyUsage) return } if certGot.IsCA != false { t.Errorf("CreateCA() Certificate IsCA got = %+v, want = %+v", certGot.IsCA, false) return } if !reflect.DeepEqual(certGot.Issuer, tt.want.Issuer) { t.Errorf("CreateCA() Certificate Issuer got = %+v, want = %+v", certGot.Issuer, tt.want.Issuer) return } if !reflect.DeepEqual(certGot.KeyUsage, tt.want.KeyUsage) { t.Errorf("CreateCA() Certificate KeyUsage got = %+v, want = %+v", certGot.KeyUsage, tt.want.KeyUsage) return } if !reflect.DeepEqual(certGot.NotAfter, tt.want.NotAfter) { t.Errorf("CreateCA() Certificate NotAfter got = %+v, want = %+v", certGot.NotAfter, tt.want.NotAfter) return } if !reflect.DeepEqual(certGot.NotBefore, tt.want.NotBefore) { t.Errorf("CreateCA() Certificate NotBefore got = %+v, want = %+v", certGot.NotBefore, tt.want.NotBefore) return } if !reflect.DeepEqual(certGot.PublicKeyAlgorithm, tt.want.PublicKeyAlgorithm) { t.Errorf("CreateCA() Certificate PublicKeyAlgorithm got = %+v, want = %+v", certGot.PublicKeyAlgorithm, tt.want.PublicKeyAlgorithm) return } if !reflect.DeepEqual(certGot.SignatureAlgorithm, tt.want.SignatureAlgorithm) { t.Errorf("CreateCA() Certificate SignatureAlgorithm got = %+v, want = %+v", certGot.SignatureAlgorithm, tt.want.SignatureAlgorithm) return } if certGot.Subject.String() != "CN=Ansible Automation Controller Nodes Mesh" { t.Errorf("CreateCA() Certificate Subject got = %+v, want = %+v", certGot.Subject, "CN=Ansible Automation Controller Nodes Mesh") return } if !reflect.DeepEqual(certGot.Version, tt.want.Version) { t.Errorf("CreateCA() Certificate Version got = %+v, want = %+v", certGot.Version, tt.want.Version) return } } }) } } receptor-1.5.3/pkg/certificates/cli.go000066400000000000000000000175161475465740700177240ustar00rootroot00000000000000//go:build !no_cert_auth // +build !no_cert_auth package certificates import ( "crypto/rsa" "crypto/x509" "fmt" "net" "strings" "time" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // Oser is the function calls interfaces for mocking os. // type Oser interface { // ReadFile(name string) ([]byte, error) // WriteFile(name string, data []byte, perm fs.FileMode) error //} // OsWrapper is the Wrapper structure for Oser. // type OsWrapper struct{} // InitCA Initialize Certificate Authority. 
func InitCA(opts *CertOptions, certOut, keyOut string, osWrapper Oser) error { ca, err := CreateCA(opts, &RsaWrapper{}) if err == nil { err = SaveToPEMFile(certOut, []interface{}{ca.Certificate}, osWrapper) } if err == nil { err = SaveToPEMFile(keyOut, []interface{}{ca.PrivateKey}, osWrapper) } return err } type InitCAConfig struct { CommonName string `description:"Common name to assign to the certificate" required:"Yes"` Bits int `description:"Bit length of the encryption keys of the certificate" required:"Yes"` NotBefore string `description:"Effective (NotBefore) date/time, in RFC3339 format"` NotAfter string `description:"Expiration (NotAfter) date/time, in RFC3339 format"` OutCert string `description:"File to save the CA certificate to" required:"Yes"` OutKey string `description:"File to save the CA private key to" required:"Yes"` } func (ica InitCAConfig) Run() (err error) { opts := &CertOptions{ CommonName: ica.CommonName, Bits: ica.Bits, } if ica.NotBefore != "" { opts.NotBefore, err = time.Parse(time.RFC3339, ica.NotBefore) if err != nil { return } } if ica.NotAfter != "" { opts.NotAfter, err = time.Parse(time.RFC3339, ica.NotAfter) if err != nil { return } } return InitCA(opts, ica.OutCert, ica.OutKey, &OsWrapper{}) } // MakeReq Create Certificate Request. func MakeReq(opts *CertOptions, keyIn, keyOut, reqOut string, osWrapper Oser) error { var req *x509.CertificateRequest var key *rsa.PrivateKey if keyIn != "" { data, err := LoadFromPEMFile(keyIn, osWrapper) if err != nil { return err } for _, elem := range data { ckey, ok := elem.(*rsa.PrivateKey) if !ok { continue } if key != nil { return fmt.Errorf("multiple private keys in file %s", keyIn) } key = ckey } if key == nil { return fmt.Errorf("no private keys in file %s", keyIn) } req, err = CreateCertReq(opts, key) if err != nil { return err } } else { var err error req, key, err = CreateCertReqWithKey(opts) if err != nil { return err } } err := SaveToPEMFile(reqOut, []interface{}{req}, osWrapper) if err != nil { return err } if keyOut != "" { err = SaveToPEMFile(keyOut, []interface{}{key}, osWrapper) if err != nil { return err } } return nil } type MakeReqConfig struct { CommonName string `description:"Common name to assign to the certificate" required:"Yes"` Bits int `description:"Bit length of the encryption keys of the certificate"` DNSName []string `description:"DNS names to add to the certificate"` IPAddress []string `description:"IP addresses to add to the certificate"` NodeID []string `description:"Receptor node IDs to add to the certificate"` OutReq string `description:"File to save the certificate request to" required:"Yes"` InKey string `description:"Private key to use for the request"` OutKey string `description:"File to save the private key to (new key will be generated)"` } func (mr MakeReqConfig) Prepare() error { if mr.InKey == "" && mr.OutKey == "" { return fmt.Errorf("must provide either InKey or OutKey") } if mr.InKey != "" && mr.OutKey != "" { return fmt.Errorf("cannot use both InKey and OutKey") } if mr.InKey != "" && mr.Bits != 0 { return fmt.Errorf("cannot specify key bits when reading an already-existing key") } if mr.OutKey != "" && mr.Bits == 0 { return fmt.Errorf("must specify key bits when creating a new key") } return nil } func (mr MakeReqConfig) Run() error { opts := &CertOptions{ CommonName: mr.CommonName, Bits: mr.Bits, } opts.DNSNames = mr.DNSName opts.NodeIDs = mr.NodeID for _, ipstr := range mr.IPAddress { ip := net.ParseIP(ipstr) if ip == nil { return fmt.Errorf("invalid IP address: %s", 
ipstr) } if opts.IPAddresses == nil { opts.IPAddresses = make([]net.IP, 0) } opts.IPAddresses = append(opts.IPAddresses, ip) } return MakeReq(opts, mr.InKey, mr.OutKey, mr.OutReq, &OsWrapper{}) } // SignReq Sign Certificate Request. func SignReq(opts *CertOptions, caCrtPath, caKeyPath, reqPath, certOut string, verify bool, osWrapper Oser) error { ca := &CA{} var err error ca.Certificate, err = LoadCertificate(caCrtPath, osWrapper) if err != nil { return err } ca.PrivateKey, err = LoadPrivateKey(caKeyPath, osWrapper) if err != nil { return err } var req *x509.CertificateRequest req, err = LoadRequest(reqPath, osWrapper) if err != nil { return err } var names *CertNames names, err = GetReqNames(req) if err != nil { return err } if len(names.DNSNames) == 0 && len(names.IPAddresses) == 0 && len(names.NodeIDs) == 0 { return fmt.Errorf("cannot sign: no names found in certificate") } if !verify { fmt.Printf("Requested certificate:\n") fmt.Printf(" Subject: %s\n", req.Subject) algo := req.PublicKeyAlgorithm.String() if algo == "RSA" { rpk := req.PublicKey.(*rsa.PublicKey) algo = fmt.Sprintf("%s (%d bits)", algo, rpk.Size()*8) } fmt.Printf(" Encryption Algorithm: %s\n", algo) fmt.Printf(" Signature Algorithm: %s\n", req.SignatureAlgorithm.String()) fmt.Printf(" Names:\n") for _, name := range names.DNSNames { fmt.Printf(" DNS Name: %s\n", name) } for _, ip := range names.IPAddresses { fmt.Printf(" IP Address: %v\n", ip) } for _, node := range names.NodeIDs { fmt.Printf(" Receptor Node ID: %s\n", node) } fmt.Printf("Sign certificate (yes/no)? ") var response string _, err = fmt.Scanln(&response) if err != nil { return err } response = strings.ToLower(response) if response != "y" && response != "yes" { return fmt.Errorf("user declined") } } var cert *x509.Certificate cert, err = SignCertReq(req, ca, opts) if err != nil { return err } return SaveToPEMFile(certOut, []interface{}{cert}, &OsWrapper{}) } type SignReqConfig struct { Req string `description:"Certificate Request PEM filename" required:"Yes"` CACert string `description:"CA certificate PEM filename" required:"Yes"` CAKey string `description:"CA private key PEM filename" required:"Yes"` NotBefore string `description:"Effective (NotBefore) date/time, in RFC3339 format"` NotAfter string `description:"Expiration (NotAfter) date/time, in RFC3339 format"` OutCert string `description:"File to save the signed certificate to" required:"Yes"` Verify bool `description:"If true, do not prompt the user for verification" default:"False"` } func (sr SignReqConfig) Run() error { opts := &CertOptions{} if sr.NotBefore != "" { t, err := time.Parse(time.RFC3339, sr.NotBefore) if err != nil { return err } opts.NotBefore = t } if sr.NotAfter != "" { t, err := time.Parse(time.RFC3339, sr.NotAfter) if err != nil { return err } opts.NotAfter = t } return SignReq(opts, sr.CACert, sr.CAKey, sr.Req, sr.OutCert, sr.Verify, &OsWrapper{}) } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-certificates", "cert-init", "Initialize PKI CA", InitCAConfig{}, cmdline.Exclusive, cmdline.Section(certSection)) cmdline.RegisterConfigTypeForApp("receptor-certificates", "cert-makereq", "Create certificate request", MakeReqConfig{}, cmdline.Exclusive, cmdline.Section(certSection)) cmdline.RegisterConfigTypeForApp("receptor-certificates", "cert-signreq", "Sign request and produce certificate", SignReqConfig{}, cmdline.Exclusive, cmdline.Section(certSection)) } 
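// Illustrative sketch (not part of the repository): the exported config types above
// (InitCAConfig, MakeReqConfig, SignReqConfig) can also be driven programmatically
// rather than through the cmdline registration in init(). The file paths, common
// names, and node ID below are assumptions chosen for the example, and Verify: true
// is used so SignReqConfig.Run does not prompt interactively.
package main

import (
	"log"

	"github.com/ansible/receptor/pkg/certificates"
)

func main() {
	// Create a CA certificate and private key (output paths are illustrative).
	ca := certificates.InitCAConfig{
		CommonName: "Example Mesh CA",
		Bits:       4096,
		OutCert:    "/tmp/example_ca_cert.pem",
		OutKey:     "/tmp/example_ca_key.pem",
	}
	if err := ca.Run(); err != nil {
		log.Fatal(err)
	}

	// Create a certificate request plus a new private key for a node.
	req := certificates.MakeReqConfig{
		CommonName: "example-node",
		Bits:       4096,
		NodeID:     []string{"example-node"},
		OutReq:     "/tmp/example_node_req.pem",
		OutKey:     "/tmp/example_node_key.pem",
	}
	if err := req.Prepare(); err != nil {
		log.Fatal(err)
	}
	if err := req.Run(); err != nil {
		log.Fatal(err)
	}

	// Sign the request with the CA; Verify: true skips the interactive confirmation.
	sign := certificates.SignReqConfig{
		Req:     "/tmp/example_node_req.pem",
		CACert:  "/tmp/example_ca_cert.pem",
		CAKey:   "/tmp/example_ca_key.pem",
		OutCert: "/tmp/example_node_cert.pem",
		Verify:  true,
	}
	if err := sign.Run(); err != nil {
		log.Fatal(err)
	}
}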
receptor-1.5.3/pkg/certificates/cli_test.go000066400000000000000000000203051475465740700207510ustar00rootroot00000000000000//go:build !no_cert_auth // +build !no_cert_auth package certificates_test import ( "io/fs" "net" "testing" "time" "github.com/ansible/receptor/pkg/certificates" "github.com/ansible/receptor/pkg/certificates/mock_certificates" "go.uber.org/mock/gomock" ) func TestInitCA(t *testing.T) { type args struct { opts *certificates.CertOptions certOut string keyOut string } positiveCertOut := "/tmp/receptor_ca_cert.pem" positiveKeyOut := "/tmp/receptor_ca_key.pem" positiveCaTimeNotAfterString := "2032-01-07T00:03:51Z" positiveCaTimeNotAfter, err := time.Parse(time.RFC3339, positiveCaTimeNotAfterString) if err != nil { t.Errorf("Invalid CA NOT after time: %+v", err) } positiveCaTimeNotBeforeString := "2022-01-07T00:03:51Z" positiveCaTimeNotBefore, err := time.Parse(time.RFC3339, positiveCaTimeNotBeforeString) if err != nil { t.Errorf("Invalid CA NOT before time: %+v", err) } tests := []struct { name string args args wantErr bool }{ { name: "Positive test", args: args{ opts: &certificates.CertOptions{ Bits: 8192, CommonName: "Ansible Automation Controller Nodes Mesh CA", NotAfter: positiveCaTimeNotAfter, NotBefore: positiveCaTimeNotBefore, }, certOut: positiveCertOut, keyOut: positiveKeyOut, }, wantErr: false, }, { name: "Negative test", args: args{ opts: &certificates.CertOptions{ Bits: -1, CommonName: "Ansible Automation Controller Nodes Mesh CA", NotAfter: positiveCaTimeNotAfter, NotBefore: positiveCaTimeNotBefore, }, certOut: positiveCertOut, keyOut: positiveKeyOut, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) switch tt.args.certOut { case positiveCertOut: o. EXPECT(). WriteFile(gomock.Eq(positiveCertOut), gomock.Any(), gomock.Any()). Return(nil). MaxTimes(1). MinTimes(0) default: t.Errorf("Unexpected certOut filename: %s", tt.args.certOut) } switch tt.args.keyOut { case positiveKeyOut: o. EXPECT(). WriteFile(gomock.Eq(positiveKeyOut), gomock.Any(), gomock.Any()). Return(nil). MaxTimes(1). MinTimes(0) default: t.Errorf("Unexpected keyOut filename: %s", tt.args.keyOut) } if err := certificates.InitCA(tt.args.opts, tt.args.certOut, tt.args.keyOut, o); (err != nil) != tt.wantErr { t.Errorf("InitCA() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestMakeReq(t *testing.T) { type args struct { opts *certificates.CertOptions keyIn string keyOut string reqOut string } positiveKeyIn := "/tmp/receptor_key.pem" positiveKeyOut := "/tmp/receptor_key_out.pem" positiveReqOut := "/tmp/receptor_request_out.pem" negativeKeyIn := "/tmp" tests := []struct { name string args args wantErr bool }{ { name: "Positive test", args: args{ opts: &certificates.CertOptions{ Bits: 8192, CommonName: "Ansible Automation Controller Nodes Mesh", }, keyIn: positiveKeyIn, keyOut: positiveKeyOut, reqOut: positiveReqOut, }, wantErr: false, }, { name: "Negative test", args: args{ opts: &certificates.CertOptions{ Bits: -1, CommonName: "Ansible Automation Controller Nodes Mesh", }, keyIn: negativeKeyIn, keyOut: positiveKeyOut, reqOut: positiveReqOut, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) switch tt.args.keyIn { case negativeKeyIn: o. EXPECT(). ReadFile(gomock.Eq(negativeKeyIn)). Return(nil, fs.ErrInvalid). Times(1) case positiveKeyIn: o. 
EXPECT(). ReadFile(gomock.Eq(positiveKeyIn)). Return(setupGoodPrivateKeyPEMData(), nil). Times(1) default: t.Errorf("Unexpected keyIn filename: %s", tt.args.keyIn) } switch tt.args.keyOut { case positiveKeyOut: o. EXPECT(). WriteFile(gomock.Eq(positiveKeyOut), gomock.Any(), gomock.Any()). Return(nil). MinTimes(0). MaxTimes(1) default: t.Errorf("Unexpected keyOut filename: %s", tt.args.keyOut) } switch tt.args.reqOut { case positiveReqOut: o. EXPECT(). WriteFile(gomock.Eq(positiveReqOut), gomock.Any(), gomock.Any()). Return(nil). MinTimes(0). MaxTimes(1) default: t.Errorf("Unexpected reqOut filename: %s", tt.args.reqOut) } if err := certificates.MakeReq(tt.args.opts, tt.args.keyIn, tt.args.keyOut, tt.args.reqOut, o); (err != nil) != tt.wantErr { t.Errorf("MakeReq() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestSignReq(t *testing.T) { type args struct { opts *certificates.CertOptions caCrtPath string caKeyPath string reqPath string certOut string verify bool } positiveCaCrtPath := "/tmp/receptor_ca_cert.pem" positiveCaKeyPath := "/tmp/receptor_ca_key.pem" positiveCertOut := "/tmp/receptor_cert_out.pem" positiveReqPath := "/tmp/receptor_request.pem" positiveCertOptions, _, err := setupGoodCertRequest() if err != nil { t.Errorf("Invalid good Certificate Request: %+v", err) } negativeCaTimeNotAfterString := "2021-01-07T00:03:51Z" negativeCaTimeNotAfter, err := time.Parse(time.RFC3339, negativeCaTimeNotAfterString) if err != nil { t.Errorf("Invalid CA after time: %+v", err) } negativeCaTimeNotBeforeString := "2022-01-07T00:03:51Z" negativeCaTimeNotBefore, err := time.Parse(time.RFC3339, negativeCaTimeNotBeforeString) if err != nil { t.Errorf("Invalid CA before time: %+v", err) } negativeReqPath := "/tmp/receptor_request_bad.pem" negativeDNSName := "receptor.TEST.BAD" negativeIPAddress := net.ParseIP("127.0.0.1").To4() negativeNodeIDs := negativeDNSName tests := []struct { name string args args wantErr bool }{ { name: "Positive test", args: args{ opts: &positiveCertOptions, caCrtPath: positiveCaCrtPath, caKeyPath: positiveCaKeyPath, reqPath: positiveReqPath, certOut: positiveCertOut, verify: true, }, wantErr: false, }, { name: "Negative test", args: args{ opts: &certificates.CertOptions{ Bits: -1, CertNames: certificates.CertNames{ DNSNames: []string{ negativeDNSName, }, IPAddresses: []net.IP{ negativeIPAddress, }, NodeIDs: []string{ negativeNodeIDs, }, }, CommonName: "Ansible Automation Controller Nodes Mesh", NotAfter: negativeCaTimeNotAfter, NotBefore: negativeCaTimeNotBefore, }, caCrtPath: positiveCaCrtPath, caKeyPath: positiveCaKeyPath, reqPath: negativeReqPath, certOut: positiveCertOut, verify: true, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() o := mock_certificates.NewMockOser(ctrl) switch tt.args.caCrtPath { case positiveCaCrtPath: o. EXPECT(). ReadFile(gomock.Eq(positiveCaCrtPath)). Return(setupGoodCaCertificatePEMData(), nil). Times(1) default: t.Errorf("Unexpected filename: %s", tt.args.caCrtPath) } switch tt.args.caKeyPath { case positiveCaKeyPath: o. EXPECT(). ReadFile(gomock.Eq(positiveCaKeyPath)). Return(setupGoodCaRsaPrivateKeyPEMData(), nil). Times(1) default: t.Errorf("Unexpected filename: %s", tt.args.reqPath) } switch tt.args.reqPath { case negativeReqPath: o. EXPECT(). ReadFile(gomock.Eq(negativeReqPath)). Return(setupGoodCertificatePEMData(), nil). Times(1) case positiveReqPath: o. EXPECT(). ReadFile(gomock.Eq(positiveReqPath)). 
Return(setupGoodCertificateRequestPEMData(), nil). Times(1) default: t.Errorf("Unexpected filename: %s", tt.args.reqPath) } if err := certificates.SignReq(tt.args.opts, tt.args.caCrtPath, tt.args.caKeyPath, tt.args.reqPath, tt.args.certOut, tt.args.verify, o); (err != nil) != tt.wantErr { t.Errorf("SignReq() error = %v, wantErr %v", err, tt.wantErr) } }) } } receptor-1.5.3/pkg/certificates/cmdline.go000066400000000000000000000003111475465740700205510ustar00rootroot00000000000000package certificates import "github.com/ghjm/cmdline" var certSection = &cmdline.ConfigSection{ Description: "Commands to generate certificates and run a certificate authority", Order: 90, } receptor-1.5.3/pkg/certificates/mock_certificates/000077500000000000000000000000001475465740700222725ustar00rootroot00000000000000receptor-1.5.3/pkg/certificates/mock_certificates/Oser.go000066400000000000000000000037351475465740700235410ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: github.com/ansible/receptor/pkg/certificates (interfaces: Oser) // // Generated by this command: // // mockgen -destination=mock_certificates/Oser.go github.com/ansible/receptor/pkg/certificates Oser // // Package mock_certificates is a generated GoMock package. package mock_certificates import ( fs "io/fs" reflect "reflect" gomock "go.uber.org/mock/gomock" ) // MockOser is a mock of Oser interface. type MockOser struct { ctrl *gomock.Controller recorder *MockOserMockRecorder } // MockOserMockRecorder is the mock recorder for MockOser. type MockOserMockRecorder struct { mock *MockOser } // NewMockOser creates a new mock instance. func NewMockOser(ctrl *gomock.Controller) *MockOser { mock := &MockOser{ctrl: ctrl} mock.recorder = &MockOserMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockOser) EXPECT() *MockOserMockRecorder { return m.recorder } // ReadFile mocks base method. func (m *MockOser) ReadFile(arg0 string) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFile", arg0) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // ReadFile indicates an expected call of ReadFile. func (mr *MockOserMockRecorder) ReadFile(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFile", reflect.TypeOf((*MockOser)(nil).ReadFile), arg0) } // WriteFile mocks base method. func (m *MockOser) WriteFile(arg0 string, arg1 []byte, arg2 fs.FileMode) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteFile", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // WriteFile indicates an expected call of WriteFile. func (mr *MockOserMockRecorder) WriteFile(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteFile", reflect.TypeOf((*MockOser)(nil).WriteFile), arg0, arg1, arg2) } receptor-1.5.3/pkg/certificates/mock_certificates/Rsaer.go000066400000000000000000000027111475465740700236760ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: ../ca.go // // Generated by this command: // // mockgen --source=../ca.go // // Package mock_certificates is a generated GoMock package. package mock_certificates import ( rsa "crypto/rsa" io "io" reflect "reflect" gomock "go.uber.org/mock/gomock" ) // MockRsaer is a mock of Rsaer interface. type MockRsaer struct { ctrl *gomock.Controller recorder *MockRsaerMockRecorder } // MockRsaerMockRecorder is the mock recorder for MockRsaer. 
type MockRsaerMockRecorder struct { mock *MockRsaer } // NewMockRsaer creates a new mock instance. func NewMockRsaer(ctrl *gomock.Controller) *MockRsaer { mock := &MockRsaer{ctrl: ctrl} mock.recorder = &MockRsaerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockRsaer) EXPECT() *MockRsaerMockRecorder { return m.recorder } // GenerateKey mocks base method. func (m *MockRsaer) GenerateKey(random io.Reader, bits int) (*rsa.PrivateKey, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GenerateKey", random, bits) ret0, _ := ret[0].(*rsa.PrivateKey) ret1, _ := ret[1].(error) return ret0, ret1 } // GenerateKey indicates an expected call of GenerateKey. func (mr *MockRsaerMockRecorder) GenerateKey(random, bits any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateKey", reflect.TypeOf((*MockRsaer)(nil).GenerateKey), random, bits) } receptor-1.5.3/pkg/controlsvc/000077500000000000000000000000001475465740700163435ustar00rootroot00000000000000receptor-1.5.3/pkg/controlsvc/connect.go000066400000000000000000000041741475465740700203310ustar00rootroot00000000000000package controlsvc import ( "context" "fmt" "strings" "github.com/ansible/receptor/pkg/netceptor" ) type ( ConnectCommandType struct{} ConnectCommand struct { targetNode string targetService string tlsConfigName string } ) func (t *ConnectCommandType) InitFromString(params string) (ControlCommand, error) { tokens := strings.Split(params, " ") if len(tokens) < 2 { return nil, fmt.Errorf("no connect target") } if len(tokens) > 3 { return nil, fmt.Errorf("too many parameters") } var tlsConfigName string if len(tokens) == 3 { tlsConfigName = tokens[2] } c := &ConnectCommand{ targetNode: tokens[0], targetService: tokens[1], tlsConfigName: tlsConfigName, } return c, nil } func (t *ConnectCommandType) InitFromJSON(config map[string]interface{}) (ControlCommand, error) { targetNode, ok := config["node"] if !ok { return nil, fmt.Errorf("no connect target node") } targetNodeStr, ok := targetNode.(string) if !ok { return nil, fmt.Errorf("connect target node must be string") } targetService, ok := config["service"] if !ok { return nil, fmt.Errorf("no connect target service") } targetServiceStr, ok := targetService.(string) if !ok { return nil, fmt.Errorf("connect target service must be string") } var tlsConfigStr string tlsConfig, ok := config["tls"] if ok { tlsConfigStr, ok = tlsConfig.(string) if !ok { return nil, fmt.Errorf("connect tls name must be string") } } else { tlsConfigStr = "" } c := &ConnectCommand{ targetNode: targetNodeStr, targetService: targetServiceStr, tlsConfigName: tlsConfigStr, } return c, nil } func (c *ConnectCommand) ControlFunc(_ context.Context, nc NetceptorForControlCommand, cfo ControlFuncOperations) (map[string]interface{}, error) { tlscfg, err := nc.GetClientTLSConfig(c.tlsConfigName, c.targetNode, netceptor.ExpectedHostnameTypeReceptor) if err != nil { return nil, err } rc, err := nc.Dial(c.targetNode, c.targetService, tlscfg) if err != nil { return nil, err } err = cfo.BridgeConn("Connecting\n", rc, "connected service", nc.GetLogger(), &Util{}) if err != nil { return nil, err } return nil, nil } receptor-1.5.3/pkg/controlsvc/connect_test.go000066400000000000000000000124511475465740700213650ustar00rootroot00000000000000package controlsvc_test import ( "context" "errors" "testing" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/controlsvc/mock_controlsvc" 
"github.com/ansible/receptor/pkg/logger" "go.uber.org/mock/gomock" ) func CheckExpectedError(expectedError bool, errorMessage string, t *testing.T, err error) { if expectedError && errorMessage != err.Error() { t.Errorf("expected: %s , received: %s", errorMessage, err) } if !expectedError && err != nil { t.Error(err) } } func TestConnectInitFromString(t *testing.T) { connectCommandType := controlsvc.ConnectCommandType{} initFromStringTestCases := []struct { name string expectedError bool errorMessage string input string }{ { name: "no connect target", expectedError: true, errorMessage: "no connect target", input: "", }, { name: "too many parameters", expectedError: true, errorMessage: "too many parameters", input: "one two three four", }, { name: "three params - pass", expectedError: false, errorMessage: "", input: "one two three", }, } for _, testCase := range initFromStringTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := connectCommandType.InitFromString(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestConnectInitFromJSON(t *testing.T) { connectCommandType := controlsvc.ConnectCommandType{} initFromJSONTestCases := []struct { name string expectedError bool errorMessage string input map[string]interface{} }{ BuildInitFromJSONTestCases("no connect target node", true, "no connect target node", map[string]interface{}{}), BuildInitFromJSONTestCases("connect target node must be string 1", true, "connect target node must be string", map[string]interface{}{"node": 7}), BuildInitFromJSONTestCases("no connect target service", true, "no connect target service", map[string]interface{}{"node": "node1"}), BuildInitFromJSONTestCases("connect target service must be string1", true, "connect target service must be string", map[string]interface{}{"node": "node2", "service": 7}), BuildInitFromJSONTestCases("connect tls name be string", true, "connect tls name must be string", map[string]interface{}{"node": "node3", "service": "service1", "tls": 7}), BuildInitFromJSONTestCases("pass with empty tls config", false, "connect target service must be string", map[string]interface{}{"node": "node4", "service": "service2"}), BuildInitFromJSONTestCases("pass with all targets and tls config", false, "", map[string]interface{}{"node": "node4", "service": "service3", "tls": "tls1"}), } for _, testCase := range initFromJSONTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := connectCommandType.InitFromJSON(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestConnectControlFunc(t *testing.T) { connectCommand := controlsvc.ConnectCommand{} ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockControlFunc := mock_controlsvc.NewMockControlFuncOperations(ctrl) logger := logger.NewReceptorLogger("") controlFuncTestCases := []struct { name string expectedError bool errorMessage string expectedCalls func() }{ { name: "tls config error", expectedError: true, errorMessage: "terminated tls", expectedCalls: func() { mockNetceptor.EXPECT().GetClientTLSConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("terminated tls")) }, }, { name: "dial error", errorMessage: "terminated dial", expectedError: true, expectedCalls: func() { mockNetceptor.EXPECT().GetClientTLSConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) mockNetceptor.EXPECT().Dial(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("terminated 
dial")) }, }, { name: "bridge conn error", errorMessage: "terminated bridge conn", expectedError: true, expectedCalls: func() { mockNetceptor.EXPECT().GetClientTLSConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) mockNetceptor.EXPECT().Dial(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) mockControlFunc.EXPECT().BridgeConn(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("terminated bridge conn")) mockNetceptor.EXPECT().GetLogger().Return(logger) }, }, { name: "control func pass", errorMessage: "", expectedError: false, expectedCalls: func() { mockNetceptor.EXPECT().GetClientTLSConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) mockNetceptor.EXPECT().Dial(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) mockControlFunc.EXPECT().BridgeConn(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) mockNetceptor.EXPECT().GetLogger().Return(logger) }, }, } for _, testCase := range controlFuncTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() _, err := connectCommand.ControlFunc(context.Background(), mockNetceptor, mockControlFunc) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } receptor-1.5.3/pkg/controlsvc/controlsvc.go000066400000000000000000000360251475465740700210740ustar00rootroot00000000000000//go:build !no_controlsvc // +build !no_controlsvc package controlsvc import ( "bufio" "context" "crypto/tls" "encoding/json" "fmt" "io" "io/fs" "net" "os" "reflect" "runtime" "strconv" "strings" "sync" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) const ( normalCloseError = "normal close" writeControlServiceError = "Write error in control service" ) type Copier interface { Copy(dst io.Writer, src io.Reader) (written int64, err error) } type SocketConnIO struct{} func (s *SocketConnIO) Copy(dst io.Writer, src io.Reader) (written int64, err error) { return io.Copy(dst, src) } type NetceptorForControlsvc interface { ListenAndAdvertise(service string, tlscfg *tls.Config, tags map[string]string) (*netceptor.Listener, error) NetceptorForControlCommand } type Utiler interface { BridgeConns(c1 io.ReadWriteCloser, c1Name string, c2 io.ReadWriteCloser, c2Name string, logger *logger.ReceptorLogger) UnixSocketListen(filename string, permissions fs.FileMode) (net.Listener, *utils.FLock, error) } type Util struct{} func (u *Util) BridgeConns(c1 io.ReadWriteCloser, c1Name string, c2 io.ReadWriteCloser, c2Name string, logger *logger.ReceptorLogger) { utils.BridgeConns(c1, c1Name, c2, c2Name, logger) } func (u *Util) UnixSocketListen(filename string, permissions fs.FileMode) (net.Listener, *utils.FLock, error) { return utils.UnixSocketListen(filename, permissions) } type Neter interface { Listen(network string, address string) (net.Listener, error) } type Net struct{} func (n *Net) Listen(network string, address string) (net.Listener, error) { return net.Listen(network, address) } type Tlser interface { NewListener(inner net.Listener, config *tls.Config) net.Listener } type TLS struct{} func (t *TLS) NewListener(inner net.Listener, config *tls.Config) net.Listener { return tls.NewListener(inner, config) } // SockControl implements the ControlFuncOperations interface that is passed back to control functions. 
type SockControl struct { conn net.Conn } func NewSockControl(conn net.Conn) *SockControl { return &SockControl{ conn: conn, } } func (s *SockControl) RemoteAddr() net.Addr { return s.conn.RemoteAddr() } // WriteMessage attempts to write a message to a connection. func (s *SockControl) WriteMessage(message string) error { if message != "" { _, err := s.conn.Write([]byte(message)) if err != nil { return err } } return nil } // BridgeConn bridges the socket to another socket. func (s *SockControl) BridgeConn(message string, bc io.ReadWriteCloser, bcName string, logger *logger.ReceptorLogger, utils Utiler) error { if err := s.WriteMessage(message); err != nil { return err } utils.BridgeConns(s.conn, "control service", bc, bcName, logger) return nil } // ReadFromConn copies from the socket to an io.Writer, until EOF. func (s *SockControl) ReadFromConn(message string, out io.Writer, io Copier) error { if err := s.WriteMessage(message); err != nil { return err } payloadDebug, _ := strconv.Atoi(os.Getenv("RECEPTOR_PAYLOAD_TRACE_LEVEL")) if payloadDebug != 0 { var connectionType string var payload string if s.conn.LocalAddr().Network() == "unix" { connectionType = "unix socket" } else { connectionType = "network connection" } reader := bufio.NewReader(s.conn) for { response, err := reader.ReadString('\n') if err != nil { if err.Error() != "EOF" { MainInstance.nc.GetLogger().Error("Error reading from conn: %v \n", err) } break } payload += response } MainInstance.nc.GetLogger().DebugPayload(payloadDebug, payload, "", connectionType) if _, err := out.Write([]byte(payload)); err != nil { return err } } else { if _, err := io.Copy(out, s.conn); err != nil { return err } } return nil } // WriteToConn writes an initial string, and then messages to a channel, to the connection. func (s *SockControl) WriteToConn(message string, in chan []byte) error { if err := s.WriteMessage(message); err != nil { return err } for bytes := range in { _, err := s.conn.Write(bytes) if err != nil { return err } } return nil } func (s *SockControl) Close() error { return s.conn.Close() } // Server is an instance of a control service. type Server struct { nc NetceptorForControlsvc controlFuncLock sync.RWMutex controlTypes map[string]ControlCommandType serverUtils Utiler serverNet Neter serverTLS Tlser } // New returns a new instance of a control service. func New(stdServices bool, nc NetceptorForControlsvc) *Server { s := &Server{ nc: nc, controlFuncLock: sync.RWMutex{}, controlTypes: make(map[string]ControlCommandType), serverUtils: &Util{}, serverNet: &Net{}, serverTLS: &TLS{}, } if stdServices { s.controlTypes["ping"] = &PingCommandType{} s.controlTypes["status"] = &StatusCommandType{} s.controlTypes["connect"] = &ConnectCommandType{} s.controlTypes["traceroute"] = &TracerouteCommandType{} s.controlTypes["reload"] = &ReloadCommandType{} } return s } func (s *Server) SetServerUtils(u Utiler) { s.serverUtils = u } func (s *Server) SetServerNet(n Neter) { s.serverNet = n } func (s *Server) SetServerTLS(t Tlser) { s.serverTLS = t } // MainInstance is the global instance of the control service instantiated by the command-line main() function. var MainInstance *Server // AddControlFunc registers a function that can be used from a control socket. 
func (s *Server) AddControlFunc(name string, cType ControlCommandType) error { s.controlFuncLock.Lock() defer s.controlFuncLock.Unlock() if _, ok := s.controlTypes[name]; ok { return fmt.Errorf("control function named %s already exists", name) } s.controlTypes[name] = cType return nil } func errorNormal(nc NetceptorForControlsvc, logMessage string, err error) bool { if err == nil { return false } if !strings.HasSuffix(err.Error(), normalCloseError) { nc.GetLogger().Error("%s: %s\n", logMessage, err) } return true } func writeToConnWithLog(conn net.Conn, nc NetceptorForControlsvc, writeMessage string, logMessage string) bool { _, err := conn.Write([]byte(writeMessage)) return errorNormal(nc, logMessage, err) } // RunControlSession runs the server protocol on the given connection. func (s *Server) RunControlSession(conn net.Conn) { s.nc.GetLogger().Debug("Client connected to control service %s\n", conn.RemoteAddr().String()) defer func() { s.nc.GetLogger().Debug("Client disconnected from control service %s\n", conn.RemoteAddr().String()) if conn != nil { err := conn.Close() if err != nil { s.nc.GetLogger().Warning("Could not close connection: %s\n", err) } } }() writeMsg := fmt.Sprintf("Receptor Control, node %s\n", s.nc.NodeID()) logMsg := "Could not write in control service" if writeToConnWithLog(conn, s.nc, writeMsg, logMsg) { return } done := false for !done { // Inefficiently read one line from the socket - we can't use bufio // because we cannot read ahead beyond the newline character cmdBytes := make([]byte, 0) buf := make([]byte, 1) for { n, err := conn.Read(buf) if err == io.EOF { s.nc.GetLogger().Debug("Control service closed\n") done = true break } else if err != nil { if !strings.HasSuffix(err.Error(), normalCloseError) { s.nc.GetLogger().Warning("Could not read in control service: %s\n", err) } return } if n == 1 { if buf[0] == '\r' { continue } else if buf[0] == '\n' { break } cmdBytes = append(cmdBytes, buf[0]) } } if len(cmdBytes) == 0 { continue } var cmd string var params string var jsonData map[string]interface{} if cmdBytes[0] == '{' { err := json.Unmarshal(cmdBytes, &jsonData) if err == nil { cmdIf, ok := jsonData["command"] if ok { cmd, ok = cmdIf.(string) if !ok { err = fmt.Errorf("command must be a string") } } else { err = fmt.Errorf("JSON did not contain a command") } } if err != nil { writeMsg := fmt.Sprintf("ERROR: %s\n", err) if writeToConnWithLog(conn, s.nc, writeMsg, writeControlServiceError) { return } } } else { tokens := strings.SplitN(string(cmdBytes), " ", 2) if len(tokens) > 0 { cmd = strings.ToLower(tokens[0]) if len(tokens) > 1 { params = tokens[1] } } } s.controlFuncLock.RLock() var ct ControlCommandType for f := range s.controlTypes { if f == cmd { ct = s.controlTypes[f] break } } s.controlFuncLock.RUnlock() if ct != nil { cfo := NewSockControl(conn) var cfr map[string]interface{} var cc ControlCommand var err error if jsonData == nil { cc, err = ct.InitFromString(params) } else { cc, err = ct.InitFromJSON(jsonData) } if err == nil { ctx, cancel := context.WithCancel(context.Background()) defer cancel() cfr, err = cc.ControlFunc(ctx, s.nc, cfo) } if err != nil { errorNormal(s.nc, "", err) writeMsg := fmt.Sprintf("ERROR: %s\n", err) if writeToConnWithLog(conn, s.nc, writeMsg, writeControlServiceError) { return } } else if cfr != nil { rbytes, err := json.Marshal(cfr) if err != nil { writeMsg := fmt.Sprintf("ERROR: could not convert response to JSON: %s\n", err) if writeToConnWithLog(conn, s.nc, writeMsg, writeControlServiceError) { return } } rbytes = 
append(rbytes, '\n') writeMsg := string(rbytes) if writeToConnWithLog(conn, s.nc, writeMsg, writeControlServiceError) { return } } } else { writeMsg := fmt.Sprintf("ERROR: Unknown command, %v\n", cmd) if writeToConnWithLog(conn, s.nc, writeMsg, writeControlServiceError) { return } } } } func (s *Server) ConnectionListener(ctx context.Context, listener net.Listener) { for { if ctx.Err() != nil { return } conn, err := listener.Accept() if err != nil { if !strings.HasSuffix(err.Error(), normalCloseError) { s.nc.GetLogger().Error("Error accepting connection: %s\n", err) } continue } go s.SetupConnection(conn) } } func (s *Server) SetupConnection(conn net.Conn) { defer conn.Close() tlsConn, ok := conn.(*tls.Conn) if ok { // Explicitly run server TLS handshake so we can deal with timeout and errors here err := conn.SetDeadline(time.Now().Add(10 * time.Second)) if err != nil { s.nc.GetLogger().Error("Error setting timeout: %s. Closing socket.\n", err) return } err = tlsConn.Handshake() if err != nil { s.nc.GetLogger().Error("TLS handshake error: %s. Closing socket.\n", err) return } err = conn.SetDeadline(time.Time{}) if err != nil { s.nc.GetLogger().Error("Error clearing timeout: %s. Closing socket.\n", err) return } } s.RunControlSession(conn) } // RunControlSvc runs the main accept loop of the control service. func (s *Server) RunControlSvc(ctx context.Context, service string, tlscfg *tls.Config, unixSocket string, unixSocketPermissions os.FileMode, tcpListen string, tcptls *tls.Config, ) error { var uli net.Listener var lock *utils.FLock var err error if unixSocket != "" { uli, lock, err = s.serverUtils.UnixSocketListen(unixSocket, unixSocketPermissions) if err != nil { return fmt.Errorf("error opening Unix socket: %s", err) } } else { uli = nil } var tli net.Listener if tcpListen != "" { var listenAddr string if strings.Contains(tcpListen, ":") { listenAddr = tcpListen } else { listenAddr = fmt.Sprintf("0.0.0.0:%s", tcpListen) } tli, err = s.serverNet.Listen("tcp", listenAddr) if err != nil { return fmt.Errorf("error listening on TCP socket: %s", err) } if tcptls != nil { tli = s.serverTLS.NewListener(tli, tcptls) } } else { tli = nil } var li *netceptor.Listener if service != "" { li, err = s.nc.ListenAndAdvertise(service, tlscfg, map[string]string{ "type": "Control Service", }) if err != nil { return fmt.Errorf("error opening Unix socket: %s", err) } } else { li = nil } if uli == nil && tli == nil && li == nil { return fmt.Errorf("no listeners specified") } s.nc.GetLogger().Info("Running control service %s\n", service) go func() { <-ctx.Done() if uli != nil { _ = uli.Close() _ = lock.Unlock() } if li != nil { _ = li.Close() } if tli != nil { _ = tli.Close() } }() for _, listener := range []net.Listener{uli, tli, li} { if listener == nil || reflect.ValueOf(listener).IsNil() { continue } go s.ConnectionListener(ctx, listener) } return nil } // ************************************************************************** // Command line // ************************************************************************** // cmdlineConfigWindows is the cmdline configuration object for a control service on Windows. 
type cmdlineConfigWindows struct { Service string `description:"Receptor service name to listen on" default:"control"` TLS string `description:"Name of TLS server config for the Receptor listener"` TCPListen string `description:"Local TCP port or host:port to bind to the control service"` TCPTLS string `description:"Name of TLS server config for the TCP listener"` } // cmdlineConfigUnix is the cmdline configuration object for a control service on Unix. type CmdlineConfigUnix struct { Service string `description:"Receptor service name to listen on" default:"control"` Filename string `description:"Specifies the filename of a local Unix socket to bind to the service."` Permissions int `description:"Socket file permissions" default:"0600"` TLS string `description:"Name of TLS server config for the Receptor listener"` TCPListen string `description:"Local TCP port or host:port to bind to the control service"` TCPTLS string `description:"Name of TLS server config for the TCP listener"` } // Run runs the action. func (cfg CmdlineConfigUnix) Run() error { if cfg.TLS != "" && cfg.TCPListen != "" && cfg.TCPTLS == "" { netceptor.MainInstance.Logger.Warning("Control service %s has TLS configured on the Receptor listener but not the TCP listener.", cfg.Service) } tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS) if err != nil { return err } var tcptls *tls.Config if cfg.TCPListen != "" { tcptls, err = netceptor.MainInstance.GetServerTLSConfig(cfg.TCPTLS) if err != nil { return err } } err = MainInstance.RunControlSvc(context.Background(), cfg.Service, tlscfg, cfg.Filename, os.FileMode(cfg.Permissions), cfg.TCPListen, tcptls) //nolint:gosec if err != nil { return err } return nil } // Run runs the action. func (cfg cmdlineConfigWindows) Run() error { return CmdlineConfigUnix{ Service: cfg.Service, TLS: cfg.TLS, TCPListen: cfg.TCPListen, TCPTLS: cfg.TCPTLS, }.Run() } func init() { version := viper.GetInt("version") if version > 1 { return } if runtime.GOOS == "windows" { cmdline.RegisterConfigTypeForApp("receptor-control-service", "control-service", "Runs a control service", cmdlineConfigWindows{}) } else { cmdline.RegisterConfigTypeForApp("receptor-control-service", "control-service", "Runs a control service", CmdlineConfigUnix{}) } } receptor-1.5.3/pkg/controlsvc/controlsvc_stub.go000066400000000000000000000024661475465740700221330ustar00rootroot00000000000000//go:build no_controlsvc // +build no_controlsvc // Stub package to satisfy controlsvc dependencies while providing no functionality package controlsvc import ( "context" "crypto/tls" "fmt" "net" "os" "github.com/ansible/receptor/pkg/netceptor" ) // ErrNotImplemented is returned by most functions in this unit since it is a non-functional stub var ErrNotImplemented = fmt.Errorf("not implemented") // Server is an instance of a control service type Server struct{} // New returns a new instance of a control service. func New(stdServices bool, nc *netceptor.Netceptor) *Server { return &Server{} } // MainInstance is the global instance of the control service instantiated by the command-line main() function var MainInstance *Server // AddControlFunc registers a function that can be used from a control socket. 
func (s *Server) AddControlFunc(name string, cType ControlCommandType) error { return nil } // RunControlSession runs the server protocol on the given connection func (s *Server) RunControlSession(conn net.Conn) { } // RunControlSvc runs the main accept loop of the control service func (s *Server) RunControlSvc(ctx context.Context, service string, tlscfg *tls.Config, unixSocket string, unixSocketPermissions os.FileMode, tcpListen string, tcptls *tls.Config, ) error { return ErrNotImplemented } receptor-1.5.3/pkg/controlsvc/controlsvc_test.go000066400000000000000000000411411475465740700221260ustar00rootroot00000000000000package controlsvc_test import ( "context" "crypto/tls" "errors" "io" "net" "os" "testing" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/controlsvc/mock_controlsvc" "github.com/ansible/receptor/pkg/logger" "go.uber.org/mock/gomock" ) const ( writeToConnError = "write to conn write message err" ) func printErrorMessage(t *testing.T, err error) { t.Errorf("expected error %s", err) } func TestConnectionListener(t *testing.T) { ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockListener := mock_controlsvc.NewMockListener(ctrl) logger := logger.NewReceptorLogger("") connectionListenerTestCases := []struct { name string expectedError bool expectedCalls func(context.CancelFunc) }{ { name: "error accepting connection", expectedError: false, expectedCalls: func(ctxCancel context.CancelFunc) { mockListener.EXPECT().Accept().DoAndReturn(func() (net.Conn, error) { ctxCancel() return nil, errors.New("terminated") }) mockNetceptor.EXPECT().GetLogger().Return(logger) }, }, } for _, testCase := range connectionListenerTestCases { t.Run(testCase.name, func(t *testing.T) { ctx, ctxCancel := context.WithCancel(context.Background()) defer ctxCancel() testCase.expectedCalls(ctxCancel) s := controlsvc.New(false, mockNetceptor) s.ConnectionListener(ctx, mockListener) }) } } func TestSetupConnection(t *testing.T) { ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockConn := mock_controlsvc.NewMockConn(ctrl) logger := logger.NewReceptorLogger("") setupConnectionTestCases := []struct { name string expectedCalls func() }{ { name: "log error - setting timeout", expectedCalls: func() { mockConn.EXPECT().SetDeadline(gomock.Any()).Return(errors.New("terminated")) mockNetceptor.EXPECT().GetLogger().Return(logger) mockConn.EXPECT().Close() }, }, { name: "log error - tls handshake", expectedCalls: func() { mockConn.EXPECT().SetDeadline(gomock.Any()).Return(nil) mockNetceptor.EXPECT().GetLogger().Return(logger) mockConn.EXPECT().Close().AnyTimes() }, }, } for _, testCase := range setupConnectionTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() s := controlsvc.New(false, mockNetceptor) tlsConn := tls.Client(mockConn, &tls.Config{}) s.SetupConnection(tlsConn) }) } } func TestRunControlSvc(t *testing.T) { ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockUnix := mock_controlsvc.NewMockUtiler(ctrl) mockNet := mock_controlsvc.NewMockNeter(ctrl) mockTLS := mock_controlsvc.NewMockTlser(ctrl) mockListener := mock_controlsvc.NewMockListener(ctrl) logger := logger.NewReceptorLogger("") runControlSvcTestCases := []struct { name string expectedError string expectedCalls func() listeners map[string]string }{ { name: "unix listener error", expectedError: "error opening Unix socket: terminated", 
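// UnixSocketListen is mocked to fail below, so RunControlSvc is expected to wrap the failure as a Unix-socket open error.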
expectedCalls: func() { mockUnix.EXPECT().UnixSocketListen(gomock.Any(), gomock.Any()).Return(nil, nil, errors.New("terminated")) }, listeners: map[string]string{ "service": "", "unixSocket": "unix", "tcpListen": "", }, }, { name: "tcp listener error", expectedError: "error listening on TCP socket: terminated", expectedCalls: func() { mockNet.EXPECT().Listen(gomock.Any(), gomock.Any()).Return(nil, errors.New("terminated")) }, listeners: map[string]string{ "service": "", "unixSocket": "", "tcpListen": "tcp", }, }, { name: "service listener error", expectedError: "error opening Unix socket: terminated", expectedCalls: func() { mockNetceptor.EXPECT().ListenAndAdvertise(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, errors.New("terminated")) }, listeners: map[string]string{ "service": "service", "unixSocket": "", "tcpListen": "", }, }, { name: "no listeners error", expectedError: "no listeners specified", expectedCalls: func() { // empty func for testing }, listeners: map[string]string{ "service": "", "unixSocket": "", "tcpListen": "", }, }, { name: "tcp listener set", expectedError: "", expectedCalls: func() { mockNet.EXPECT().Listen(gomock.Any(), gomock.Any()).Return(mockListener, nil) mockTLS.EXPECT().NewListener(gomock.Any(), gomock.Any()).Return(mockListener) mockNetceptor.EXPECT().GetLogger().Return(logger).AnyTimes() mockListener.EXPECT().Accept().Return(nil, errors.New("normal close")).AnyTimes() }, listeners: map[string]string{ "service": "", "unixSocket": "", "tcpListen": "tcp:1", }, }, } for _, testCase := range runControlSvcTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() s := controlsvc.New(false, mockNetceptor) s.SetServerUtils(mockUnix) s.SetServerNet(mockNet) s.SetServerTLS(mockTLS) err := s.RunControlSvc(context.Background(), testCase.listeners["service"], &tls.Config{}, testCase.listeners["unixSocket"], os.FileMode(0o600), testCase.listeners["tcpListen"], &tls.Config{}) if err != nil && err.Error() != testCase.expectedError { t.Errorf("expected error %s, got %v", testCase.expectedError, err) } }) } } func TestSockControlRemoteAddr(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) mockAddr := mock_controlsvc.NewMockAddr(ctrl) sockControl := controlsvc.NewSockControl(mockCon) localhost := "127.0.0.1" mockCon.EXPECT().RemoteAddr().Return(mockAddr) mockAddr.EXPECT().String().Return(localhost) remoteAddr := sockControl.RemoteAddr() if remoteAddr.String() != localhost { t.Errorf("expected: %s, received: %s", localhost, remoteAddr) } } func TestSockControlWriteMessage(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) sockControl := controlsvc.NewSockControl(mockCon) writeMessageTestCases := []struct { name string message string expectedError bool expectedCalls func() }{ { name: "pass without message", message: "", expectedError: false, expectedCalls: func() { // empty func for testing }, }, { name: "fail with message", message: "message", expectedError: true, expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New("cannot write message")) }, }, { name: "pass with message", message: "message", expectedError: false, expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, nil) }, }, } for _, testCase := range writeMessageTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := sockControl.WriteMessage(testCase.message) if !testCase.expectedError && err != nil { t.Errorf("write message ran 
unsuccessfully %s", err) } if testCase.expectedError && err.Error() != "cannot write message" { printErrorMessage(t, err) } }) } } func TestSockControlBridgeConn(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) mockUtil := mock_controlsvc.NewMockUtiler(ctrl) logger := logger.NewReceptorLogger("") sockControl := controlsvc.NewSockControl(mockCon) bridgeConnTestCases := []struct { name string message string expectedCalls func() }{ { name: "without message and no error", message: "", expectedCalls: func() { mockUtil.EXPECT().BridgeConns(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()) }, }, { name: "with message and error", message: "message", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New("terminated")) }, }, } for _, testCase := range bridgeConnTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := sockControl.BridgeConn(testCase.message, mockCon, "test", logger, mockUtil) if testCase.message == "" && err != nil { t.Errorf("bridge conn ran unsuccessfully") } if testCase.message != "" && err.Error() != "terminated" { t.Errorf("write message error for bridge conn %v", err) } }) } } func TestSockControlReadFromConn(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) mockCopier := mock_controlsvc.NewMockCopier(ctrl) sockControl := controlsvc.NewSockControl(mockCon) bridgeConnTestCases := []struct { name string message string expectedCalls func() expectedError bool errorMessage string }{ { name: "without message and copier error", message: "", expectedCalls: func() { mockCopier.EXPECT().Copy(gomock.Any(), gomock.Any()).Return(int64(0), errors.New("read from conn copy error")) }, expectedError: true, errorMessage: "read from conn copy error", }, { name: "with message and no error", message: "message", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New("read from conn write error")) }, expectedError: true, errorMessage: "read from conn write error", }, { name: "without message and no copier error", message: "", expectedCalls: func() { mockCopier.EXPECT().Copy(gomock.Any(), gomock.Any()).Return(int64(0), nil) }, expectedError: false, errorMessage: "", }, } for _, testCase := range bridgeConnTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := sockControl.ReadFromConn(testCase.message, mockCon, mockCopier) if testCase.expectedError && err.Error() != testCase.errorMessage { printErrorMessage(t, err) } if !testCase.expectedError && err != nil { printErrorMessage(t, err) } }) } } func TestSockControlWriteToConn(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) sockControl := controlsvc.NewSockControl(mockCon) bridgeConnTestCases := []struct { name string message string input chan []byte expectedCalls func() expectedError bool errorMessage string }{ { name: "without message and with error", message: "", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New("write to conn chan error")) }, expectedError: true, errorMessage: "write to conn chan error", }, { name: "with message and with error", message: "message", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New(writeToConnError)) }, expectedError: true, errorMessage: writeToConnError, }, { name: "without message and error", message: "", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, nil) }, 
expectedError: false, errorMessage: writeToConnError, }, } for _, testCase := range bridgeConnTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() c := make(chan []byte) go func(c chan []byte) { c <- []byte{7} defer close(c) }(c) err := sockControl.WriteToConn(testCase.message, c) if testCase.expectedError && err.Error() != testCase.errorMessage { printErrorMessage(t, err) } if !testCase.expectedError && err != nil { printErrorMessage(t, err) } }) } } func TestSockControlClose(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) sockControl := controlsvc.NewSockControl(mockCon) errorMessage := "cannot close connection" mockCon.EXPECT().Close().Return(errors.New(errorMessage)) err := sockControl.Close() if err == nil && err.Error() != errorMessage { printErrorMessage(t, err) } } func TestAddControlFunc(t *testing.T) { ctrl := gomock.NewController(t) mockCtrlCmd := mock_controlsvc.NewMockControlCommandType(ctrl) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) controlFuncTestsCases := []struct { name string input string errorMessage string testCase func(msg string, err error) }{ { name: "ping command", input: "ping", errorMessage: "control function named ping already exists", testCase: func(msg string, err error) { if msg != err.Error() { t.Errorf("expected error: %s, received: %s", msg, err) } }, }, { name: "obliterate command", input: "obliterate", testCase: func(msg string, err error) { if err != nil { t.Errorf("error should be nil. received %s", err) } }, }, } for _, testCase := range controlFuncTestsCases { t.Run(testCase.name, func(t *testing.T) { s := controlsvc.New(true, mockNetceptor) err := s.AddControlFunc(testCase.input, mockCtrlCmd) testCase.testCase(testCase.errorMessage, err) }) } } func TestRunControlSession(t *testing.T) { ctrl := gomock.NewController(t) mockCon := mock_controlsvc.NewMockConn(ctrl) mockAddr := mock_controlsvc.NewMockAddr(ctrl) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) logger := logger.NewReceptorLogger("") mandatoryExpects := func() { mockNetceptor.EXPECT().GetLogger().Return(logger).Times(3) mockCon.EXPECT().RemoteAddr().Return(mockAddr).Times(2) mockAddr.EXPECT().String().Times(2) mockNetceptor.EXPECT().NodeID() } runControlSessionTestCases := []struct { name string expectedCalls func() }{ { name: "logger warning - could not close connection", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, nil) mockCon.EXPECT().Read(make([]byte, 1)).Return(0, io.EOF) mockCon.EXPECT().Close().Return(errors.New("test")) mockNetceptor.EXPECT().GetLogger().Return(logger) }, }, { name: "logger error", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, errors.New("test")) mockCon.EXPECT().Close() }, }, { name: "logger debug - control service closed", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, nil) mockCon.EXPECT().Read(make([]byte, 1)).Return(0, io.EOF) mockCon.EXPECT().Close() }, }, { name: "logger warning - could not read in control service", expectedCalls: func() { mockCon.EXPECT().Write(gomock.Any()).Return(0, nil) mockCon.EXPECT().Read(make([]byte, 1)).Return(0, errors.New("terminated")) mockCon.EXPECT().Close() }, }, } for _, testCase := range runControlSessionTestCases { t.Run(testCase.name, func(t *testing.T) { mandatoryExpects() testCase.expectedCalls() s := controlsvc.New(false, mockNetceptor) s.RunControlSession(mockCon) }) } } func TestRunControlSessionTwo(t *testing.T) { ctrl := 
gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) logger := logger.NewReceptorLogger("") runControlSessionTestCases := []struct { name string expectedCalls func() commandByte []byte }{ { name: "command must be a string", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(4) }, commandByte: []byte("{\"command\": 0}"), }, { name: "JSON did not contain a command", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(4) }, commandByte: []byte("{}"), }, { name: "command must be a string", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(4) }, commandByte: []byte("{\"command\": \"echo\"}"), }, { name: "tokens", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(4) }, commandByte: []byte("a b"), }, { name: "control types - reload", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(6) }, commandByte: []byte("{\"command\": \"reload\"}"), }, { name: "control types - no ping target", expectedCalls: func() { mockNetceptor.EXPECT().NodeID() mockNetceptor.EXPECT().GetLogger().Return(logger).Times(5) }, commandByte: []byte("{\"command\": \"ping\"}"), }, } for _, testCase := range runControlSessionTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() s := controlsvc.New(true, mockNetceptor) pipeA, pipeB := net.Pipe() go func() { pipeA.Write(testCase.commandByte) pipeA.Close() }() go func() { io.ReadAll(pipeA) }() s.RunControlSession(pipeB) }) } } receptor-1.5.3/pkg/controlsvc/interfaces.go000066400000000000000000000030611475465740700210150ustar00rootroot00000000000000package controlsvc import ( "context" "crypto/tls" "io" "net" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" ) // ControlCommandType is a type of command that can be run from the control service. type ControlCommandType interface { InitFromString(string) (ControlCommand, error) InitFromJSON(map[string]interface{}) (ControlCommand, error) } type NetceptorForControlCommand interface { GetClientTLSConfig(name string, expectedHostName string, expectedHostNameType netceptor.ExpectedHostnameType) (*tls.Config, error) Dial(node string, service string, tlscfg *tls.Config) (*netceptor.Conn, error) Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) MaxForwardingHops() byte Status() netceptor.Status Traceroute(ctx context.Context, target string) <-chan *netceptor.TracerouteResult NodeID() string GetLogger() *logger.ReceptorLogger CancelBackends() } // ControlCommand is an instance of a command that is being run from the control service. type ControlCommand interface { ControlFunc(context.Context, NetceptorForControlCommand, ControlFuncOperations) (map[string]interface{}, error) } // ControlFuncOperations provides callbacks for control services to take actions. 
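// For example (an illustrative sketch, not taken from the upstream source), a
// command's ControlFunc receives one of these values and can use it to interact
// with the requesting client before returning its result map: streaming bytes
// back with WriteToConn, copying client input with ReadFromConn, or handing the
// raw socket over with BridgeConn. A hypothetical command body might look like:
//
//	func (c *exampleCommand) ControlFunc(ctx context.Context, nc NetceptorForControlCommand, cfo ControlFuncOperations) (map[string]interface{}, error) {
//		results := make(chan []byte, 1)
//		results <- []byte("working...\n")
//		close(results)
//		// drain the channel to the client connection before returning the result map
//		if err := cfo.WriteToConn("", results); err != nil {
//			return nil, err
//		}
//		return map[string]interface{}{"Success": true}, nil
//	}
//
// exampleCommand and the results channel are hypothetical names used only for
// illustration; see ping.go below for a real ControlCommand implementation.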
type ControlFuncOperations interface { BridgeConn(message string, bc io.ReadWriteCloser, bcName string, logger *logger.ReceptorLogger, utils Utiler) error ReadFromConn(message string, out io.Writer, io Copier) error WriteToConn(message string, in chan []byte) error Close() error RemoteAddr() net.Addr } receptor-1.5.3/pkg/controlsvc/mock_controlsvc/000077500000000000000000000000001475465740700215505ustar00rootroot00000000000000receptor-1.5.3/pkg/controlsvc/mock_controlsvc/controlsvc.go000066400000000000000000000307041475465740700242770ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/controlsvc/controlsvc.go // // Generated by this command: // // mockgen -source=pkg/controlsvc/controlsvc.go -destination=pkg/controlsvc/mock_controlsvc/controlsvc.go // // Package mock_controlsvc is a generated GoMock package. package mock_controlsvc import ( context "context" tls "crypto/tls" io "io" fs "io/fs" net "net" reflect "reflect" time "time" logger "github.com/ansible/receptor/pkg/logger" netceptor "github.com/ansible/receptor/pkg/netceptor" utils "github.com/ansible/receptor/pkg/utils" gomock "go.uber.org/mock/gomock" ) // MockCopier is a mock of Copier interface. type MockCopier struct { ctrl *gomock.Controller recorder *MockCopierMockRecorder isgomock struct{} } // MockCopierMockRecorder is the mock recorder for MockCopier. type MockCopierMockRecorder struct { mock *MockCopier } // NewMockCopier creates a new mock instance. func NewMockCopier(ctrl *gomock.Controller) *MockCopier { mock := &MockCopier{ctrl: ctrl} mock.recorder = &MockCopierMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockCopier) EXPECT() *MockCopierMockRecorder { return m.recorder } // Copy mocks base method. func (m *MockCopier) Copy(dst io.Writer, src io.Reader) (int64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Copy", dst, src) ret0, _ := ret[0].(int64) ret1, _ := ret[1].(error) return ret0, ret1 } // Copy indicates an expected call of Copy. func (mr *MockCopierMockRecorder) Copy(dst, src any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Copy", reflect.TypeOf((*MockCopier)(nil).Copy), dst, src) } // MockNetceptorForControlsvc is a mock of NetceptorForControlsvc interface. type MockNetceptorForControlsvc struct { ctrl *gomock.Controller recorder *MockNetceptorForControlsvcMockRecorder isgomock struct{} } // MockNetceptorForControlsvcMockRecorder is the mock recorder for MockNetceptorForControlsvc. type MockNetceptorForControlsvcMockRecorder struct { mock *MockNetceptorForControlsvc } // NewMockNetceptorForControlsvc creates a new mock instance. func NewMockNetceptorForControlsvc(ctrl *gomock.Controller) *MockNetceptorForControlsvc { mock := &MockNetceptorForControlsvc{ctrl: ctrl} mock.recorder = &MockNetceptorForControlsvcMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetceptorForControlsvc) EXPECT() *MockNetceptorForControlsvcMockRecorder { return m.recorder } // CancelBackends mocks base method. func (m *MockNetceptorForControlsvc) CancelBackends() { m.ctrl.T.Helper() m.ctrl.Call(m, "CancelBackends") } // CancelBackends indicates an expected call of CancelBackends. 
func (mr *MockNetceptorForControlsvcMockRecorder) CancelBackends() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelBackends", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).CancelBackends)) } // Dial mocks base method. func (m *MockNetceptorForControlsvc) Dial(node, service string, tlscfg *tls.Config) (*netceptor.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Dial", node, service, tlscfg) ret0, _ := ret[0].(*netceptor.Conn) ret1, _ := ret[1].(error) return ret0, ret1 } // Dial indicates an expected call of Dial. func (mr *MockNetceptorForControlsvcMockRecorder) Dial(node, service, tlscfg any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dial", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).Dial), node, service, tlscfg) } // GetClientTLSConfig mocks base method. func (m *MockNetceptorForControlsvc) GetClientTLSConfig(name, expectedHostName string, expectedHostNameType netceptor.ExpectedHostnameType) (*tls.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetClientTLSConfig", name, expectedHostName, expectedHostNameType) ret0, _ := ret[0].(*tls.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClientTLSConfig indicates an expected call of GetClientTLSConfig. func (mr *MockNetceptorForControlsvcMockRecorder) GetClientTLSConfig(name, expectedHostName, expectedHostNameType any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClientTLSConfig", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).GetClientTLSConfig), name, expectedHostName, expectedHostNameType) } // GetLogger mocks base method. func (m *MockNetceptorForControlsvc) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockNetceptorForControlsvcMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).GetLogger)) } // ListenAndAdvertise mocks base method. func (m *MockNetceptorForControlsvc) ListenAndAdvertise(service string, tlscfg *tls.Config, tags map[string]string) (*netceptor.Listener, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListenAndAdvertise", service, tlscfg, tags) ret0, _ := ret[0].(*netceptor.Listener) ret1, _ := ret[1].(error) return ret0, ret1 } // ListenAndAdvertise indicates an expected call of ListenAndAdvertise. func (mr *MockNetceptorForControlsvcMockRecorder) ListenAndAdvertise(service, tlscfg, tags any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenAndAdvertise", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).ListenAndAdvertise), service, tlscfg, tags) } // MaxForwardingHops mocks base method. func (m *MockNetceptorForControlsvc) MaxForwardingHops() byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MaxForwardingHops") ret0, _ := ret[0].(byte) return ret0 } // MaxForwardingHops indicates an expected call of MaxForwardingHops. func (mr *MockNetceptorForControlsvcMockRecorder) MaxForwardingHops() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxForwardingHops", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).MaxForwardingHops)) } // NodeID mocks base method. 
func (m *MockNetceptorForControlsvc) NodeID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NodeID") ret0, _ := ret[0].(string) return ret0 } // NodeID indicates an expected call of NodeID. func (mr *MockNetceptorForControlsvcMockRecorder) NodeID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).NodeID)) } // Ping mocks base method. func (m *MockNetceptorForControlsvc) Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Ping", ctx, target, hopsToLive) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(string) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // Ping indicates an expected call of Ping. func (mr *MockNetceptorForControlsvcMockRecorder) Ping(ctx, target, hopsToLive any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).Ping), ctx, target, hopsToLive) } // Status mocks base method. func (m *MockNetceptorForControlsvc) Status() netceptor.Status { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(netceptor.Status) return ret0 } // Status indicates an expected call of Status. func (mr *MockNetceptorForControlsvcMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).Status)) } // Traceroute mocks base method. func (m *MockNetceptorForControlsvc) Traceroute(ctx context.Context, target string) <-chan *netceptor.TracerouteResult { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Traceroute", ctx, target) ret0, _ := ret[0].(<-chan *netceptor.TracerouteResult) return ret0 } // Traceroute indicates an expected call of Traceroute. func (mr *MockNetceptorForControlsvcMockRecorder) Traceroute(ctx, target any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Traceroute", reflect.TypeOf((*MockNetceptorForControlsvc)(nil).Traceroute), ctx, target) } // MockUtiler is a mock of Utiler interface. type MockUtiler struct { ctrl *gomock.Controller recorder *MockUtilerMockRecorder isgomock struct{} } // MockUtilerMockRecorder is the mock recorder for MockUtiler. type MockUtilerMockRecorder struct { mock *MockUtiler } // NewMockUtiler creates a new mock instance. func NewMockUtiler(ctrl *gomock.Controller) *MockUtiler { mock := &MockUtiler{ctrl: ctrl} mock.recorder = &MockUtilerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockUtiler) EXPECT() *MockUtilerMockRecorder { return m.recorder } // BridgeConns mocks base method. func (m *MockUtiler) BridgeConns(c1 io.ReadWriteCloser, c1Name string, c2 io.ReadWriteCloser, c2Name string, logger *logger.ReceptorLogger) { m.ctrl.T.Helper() m.ctrl.Call(m, "BridgeConns", c1, c1Name, c2, c2Name, logger) } // BridgeConns indicates an expected call of BridgeConns. func (mr *MockUtilerMockRecorder) BridgeConns(c1, c1Name, c2, c2Name, logger any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BridgeConns", reflect.TypeOf((*MockUtiler)(nil).BridgeConns), c1, c1Name, c2, c2Name, logger) } // UnixSocketListen mocks base method. 
func (m *MockUtiler) UnixSocketListen(filename string, permissions fs.FileMode) (net.Listener, *utils.FLock, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UnixSocketListen", filename, permissions) ret0, _ := ret[0].(net.Listener) ret1, _ := ret[1].(*utils.FLock) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // UnixSocketListen indicates an expected call of UnixSocketListen. func (mr *MockUtilerMockRecorder) UnixSocketListen(filename, permissions any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnixSocketListen", reflect.TypeOf((*MockUtiler)(nil).UnixSocketListen), filename, permissions) } // MockNeter is a mock of Neter interface. type MockNeter struct { ctrl *gomock.Controller recorder *MockNeterMockRecorder isgomock struct{} } // MockNeterMockRecorder is the mock recorder for MockNeter. type MockNeterMockRecorder struct { mock *MockNeter } // NewMockNeter creates a new mock instance. func NewMockNeter(ctrl *gomock.Controller) *MockNeter { mock := &MockNeter{ctrl: ctrl} mock.recorder = &MockNeterMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNeter) EXPECT() *MockNeterMockRecorder { return m.recorder } // Listen mocks base method. func (m *MockNeter) Listen(network, address string) (net.Listener, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Listen", network, address) ret0, _ := ret[0].(net.Listener) ret1, _ := ret[1].(error) return ret0, ret1 } // Listen indicates an expected call of Listen. func (mr *MockNeterMockRecorder) Listen(network, address any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Listen", reflect.TypeOf((*MockNeter)(nil).Listen), network, address) } // MockTlser is a mock of Tlser interface. type MockTlser struct { ctrl *gomock.Controller recorder *MockTlserMockRecorder isgomock struct{} } // MockTlserMockRecorder is the mock recorder for MockTlser. type MockTlserMockRecorder struct { mock *MockTlser } // NewMockTlser creates a new mock instance. func NewMockTlser(ctrl *gomock.Controller) *MockTlser { mock := &MockTlser{ctrl: ctrl} mock.recorder = &MockTlserMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockTlser) EXPECT() *MockTlserMockRecorder { return m.recorder } // NewListener mocks base method. func (m *MockTlser) NewListener(inner net.Listener, config *tls.Config) net.Listener { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewListener", inner, config) ret0, _ := ret[0].(net.Listener) return ret0 } // NewListener indicates an expected call of NewListener. func (mr *MockTlserMockRecorder) NewListener(inner, config any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewListener", reflect.TypeOf((*MockTlser)(nil).NewListener), inner, config) } receptor-1.5.3/pkg/controlsvc/mock_controlsvc/interfaces.go000066400000000000000000000331421475465740700242250ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/controlsvc/interfaces.go // // Generated by this command: // // mockgen -source=pkg/controlsvc/interfaces.go -destination=pkg/controlsvc/mock_controlsvc/interfaces.go // // Package mock_controlsvc is a generated GoMock package. 
package mock_controlsvc import ( context "context" tls "crypto/tls" io "io" net "net" reflect "reflect" time "time" controlsvc "github.com/ansible/receptor/pkg/controlsvc" logger "github.com/ansible/receptor/pkg/logger" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockControlCommandType is a mock of ControlCommandType interface. type MockControlCommandType struct { ctrl *gomock.Controller recorder *MockControlCommandTypeMockRecorder isgomock struct{} } // MockControlCommandTypeMockRecorder is the mock recorder for MockControlCommandType. type MockControlCommandTypeMockRecorder struct { mock *MockControlCommandType } // NewMockControlCommandType creates a new mock instance. func NewMockControlCommandType(ctrl *gomock.Controller) *MockControlCommandType { mock := &MockControlCommandType{ctrl: ctrl} mock.recorder = &MockControlCommandTypeMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockControlCommandType) EXPECT() *MockControlCommandTypeMockRecorder { return m.recorder } // InitFromJSON mocks base method. func (m *MockControlCommandType) InitFromJSON(arg0 map[string]any) (controlsvc.ControlCommand, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InitFromJSON", arg0) ret0, _ := ret[0].(controlsvc.ControlCommand) ret1, _ := ret[1].(error) return ret0, ret1 } // InitFromJSON indicates an expected call of InitFromJSON. func (mr *MockControlCommandTypeMockRecorder) InitFromJSON(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitFromJSON", reflect.TypeOf((*MockControlCommandType)(nil).InitFromJSON), arg0) } // InitFromString mocks base method. func (m *MockControlCommandType) InitFromString(arg0 string) (controlsvc.ControlCommand, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InitFromString", arg0) ret0, _ := ret[0].(controlsvc.ControlCommand) ret1, _ := ret[1].(error) return ret0, ret1 } // InitFromString indicates an expected call of InitFromString. func (mr *MockControlCommandTypeMockRecorder) InitFromString(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitFromString", reflect.TypeOf((*MockControlCommandType)(nil).InitFromString), arg0) } // MockNetceptorForControlCommand is a mock of NetceptorForControlCommand interface. type MockNetceptorForControlCommand struct { ctrl *gomock.Controller recorder *MockNetceptorForControlCommandMockRecorder isgomock struct{} } // MockNetceptorForControlCommandMockRecorder is the mock recorder for MockNetceptorForControlCommand. type MockNetceptorForControlCommandMockRecorder struct { mock *MockNetceptorForControlCommand } // NewMockNetceptorForControlCommand creates a new mock instance. func NewMockNetceptorForControlCommand(ctrl *gomock.Controller) *MockNetceptorForControlCommand { mock := &MockNetceptorForControlCommand{ctrl: ctrl} mock.recorder = &MockNetceptorForControlCommandMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetceptorForControlCommand) EXPECT() *MockNetceptorForControlCommandMockRecorder { return m.recorder } // CancelBackends mocks base method. func (m *MockNetceptorForControlCommand) CancelBackends() { m.ctrl.T.Helper() m.ctrl.Call(m, "CancelBackends") } // CancelBackends indicates an expected call of CancelBackends. 
func (mr *MockNetceptorForControlCommandMockRecorder) CancelBackends() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelBackends", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).CancelBackends)) } // Dial mocks base method. func (m *MockNetceptorForControlCommand) Dial(node, service string, tlscfg *tls.Config) (*netceptor.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Dial", node, service, tlscfg) ret0, _ := ret[0].(*netceptor.Conn) ret1, _ := ret[1].(error) return ret0, ret1 } // Dial indicates an expected call of Dial. func (mr *MockNetceptorForControlCommandMockRecorder) Dial(node, service, tlscfg any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dial", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).Dial), node, service, tlscfg) } // GetClientTLSConfig mocks base method. func (m *MockNetceptorForControlCommand) GetClientTLSConfig(name, expectedHostName string, expectedHostNameType netceptor.ExpectedHostnameType) (*tls.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetClientTLSConfig", name, expectedHostName, expectedHostNameType) ret0, _ := ret[0].(*tls.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClientTLSConfig indicates an expected call of GetClientTLSConfig. func (mr *MockNetceptorForControlCommandMockRecorder) GetClientTLSConfig(name, expectedHostName, expectedHostNameType any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClientTLSConfig", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).GetClientTLSConfig), name, expectedHostName, expectedHostNameType) } // GetLogger mocks base method. func (m *MockNetceptorForControlCommand) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockNetceptorForControlCommandMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).GetLogger)) } // MaxForwardingHops mocks base method. func (m *MockNetceptorForControlCommand) MaxForwardingHops() byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MaxForwardingHops") ret0, _ := ret[0].(byte) return ret0 } // MaxForwardingHops indicates an expected call of MaxForwardingHops. func (mr *MockNetceptorForControlCommandMockRecorder) MaxForwardingHops() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxForwardingHops", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).MaxForwardingHops)) } // NodeID mocks base method. func (m *MockNetceptorForControlCommand) NodeID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NodeID") ret0, _ := ret[0].(string) return ret0 } // NodeID indicates an expected call of NodeID. func (mr *MockNetceptorForControlCommandMockRecorder) NodeID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).NodeID)) } // Ping mocks base method. 
func (m *MockNetceptorForControlCommand) Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Ping", ctx, target, hopsToLive) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(string) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // Ping indicates an expected call of Ping. func (mr *MockNetceptorForControlCommandMockRecorder) Ping(ctx, target, hopsToLive any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).Ping), ctx, target, hopsToLive) } // Status mocks base method. func (m *MockNetceptorForControlCommand) Status() netceptor.Status { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(netceptor.Status) return ret0 } // Status indicates an expected call of Status. func (mr *MockNetceptorForControlCommandMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).Status)) } // Traceroute mocks base method. func (m *MockNetceptorForControlCommand) Traceroute(ctx context.Context, target string) <-chan *netceptor.TracerouteResult { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Traceroute", ctx, target) ret0, _ := ret[0].(<-chan *netceptor.TracerouteResult) return ret0 } // Traceroute indicates an expected call of Traceroute. func (mr *MockNetceptorForControlCommandMockRecorder) Traceroute(ctx, target any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Traceroute", reflect.TypeOf((*MockNetceptorForControlCommand)(nil).Traceroute), ctx, target) } // MockControlCommand is a mock of ControlCommand interface. type MockControlCommand struct { ctrl *gomock.Controller recorder *MockControlCommandMockRecorder isgomock struct{} } // MockControlCommandMockRecorder is the mock recorder for MockControlCommand. type MockControlCommandMockRecorder struct { mock *MockControlCommand } // NewMockControlCommand creates a new mock instance. func NewMockControlCommand(ctrl *gomock.Controller) *MockControlCommand { mock := &MockControlCommand{ctrl: ctrl} mock.recorder = &MockControlCommandMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockControlCommand) EXPECT() *MockControlCommandMockRecorder { return m.recorder } // ControlFunc mocks base method. func (m *MockControlCommand) ControlFunc(arg0 context.Context, arg1 controlsvc.NetceptorForControlCommand, arg2 controlsvc.ControlFuncOperations) (map[string]any, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ControlFunc", arg0, arg1, arg2) ret0, _ := ret[0].(map[string]any) ret1, _ := ret[1].(error) return ret0, ret1 } // ControlFunc indicates an expected call of ControlFunc. func (mr *MockControlCommandMockRecorder) ControlFunc(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControlFunc", reflect.TypeOf((*MockControlCommand)(nil).ControlFunc), arg0, arg1, arg2) } // MockControlFuncOperations is a mock of ControlFuncOperations interface. type MockControlFuncOperations struct { ctrl *gomock.Controller recorder *MockControlFuncOperationsMockRecorder isgomock struct{} } // MockControlFuncOperationsMockRecorder is the mock recorder for MockControlFuncOperations. 
type MockControlFuncOperationsMockRecorder struct { mock *MockControlFuncOperations } // NewMockControlFuncOperations creates a new mock instance. func NewMockControlFuncOperations(ctrl *gomock.Controller) *MockControlFuncOperations { mock := &MockControlFuncOperations{ctrl: ctrl} mock.recorder = &MockControlFuncOperationsMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockControlFuncOperations) EXPECT() *MockControlFuncOperationsMockRecorder { return m.recorder } // BridgeConn mocks base method. func (m *MockControlFuncOperations) BridgeConn(message string, bc io.ReadWriteCloser, bcName string, logger *logger.ReceptorLogger, utils controlsvc.Utiler) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BridgeConn", message, bc, bcName, logger, utils) ret0, _ := ret[0].(error) return ret0 } // BridgeConn indicates an expected call of BridgeConn. func (mr *MockControlFuncOperationsMockRecorder) BridgeConn(message, bc, bcName, logger, utils any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BridgeConn", reflect.TypeOf((*MockControlFuncOperations)(nil).BridgeConn), message, bc, bcName, logger, utils) } // Close mocks base method. func (m *MockControlFuncOperations) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockControlFuncOperationsMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockControlFuncOperations)(nil).Close)) } // ReadFromConn mocks base method. func (m *MockControlFuncOperations) ReadFromConn(message string, out io.Writer, io controlsvc.Copier) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFromConn", message, out, io) ret0, _ := ret[0].(error) return ret0 } // ReadFromConn indicates an expected call of ReadFromConn. func (mr *MockControlFuncOperationsMockRecorder) ReadFromConn(message, out, io any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFromConn", reflect.TypeOf((*MockControlFuncOperations)(nil).ReadFromConn), message, out, io) } // RemoteAddr mocks base method. func (m *MockControlFuncOperations) RemoteAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RemoteAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // RemoteAddr indicates an expected call of RemoteAddr. func (mr *MockControlFuncOperationsMockRecorder) RemoteAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoteAddr", reflect.TypeOf((*MockControlFuncOperations)(nil).RemoteAddr)) } // WriteToConn mocks base method. func (m *MockControlFuncOperations) WriteToConn(message string, in chan []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteToConn", message, in) ret0, _ := ret[0].(error) return ret0 } // WriteToConn indicates an expected call of WriteToConn. func (mr *MockControlFuncOperationsMockRecorder) WriteToConn(message, in any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteToConn", reflect.TypeOf((*MockControlFuncOperations)(nil).WriteToConn), message, in) } receptor-1.5.3/pkg/controlsvc/mock_controlsvc/io.go000066400000000000000000000046061475465740700225140ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. 
// Source: io (interfaces: ReadWriteCloser) // // Generated by this command: // // mockgen io ReadWriteCloser // // Package mock_io is a generated GoMock package. package mock_controlsvc import ( reflect "reflect" gomock "go.uber.org/mock/gomock" ) // MockReadWriteCloser is a mock of ReadWriteCloser interface. type MockReadWriteCloser struct { ctrl *gomock.Controller recorder *MockReadWriteCloserMockRecorder isgomock struct{} } // MockReadWriteCloserMockRecorder is the mock recorder for MockReadWriteCloser. type MockReadWriteCloserMockRecorder struct { mock *MockReadWriteCloser } // NewMockReadWriteCloser creates a new mock instance. func NewMockReadWriteCloser(ctrl *gomock.Controller) *MockReadWriteCloser { mock := &MockReadWriteCloser{ctrl: ctrl} mock.recorder = &MockReadWriteCloserMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockReadWriteCloser) EXPECT() *MockReadWriteCloserMockRecorder { return m.recorder } // Close mocks base method. func (m *MockReadWriteCloser) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockReadWriteCloserMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockReadWriteCloser)(nil).Close)) } // Read mocks base method. func (m *MockReadWriteCloser) Read(p []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Read", p) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Read indicates an expected call of Read. func (mr *MockReadWriteCloserMockRecorder) Read(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockReadWriteCloser)(nil).Read), p) } // Write mocks base method. func (m *MockReadWriteCloser) Write(p []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", p) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Write indicates an expected call of Write. func (mr *MockReadWriteCloserMockRecorder) Write(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockReadWriteCloser)(nil).Write), p) } receptor-1.5.3/pkg/controlsvc/mock_controlsvc/mock_net.go000066400000000000000000000172551475465740700237100ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: net (interfaces: Listener,Conn,Addr) // // Generated by this command: // // mockgen net Listener,Conn,Addr // // Package mock_net is a generated GoMock package. package mock_controlsvc import ( net "net" reflect "reflect" time "time" gomock "go.uber.org/mock/gomock" ) // MockListener is a mock of Listener interface. type MockListener struct { ctrl *gomock.Controller recorder *MockListenerMockRecorder isgomock struct{} } // MockListenerMockRecorder is the mock recorder for MockListener. type MockListenerMockRecorder struct { mock *MockListener } // NewMockListener creates a new mock instance. func NewMockListener(ctrl *gomock.Controller) *MockListener { mock := &MockListener{ctrl: ctrl} mock.recorder = &MockListenerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockListener) EXPECT() *MockListenerMockRecorder { return m.recorder } // Accept mocks base method. 
func (m *MockListener) Accept() (net.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Accept") ret0, _ := ret[0].(net.Conn) ret1, _ := ret[1].(error) return ret0, ret1 } // Accept indicates an expected call of Accept. func (mr *MockListenerMockRecorder) Accept() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockListener)(nil).Accept)) } // Addr mocks base method. func (m *MockListener) Addr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Addr") ret0, _ := ret[0].(net.Addr) return ret0 } // Addr indicates an expected call of Addr. func (mr *MockListenerMockRecorder) Addr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addr", reflect.TypeOf((*MockListener)(nil).Addr)) } // Close mocks base method. func (m *MockListener) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockListenerMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockListener)(nil).Close)) } // MockConn is a mock of Conn interface. type MockConn struct { ctrl *gomock.Controller recorder *MockConnMockRecorder isgomock struct{} } // MockConnMockRecorder is the mock recorder for MockConn. type MockConnMockRecorder struct { mock *MockConn } // NewMockConn creates a new mock instance. func NewMockConn(ctrl *gomock.Controller) *MockConn { mock := &MockConn{ctrl: ctrl} mock.recorder = &MockConnMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockConn) EXPECT() *MockConnMockRecorder { return m.recorder } // Close mocks base method. func (m *MockConn) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockConnMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockConn)(nil).Close)) } // LocalAddr mocks base method. func (m *MockConn) LocalAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LocalAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // LocalAddr indicates an expected call of LocalAddr. func (mr *MockConnMockRecorder) LocalAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddr", reflect.TypeOf((*MockConn)(nil).LocalAddr)) } // Read mocks base method. func (m *MockConn) Read(b []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Read", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Read indicates an expected call of Read. func (mr *MockConnMockRecorder) Read(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockConn)(nil).Read), b) } // RemoteAddr mocks base method. func (m *MockConn) RemoteAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RemoteAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // RemoteAddr indicates an expected call of RemoteAddr. func (mr *MockConnMockRecorder) RemoteAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoteAddr", reflect.TypeOf((*MockConn)(nil).RemoteAddr)) } // SetDeadline mocks base method. 
func (m *MockConn) SetDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetDeadline indicates an expected call of SetDeadline. func (mr *MockConnMockRecorder) SetDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDeadline", reflect.TypeOf((*MockConn)(nil).SetDeadline), t) } // SetReadDeadline mocks base method. func (m *MockConn) SetReadDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetReadDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetReadDeadline indicates an expected call of SetReadDeadline. func (mr *MockConnMockRecorder) SetReadDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadDeadline", reflect.TypeOf((*MockConn)(nil).SetReadDeadline), t) } // SetWriteDeadline mocks base method. func (m *MockConn) SetWriteDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetWriteDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetWriteDeadline indicates an expected call of SetWriteDeadline. func (mr *MockConnMockRecorder) SetWriteDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteDeadline", reflect.TypeOf((*MockConn)(nil).SetWriteDeadline), t) } // Write mocks base method. func (m *MockConn) Write(b []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Write indicates an expected call of Write. func (mr *MockConnMockRecorder) Write(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockConn)(nil).Write), b) } // MockAddr is a mock of Addr interface. type MockAddr struct { ctrl *gomock.Controller recorder *MockAddrMockRecorder isgomock struct{} } // MockAddrMockRecorder is the mock recorder for MockAddr. type MockAddrMockRecorder struct { mock *MockAddr } // NewMockAddr creates a new mock instance. func NewMockAddr(ctrl *gomock.Controller) *MockAddr { mock := &MockAddr{ctrl: ctrl} mock.recorder = &MockAddrMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockAddr) EXPECT() *MockAddrMockRecorder { return m.recorder } // Network mocks base method. func (m *MockAddr) Network() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Network") ret0, _ := ret[0].(string) return ret0 } // Network indicates an expected call of Network. func (mr *MockAddrMockRecorder) Network() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Network", reflect.TypeOf((*MockAddr)(nil).Network)) } // String mocks base method. func (m *MockAddr) String() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "String") ret0, _ := ret[0].(string) return ret0 } // String indicates an expected call of String. 
func (mr *MockAddrMockRecorder) String() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockAddr)(nil).String)) } receptor-1.5.3/pkg/controlsvc/ping.go000066400000000000000000000022161475465740700176300ustar00rootroot00000000000000package controlsvc import ( "context" "fmt" ) type ( PingCommandType struct{} PingCommand struct { target string } ) func (t *PingCommandType) InitFromString(params string) (ControlCommand, error) { if params == "" { return nil, fmt.Errorf("no ping target") } c := &PingCommand{ target: params, } return c, nil } func (t *PingCommandType) InitFromJSON(config map[string]interface{}) (ControlCommand, error) { target, ok := config["target"] if !ok { return nil, fmt.Errorf("no ping target") } targetStr, ok := target.(string) if !ok { return nil, fmt.Errorf("ping target must be string") } c := &PingCommand{ target: targetStr, } return c, nil } func (c *PingCommand) ControlFunc(ctx context.Context, nc NetceptorForControlCommand, _ ControlFuncOperations) (map[string]interface{}, error) { pingTime, pingRemote, err := nc.Ping(ctx, c.target, nc.MaxForwardingHops()) cfr := make(map[string]interface{}) if err == nil { cfr["Success"] = true cfr["From"] = pingRemote cfr["Time"] = pingTime cfr["TimeStr"] = fmt.Sprint(pingTime) } else { cfr["Success"] = false cfr["Error"] = err.Error() } return cfr, nil } receptor-1.5.3/pkg/controlsvc/ping_test.go000066400000000000000000000067231475465740700206760ustar00rootroot00000000000000package controlsvc_test import ( "context" "errors" "testing" "time" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/controlsvc/mock_controlsvc" "go.uber.org/mock/gomock" ) func TestPingInitFromString(t *testing.T) { pingCommandType := controlsvc.PingCommandType{} initFromStringTestCases := []struct { name string expectedError bool errorMessage string input string }{ { name: "no ping target 1", expectedError: true, errorMessage: "no ping target", input: "", }, { name: "single param - pass", expectedError: false, errorMessage: "", input: "one", }, } for _, testCase := range initFromStringTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := pingCommandType.InitFromString(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } type InitFromJSONTestCase struct { name string expectedError bool errorMessage string input map[string]interface{} } func BuildInitFromJSONTestCases(name string, expectedError bool, errorMessage string, input map[string]interface{}) InitFromJSONTestCase { return InitFromJSONTestCase{ name: name, expectedError: expectedError, errorMessage: errorMessage, input: input, } } func TestPingInitFromJSON(t *testing.T) { pingCommandType := controlsvc.PingCommandType{} initFromJSONTestCases := []InitFromJSONTestCase{ BuildInitFromJSONTestCases("no ping target 2", true, "no ping target", map[string]interface{}{}), BuildInitFromJSONTestCases("ping target must be string", true, "ping target must be string", map[string]interface{}{"target": 7}), BuildInitFromJSONTestCases("three params - pass", false, "", map[string]interface{}{"target": "some target"}), } for _, testCase := range initFromJSONTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := pingCommandType.InitFromJSON(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestPingControlFunc(t *testing.T) { pingCommand := controlsvc.PingCommand{} ctrl := gomock.NewController(t) mockNetceptor 
:= mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockControlFunc := mock_controlsvc.NewMockControlFuncOperations(ctrl) controlFuncTestCases := []struct { name string expectedError bool errorMessage string expectedCalls func() }{ { name: "ping error", expectedError: true, errorMessage: "terminated ping", expectedCalls: func() { mockNetceptor.EXPECT().Ping(gomock.Any(), gomock.Any(), gomock.Any()).Return(time.Second, "", errors.New("terminated ping")) mockNetceptor.EXPECT().MaxForwardingHops() }, }, { name: "control func pass", errorMessage: "", expectedError: false, expectedCalls: func() { mockNetceptor.EXPECT().Ping(gomock.Any(), gomock.Any(), gomock.Any()).Return(time.Second, "", nil) mockNetceptor.EXPECT().MaxForwardingHops() }, }, } for _, testCase := range controlFuncTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() cfr, _ := pingCommand.ControlFunc(context.Background(), mockNetceptor, mockControlFunc) err, ok := cfr["Error"] if testCase.expectedError && testCase.errorMessage != err { t.Errorf("expected: %s , received: %s", testCase.errorMessage, err) } if !testCase.expectedError && ok { t.Error(cfr["Error"]) } }) } } receptor-1.5.3/pkg/controlsvc/reload.go000066400000000000000000000115351475465740700201450ustar00rootroot00000000000000package controlsvc import ( "context" "fmt" "os" "strings" "github.com/ansible/receptor/pkg/logger" "gopkg.in/yaml.v2" ) type ( ReloadCommandType struct{} ReloadCommand struct{} ) var configPath = "" var reloadParseAndRun = func(toRun []string) error { return fmt.Errorf("no configuration file was provided, reload function not set") } var cfgNotReloadable = make(map[string]bool) var reloadableActions = []string{ "tcp-peer", "tcp-listener", "ws-peer", "ws-listener", "udp-peer", "udp-listener", "local-only", } func isReloadable(cfg string) bool { // checks if top-level keys (e.g. tcp-peer) are in the list of reloadable // actions for _, a := range reloadableActions { if strings.HasPrefix(cfg, a) { return true } } return false } func getActionKeyword(cfg string) string { // extracts top-level key from the full configuration item cfgSplit := strings.Split(cfg, ":") var action string if len(cfgSplit) == 0 { action = cfg } else { action = cfgSplit[0] } return action } func parseConfigForReload(filename string, checkReload bool) error { // cfgNotReloadable is a map, each key being the full configuration item // e.g. "work-command: worktype: echosleep command: bash params:..." // Initially all values of map are set to false, // e.g. cfgNotReloadable["work-command: worktype: echosleep..."] = false // // Upon reload, the config is reparsed and each item is checked. // if item not in cfgNotReloadable, return error // if item is in cfgNotReloadable, set it to true // e.g. cfgNotReloadable["work-command: worktype: echosleep..."] = true // // Finally, cfgAbsent() will loop through the map and check for any remaining // items that are still false. This means the original item is missing from // the config, and an error will be thrown data, err := os.ReadFile(filename) if err != nil { return err } m := make([]interface{}, 0) err = yaml.Unmarshal(data, &m) if err != nil { return err } for i := range m { cfgBytes, err := yaml.Marshal(&m[i]) if err != nil { return err } cfg := string(cfgBytes) if !isReloadable(cfg) { if checkReload { if _, ok := cfgNotReloadable[cfg]; !ok { action := getActionKeyword(cfg) return fmt.Errorf("a non-reloadable config action '%s' was modified or added. 
Must restart receptor for these changes to take effect", action) } cfgNotReloadable[cfg] = true } else { cfgNotReloadable[cfg] = false } } } return nil } func cfgAbsent() error { // checks to see if any item in cfgNotReloadable has a value of false, // if so, that means an unreloadable item has been removed from the config defer func() { for k := range cfgNotReloadable { cfgNotReloadable[k] = false } }() for cfg, v := range cfgNotReloadable { if !v { action := getActionKeyword(cfg) return fmt.Errorf("a non-reloadable config action '%s' was removed. Must restart receptor for changes to take effect", action) } } return nil } // InitReload initializes objects required before reload commands are issued. func InitReload(cPath string, fParseAndRun func([]string) error) error { configPath = cPath reloadParseAndRun = fParseAndRun return parseConfigForReload(configPath, false) } func checkReload() error { return parseConfigForReload(configPath, true) } func (t *ReloadCommandType) InitFromString(_ string) (ControlCommand, error) { c := &ReloadCommand{} return c, nil } func (t *ReloadCommandType) InitFromJSON(_ map[string]interface{}) (ControlCommand, error) { c := &ReloadCommand{} return c, nil } func handleError(err error, errorcode int, logger *logger.ReceptorLogger) (map[string]interface{}, error) { cfr := make(map[string]interface{}) cfr["Success"] = false cfr["Error"] = fmt.Sprintf("%s ERRORCODE %d", err.Error(), errorcode) logger.Warning("Reload not successful: %s", err.Error()) return cfr, nil } func (c *ReloadCommand) ControlFunc(_ context.Context, nc NetceptorForControlCommand, _ ControlFuncOperations) (map[string]interface{}, error) { // Reload command stops all backends, and re-runs the ParseAndRun() on the // initial config file nc.GetLogger().Debug("Reloading") // Do a quick check to catch any yaml errors before canceling backends err := reloadParseAndRun([]string{"PreReload"}) if err != nil { return handleError(err, 4, nc.GetLogger()) } // check if non-reloadable items have been added or modified err = checkReload() if err != nil { return handleError(err, 3, nc.GetLogger()) } // check if non-reloadable items have been removed err = cfgAbsent() if err != nil { return handleError(err, 3, nc.GetLogger()) } nc.CancelBackends() // reloadParseAndRun is a ParseAndRun closure, set in receptor.go/main() err = reloadParseAndRun([]string{"PreReload", "Reload"}) if err != nil { return handleError(err, 4, nc.GetLogger()) } cfr := make(map[string]interface{}) cfr["Success"] = true return cfr, nil } receptor-1.5.3/pkg/controlsvc/reload_test.go000066400000000000000000000032111475465740700211740ustar00rootroot00000000000000package controlsvc import ( "testing" ) func TestReload(t *testing.T) { type yamltest struct { filename string modifyError bool absentError bool } scenarios := []yamltest{ {filename: "reload_test_yml/init.yml", modifyError: false, absentError: false}, {filename: "reload_test_yml/add_cfg.yml", modifyError: true, absentError: false}, {filename: "reload_test_yml/drop_cfg.yml", modifyError: false, absentError: true}, {filename: "reload_test_yml/modify_cfg.yml", modifyError: true, absentError: true}, {filename: "reload_test_yml/syntax_error.yml", modifyError: true, absentError: true}, {filename: "reload_test_yml/successful_reload.yml", modifyError: false, absentError: false}, } err := parseConfigForReload("reload_test_yml/init.yml", false) if err != nil { t.Errorf("parseConfigForReload %s: Unexpected err: %v", "init.yml", err) } if len(cfgNotReloadable) != 5 { t.Errorf("cfNotReloadable 
length expected %d, got %d", 5, len(cfgNotReloadable)) } for _, s := range scenarios { t.Logf("%s", s.filename) err = parseConfigForReload(s.filename, true) if s.modifyError { if err == nil { t.Errorf("parseConfigForReload %s %s: Expected err, got %v", s.filename, "modifyError", err) } } else { if err != nil { t.Errorf("parseConfigForReload %s %s: Unexpected err: %v", s.filename, "modifyError", err) } } err = cfgAbsent() if s.absentError { if err == nil { t.Errorf("parseConfigForReload %s %s: Expected err, got %v", s.filename, "absentError", err) } } else { if err != nil { t.Errorf("parseConfigForReload %s %s: Unexpected err: %v", s.filename, "absentError", err) } } } } receptor-1.5.3/pkg/controlsvc/reload_test_yml/000077500000000000000000000000001475465740700215315ustar00rootroot00000000000000receptor-1.5.3/pkg/controlsvc/reload_test_yml/add_cfg.yml000066400000000000000000000005701475465740700236250ustar00rootroot00000000000000--- - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - control-service: service: control filename: /tmp/foo.sock - work-command: workType: hello command: bash params: "-c \"echo hello\"" - work-command: # not reloadable workType: hello2 command: bash params: "-c \"echo hello2\"" receptor-1.5.3/pkg/controlsvc/reload_test_yml/drop_cfg.yml000066400000000000000000000004261475465740700240410ustar00rootroot00000000000000--- - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - control-service: service: control filename: /tmp/foo.sock # - work-command: # workType: hello # command: bash # params: "-c \"echo hello\"" receptor-1.5.3/pkg/controlsvc/reload_test_yml/init.yml000066400000000000000000000004161475465740700232200ustar00rootroot00000000000000--- - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - control-service: service: control filename: /tmp/foo.sock - work-command: workType: hello command: bash params: "-c \"echo hello\"" receptor-1.5.3/pkg/controlsvc/reload_test_yml/modify_cfg.yml000066400000000000000000000004471475465740700243670ustar00rootroot00000000000000--- - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - control-service: service: control filename: /tmp/foo.sock - work-command: workType: hello2 # changed workType name command: bash params: "-c \"echo hello\"" receptor-1.5.3/pkg/controlsvc/reload_test_yml/successful_reload.yml000066400000000000000000000005071475465740700257630ustar00rootroot00000000000000--- - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - tcp-peer: address: localhost:8002 # is reloadable - control-service: service: control filename: /tmp/foo.sock - work-command: workType: hello command: bash params: "-c \"echo hello\"" receptor-1.5.3/pkg/controlsvc/reload_test_yml/syntax_error.yml000066400000000000000000000003261475465740700250140ustar00rootroot00000000000000--- 1234syntaxerror # syntax error - node: id: foo allowedpeers: bar - log-level: Info - trace - tcp-peer: address: localhost:8001 - control-service: service: control filename: /tmp/foo.sock receptor-1.5.3/pkg/controlsvc/status.go000066400000000000000000000043741475465740700202250ustar00rootroot00000000000000package controlsvc import ( "context" "fmt" "github.com/ansible/receptor/internal/version" "github.com/ansible/receptor/pkg/utils" ) type ( StatusCommandType struct{} StatusCommand struct { requestedFields []string } ) func (t *StatusCommandType) 
InitFromString(params string) (ControlCommand, error) { if params != "" { return nil, fmt.Errorf("status command does not take parameters") } c := &StatusCommand{} return c, nil } func (t *StatusCommandType) InitFromJSON(config map[string]interface{}) (ControlCommand, error) { requestedFields, ok := config["requested_fields"] var requestedFieldsStr []string if ok { requestedFieldsStr = make([]string, 0) for _, v := range requestedFields.([]interface{}) { vStr, ok := v.(string) if !ok { return nil, fmt.Errorf("each element of requested_fields must be a string") } requestedFieldsStr = append(requestedFieldsStr, vStr) } } else { requestedFieldsStr = nil } c := &StatusCommand{ requestedFields: requestedFieldsStr, } return c, nil } func (c *StatusCommand) ControlFunc(_ context.Context, nc NetceptorForControlCommand, _ ControlFuncOperations) (map[string]interface{}, error) { status := nc.Status() statusGetters := make(map[string]func() interface{}) statusGetters["Version"] = func() interface{} { return version.Version } statusGetters["SystemCPUCount"] = func() interface{} { return utils.GetSysCPUCount() } statusGetters["SystemMemoryMiB"] = func() interface{} { return utils.GetSysMemoryMiB() } statusGetters["NodeID"] = func() interface{} { return status.NodeID } statusGetters["Connections"] = func() interface{} { return status.Connections } statusGetters["RoutingTable"] = func() interface{} { return status.RoutingTable } statusGetters["Advertisements"] = func() interface{} { return status.Advertisements } statusGetters["KnownConnectionCosts"] = func() interface{} { return status.KnownConnectionCosts } cfr := make(map[string]interface{}) if c.requestedFields == nil { // if nil, fill it with the keys in statusGetters for field := range statusGetters { c.requestedFields = append(c.requestedFields, field) } } for _, field := range c.requestedFields { getter, ok := statusGetters[field] if ok { cfr[field] = getter() } } return cfr, nil } receptor-1.5.3/pkg/controlsvc/status_test.go000066400000000000000000000057251475465740700212650ustar00rootroot00000000000000package controlsvc_test import ( "context" "testing" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/controlsvc/mock_controlsvc" "github.com/ansible/receptor/pkg/netceptor" "go.uber.org/mock/gomock" ) func TestStatusInitFromString(t *testing.T) { statusCommandType := controlsvc.StatusCommandType{} initFromStringTestCases := []struct { name string expectedError bool errorMessage string input string }{ { name: "status command does not take parameters", expectedError: true, errorMessage: "status command does not take parameters", input: "one", }, { name: "pass without params", expectedError: false, errorMessage: "", input: "", }, } for _, testCase := range initFromStringTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := statusCommandType.InitFromString(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestStatusInitFromJSON(t *testing.T) { statusCommandType := controlsvc.StatusCommandType{} initFromJSONTestCases := []struct { name string expectedError bool errorMessage string input map[string]interface{} }{ { name: "each element of requested_fields must be a string", expectedError: true, errorMessage: "each element of requested_fields must be a string", input: map[string]interface{}{ "requested_fields": []interface{}{ 0: 7, }, }, }, { name: "pass with no requested fields", expectedError: false, errorMessage: "", input: map[string]interface{}{}, }, { name: 
"pass with requested fields", expectedError: false, errorMessage: "", input: map[string]interface{}{ "requested_fields": []interface{}{ 0: "request", }, }, }, } for _, testCase := range initFromJSONTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := statusCommandType.InitFromJSON(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestStatusControlFunc(t *testing.T) { statusCommand := controlsvc.StatusCommand{} ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockControlFunc := mock_controlsvc.NewMockControlFuncOperations(ctrl) controlFuncTestCases := []struct { name string expectedError bool errorMessage string expectedCalls func() }{ { name: "control func pass", errorMessage: "", expectedError: false, expectedCalls: func() { mockNetceptor.EXPECT().Status().Return(netceptor.Status{}) }, }, } for _, testCase := range controlFuncTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() _, err := statusCommand.ControlFunc(context.Background(), mockNetceptor, mockControlFunc) if !testCase.expectedError && err != nil { t.Error(err) } }) } } receptor-1.5.3/pkg/controlsvc/traceroute.go000066400000000000000000000024311475465740700210470ustar00rootroot00000000000000package controlsvc import ( "context" "fmt" "strconv" ) type ( TracerouteCommandType struct{} TracerouteCommand struct { target string } ) func (t *TracerouteCommandType) InitFromString(params string) (ControlCommand, error) { if params == "" { return nil, fmt.Errorf("no traceroute target") } c := &TracerouteCommand{ target: params, } return c, nil } func (t *TracerouteCommandType) InitFromJSON(config map[string]interface{}) (ControlCommand, error) { target, ok := config["target"] if !ok { return nil, fmt.Errorf("no traceroute target") } targetStr, ok := target.(string) if !ok { return nil, fmt.Errorf("traceroute target must be string") } c := &TracerouteCommand{ target: targetStr, } return c, nil } func (c *TracerouteCommand) ControlFunc(ctx context.Context, nc NetceptorForControlCommand, _ ControlFuncOperations) (map[string]interface{}, error) { cfr := make(map[string]interface{}) results := nc.Traceroute(ctx, c.target) i := 0 for res := range results { thisResult := make(map[string]interface{}) thisResult["From"] = res.From thisResult["Time"] = res.Time thisResult["TimeStr"] = fmt.Sprint(res.Time) if res.Err != nil { thisResult["Error"] = res.Err.Error() } cfr[strconv.Itoa(i)] = thisResult i++ } return cfr, nil } receptor-1.5.3/pkg/controlsvc/traceroute_test.go000066400000000000000000000065051475465740700221140ustar00rootroot00000000000000package controlsvc_test import ( "context" "errors" "testing" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/controlsvc/mock_controlsvc" "github.com/ansible/receptor/pkg/netceptor" "go.uber.org/mock/gomock" ) func TestTracerouteInitFromString(t *testing.T) { tracerouteCommandType := controlsvc.TracerouteCommandType{} initFromStringTestCases := []struct { name string expectedError bool errorMessage string input string }{ { name: "no traceroute target 1", expectedError: true, errorMessage: "no traceroute target", input: "", }, { name: "pass with params", expectedError: false, errorMessage: "", input: "one", }, } for _, testCase := range initFromStringTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := tracerouteCommandType.InitFromString(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) 
}) } } func TestTracerouteInitFromJSON(t *testing.T) { tracerouteCommandType := controlsvc.TracerouteCommandType{} initFromJSONTestCases := []InitFromJSONTestCase{ BuildInitFromJSONTestCases("no traceroute target 2", true, "no traceroute target", map[string]interface{}{}), BuildInitFromJSONTestCases("traceroute target must be string", true, "traceroute target must be string", map[string]interface{}{"target": 7}), BuildInitFromJSONTestCases("pass with target", false, "", map[string]interface{}{"target": "some target"}), } for _, testCase := range initFromJSONTestCases { t.Run(testCase.name, func(t *testing.T) { _, err := tracerouteCommandType.InitFromJSON(testCase.input) CheckExpectedError(testCase.expectedError, testCase.errorMessage, t, err) }) } } func TestTracerouteControlFunc(t *testing.T) { tracerouteCommand := controlsvc.TracerouteCommand{} ctrl := gomock.NewController(t) mockNetceptor := mock_controlsvc.NewMockNetceptorForControlsvc(ctrl) mockControlFunc := mock_controlsvc.NewMockControlFuncOperations(ctrl) controlFuncTestCases := []struct { name string expectedError bool errorMessage string expectedCalls func() }{ { name: "control func pass with result error", errorMessage: "terminated", expectedError: false, expectedCalls: func() { c := make(chan *netceptor.TracerouteResult) go func() { c <- &netceptor.TracerouteResult{ Err: errors.New("terminated"), } close(c) }() mockNetceptor.EXPECT().Traceroute(gomock.Any(), gomock.Any()).Return(c) }, }, { name: "control func pass", errorMessage: "", expectedError: false, expectedCalls: func() { c := make(chan *netceptor.TracerouteResult) go func() { c <- &netceptor.TracerouteResult{} close(c) }() mockNetceptor.EXPECT().Traceroute(gomock.Any(), gomock.Any()).Return(c) }, }, } for _, testCase := range controlFuncTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() cfr, _ := tracerouteCommand.ControlFunc(context.Background(), mockNetceptor, mockControlFunc) err, ok := cfr["Error"] if testCase.expectedError && testCase.errorMessage != err { t.Errorf("expected: %s , received: %s", testCase.errorMessage, err) } if !testCase.expectedError && ok { t.Error(cfr["Error"]) } }) } } receptor-1.5.3/pkg/framer/000077500000000000000000000000001475465740700154235ustar00rootroot00000000000000receptor-1.5.3/pkg/framer/framer.go000066400000000000000000000033241475465740700172300ustar00rootroot00000000000000package framer import ( "encoding/binary" "fmt" "sync" ) // Framer provides framing of discrete data entities over a stream connection. type Framer interface { SendData(data []byte) []byte RecvData(buf []byte) MessageReady() bool GetMessage() ([]byte, error) } type framer struct { bufLock *sync.RWMutex buffer []byte } // New returns a new framer instance. func New() Framer { f := &framer{ bufLock: &sync.RWMutex{}, buffer: make([]byte, 0), } return f } // SendData takes a data buffer and returns a framed buffer. func (f *framer) SendData(data []byte) []byte { buf := make([]byte, len(data)+2) binary.LittleEndian.PutUint16(buf[0:2], uint16(len(data))) //nolint:gosec copy(buf[2:], data) return buf } // RecvData adds more data to the buffer from the network. func (f *framer) RecvData(buf []byte) { f.bufLock.Lock() defer f.bufLock.Unlock() f.buffer = append(f.buffer, buf...) } // Caller must already hold at least a read lock on f.bufLock. 
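// --- Illustrative sketch, not part of framer.go: a caller-side round trip
// through the Framer interface (package-qualified as a consumer would use it,
// assuming imports "fmt" and "github.com/ansible/receptor/pkg/framer").
// SendData prepends a two-byte little-endian length; RecvData and GetMessage
// reassemble messages even when the bytes arrive split across reads.
func exampleFramerRoundTrip() {
	sender := framer.New()
	receiver := framer.New()
	buf := sender.SendData([]byte("hello")) // len(buf) == len(msg)+2
	// Simulate the framed bytes arriving in two partial reads.
	receiver.RecvData(buf[:3])
	receiver.RecvData(buf[3:])
	if receiver.MessageReady() {
		if msg, err := receiver.GetMessage(); err == nil {
			fmt.Printf("got %q\n", msg) // got "hello"
		}
	}
}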
func (f *framer) messageReady() (int, bool) { if len(f.buffer) < 2 { return 0, false } msgSize := int(binary.LittleEndian.Uint16(f.buffer[:2])) return msgSize, len(f.buffer) >= msgSize+2 } // MessageReady returns true if a full framed message is available to read. func (f *framer) MessageReady() bool { f.bufLock.RLock() defer f.bufLock.RUnlock() _, ready := f.messageReady() return ready } // GetMessage returns a single framed message, or an error if one is not available. func (f *framer) GetMessage() ([]byte, error) { f.bufLock.Lock() defer f.bufLock.Unlock() msgSize, ready := f.messageReady() if !ready { return nil, fmt.Errorf("message not ready") } data := f.buffer[2 : msgSize+2] f.buffer = f.buffer[msgSize+2:] return data, nil } receptor-1.5.3/pkg/framer/framer_test.go000066400000000000000000000053221475465740700202670ustar00rootroot00000000000000package framer_test import ( "bytes" "testing" "github.com/ansible/receptor/pkg/framer" ) func TestSendData(t *testing.T) { f := framer.New() smallBuffer := []byte{1, 2, 3, 4, 5, 6} largeBuffer := []byte{} for i := 1; i <= 271; i++ { largeBuffer = append(largeBuffer, byte(i)) } framedBufferTestCases := []struct { name string inputBuffer []byte expectedBuffer []byte }{ { name: "small buffer", inputBuffer: smallBuffer, expectedBuffer: append([]byte{6, 0}, smallBuffer...), }, { name: "large buffer", inputBuffer: largeBuffer, expectedBuffer: append([]byte{15, 1}, largeBuffer...), }, } for _, testCase := range framedBufferTestCases { t.Run(testCase.name, func(t *testing.T) { result := f.SendData(testCase.inputBuffer) if !bytes.Equal(testCase.expectedBuffer, result) { t.Errorf("%s - expected: %+v, received: %+v", testCase.name, testCase.expectedBuffer, result) } }) } } func TestMessageReady(t *testing.T) { buffer := []byte{1, 2, 3} messageReadyTestCases := []struct { name string inputBuffer []byte expectedResult bool }{ { name: "message ready", inputBuffer: append([]byte{3, 0}, buffer...), expectedResult: true, }, { name: "message not ready", inputBuffer: buffer, expectedResult: false, }, } for _, testCase := range messageReadyTestCases { t.Run(testCase.name, func(t *testing.T) { f := framer.New() f.RecvData(testCase.inputBuffer) receivedResult := f.MessageReady() if testCase.expectedResult != receivedResult { t.Errorf("%s - expected: %+v, received: %+v", testCase.name, testCase.expectedResult, receivedResult) } }) } } func TestGetMessage(t *testing.T) { buffer := []byte{1, 2, 3} framedBuffer := append([]byte{3, 0}, buffer...) 
getMessageTestCases := []struct { name string inputBuffer []byte expectedError bool }{ { name: "message read", inputBuffer: framedBuffer, expectedError: false, }, { name: "message not read with sending non framed buffer", inputBuffer: buffer, expectedError: true, }, { name: "message not read with sending insufficient buffer length", inputBuffer: []byte{1}, expectedError: true, }, } for _, testCase := range getMessageTestCases { t.Run(testCase.name, func(t *testing.T) { f := framer.New() f.RecvData(testCase.inputBuffer) receivedResult, err := f.GetMessage() if testCase.expectedError && err == nil { t.Error(testCase.name) } if !testCase.expectedError && !bytes.Equal(buffer, receivedResult) { t.Errorf("expected: %+v, received: %+v", buffer, receivedResult) } }) } } receptor-1.5.3/pkg/framer/mock_framer/000077500000000000000000000000001475465740700177105ustar00rootroot00000000000000receptor-1.5.3/pkg/framer/mock_framer/framer.go000066400000000000000000000052751475465740700215240ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/framer/framer.go // // Generated by this command: // // mockgen -source=pkg/framer/framer.go -destination=pkg/framer/mock_framer/framer.go // // Package mock_framer is a generated GoMock package. package mock_framer import ( reflect "reflect" gomock "go.uber.org/mock/gomock" ) // MockFramer is a mock of Framer interface. type MockFramer struct { ctrl *gomock.Controller recorder *MockFramerMockRecorder isgomock struct{} } // MockFramerMockRecorder is the mock recorder for MockFramer. type MockFramerMockRecorder struct { mock *MockFramer } // NewMockFramer creates a new mock instance. func NewMockFramer(ctrl *gomock.Controller) *MockFramer { mock := &MockFramer{ctrl: ctrl} mock.recorder = &MockFramerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFramer) EXPECT() *MockFramerMockRecorder { return m.recorder } // GetMessage mocks base method. func (m *MockFramer) GetMessage() ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetMessage") ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMessage indicates an expected call of GetMessage. func (mr *MockFramerMockRecorder) GetMessage() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMessage", reflect.TypeOf((*MockFramer)(nil).GetMessage)) } // MessageReady mocks base method. func (m *MockFramer) MessageReady() bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MessageReady") ret0, _ := ret[0].(bool) return ret0 } // MessageReady indicates an expected call of MessageReady. func (mr *MockFramerMockRecorder) MessageReady() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MessageReady", reflect.TypeOf((*MockFramer)(nil).MessageReady)) } // RecvData mocks base method. func (m *MockFramer) RecvData(buf []byte) { m.ctrl.T.Helper() m.ctrl.Call(m, "RecvData", buf) } // RecvData indicates an expected call of RecvData. func (mr *MockFramerMockRecorder) RecvData(buf any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvData", reflect.TypeOf((*MockFramer)(nil).RecvData), buf) } // SendData mocks base method. func (m *MockFramer) SendData(data []byte) []byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SendData", data) ret0, _ := ret[0].([]byte) return ret0 } // SendData indicates an expected call of SendData. 
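// --- Illustrative sketch, not part of the generated mock: consuming
// MockFramer in a test, following the gomock pattern used by the other tests
// in this repo (assumes imports "testing", "go.uber.org/mock/gomock" and this
// mock_framer package; the example function name is hypothetical).
func exampleMockFramerUsage(t *testing.T) {
	ctrl := gomock.NewController(t)
	m := mock_framer.NewMockFramer(ctrl)
	m.EXPECT().MessageReady().Return(true)
	m.EXPECT().GetMessage().Return([]byte("hi"), nil)
	if m.MessageReady() {
		if msg, err := m.GetMessage(); err != nil || string(msg) != "hi" {
			t.Errorf("unexpected message %q (err %v)", msg, err)
		}
	}
}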
func (mr *MockFramerMockRecorder) SendData(data any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendData", reflect.TypeOf((*MockFramer)(nil).SendData), data) } receptor-1.5.3/pkg/logger/000077500000000000000000000000001475465740700154265ustar00rootroot00000000000000receptor-1.5.3/pkg/logger/logger.go000066400000000000000000000200121475465740700172270ustar00rootroot00000000000000package logger import ( "fmt" "io" "log" "os" "strings" "sync" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) var ( logLevel int showTrace bool ) // Log level constants. const ( ErrorLevel = iota + 1 WarningLevel InfoLevel DebugLevel ) // QuietMode turns off all log output. func SetGlobalQuietMode() { logLevel = 0 } // SetLogLevel is a helper function for setting logLevel int. func SetGlobalLogLevel(level int) { logLevel = level } // GetLogLevelByName is a helper function for returning level associated with log // level string. func GetLogLevelByName(logName string) (int, error) { var err error if val, hasKey := logLevelMap[strings.ToLower(logName)]; hasKey { return val, nil } err = fmt.Errorf("%s is not a valid log level name", logName) return 0, err } // This doesn't seem to be used anywhere. // GetLogLevel returns current log level. func GetLogLevel() int { return logLevel } // LogLevelToName takes an int and returns the corresponding log level name. func LogLevelToName(logLevel int) (string, error) { var err error for k, v := range logLevelMap { if v == logLevel { return k, nil } } err = fmt.Errorf("%d is not a valid log level", logLevel) return "", err } // logLevelMap maps strings to log level int // allows for --LogLevel Debug at command line. var logLevelMap = map[string]int{ "error": ErrorLevel, "warning": WarningLevel, "info": InfoLevel, "debug": DebugLevel, } type MessageFunc func(level int, format string, v ...interface{}) var logger MessageFunc // RegisterLogger registers a function for log delivery. func RegisterLogger(msgFunc MessageFunc) { logger = msgFunc } type ReceptorLogger struct { log.Logger Prefix string m sync.Mutex } // NewReceptorLogger to instantiate a new logger object. func NewReceptorLogger(prefix string) *ReceptorLogger { return &ReceptorLogger{ Logger: *log.New(os.Stdout, prefix, log.LstdFlags), Prefix: prefix, } } // SetOutput sets the output destination for the logger. func (rl *ReceptorLogger) SetOutput(w io.Writer) { rl.Logger.SetOutput(w) } // SetShowTrace is a helper function for setting showTrace bool. func (rl *ReceptorLogger) SetShowTrace(trace bool) { showTrace = trace } // GetLogLevel returns the log level. func (rl *ReceptorLogger) GetLogLevel() int { return logLevel } // Error reports unexpected behavior, likely to result in termination. func (rl *ReceptorLogger) Error(format string, v ...interface{}) { rl.Log(ErrorLevel, format, v...) } // SanError reports unexpected behavior, likely to result in termination. func (rl *ReceptorLogger) SanitizedError(format string, v ...interface{}) { rl.SanitizedLog(ErrorLevel, format, v...) } // Warning reports unexpected behavior, not necessarily resulting in termination. func (rl *ReceptorLogger) Warning(format string, v ...interface{}) { rl.Log(WarningLevel, format, v...) } // SanitizedWarning reports unexpected behavior, not necessarily resulting in termination. func (rl *ReceptorLogger) SanitizedWarning(format string, v ...interface{}) { rl.SanitizedLog(WarningLevel, format, v...) } // Info provides general purpose statements useful to end user. 
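// --- Illustrative sketch, not part of logger.go: configuring and using the
// ReceptorLogger from calling code (package-qualified as a consumer would use
// it, assuming imports "os" and "github.com/ansible/receptor/pkg/logger").
// The global log level gates which messages are emitted; level names are
// resolved case-insensitively by GetLogLevelByName.
func exampleReceptorLogger() error {
	level, err := logger.GetLogLevelByName("debug")
	if err != nil {
		return err
	}
	logger.SetGlobalLogLevel(level)
	rl := logger.NewReceptorLogger("example ")
	rl.SetOutput(os.Stdout)
	rl.Info("starting up on node %s", "foo")
	rl.Debug("only emitted when the global level is debug or higher")
	return nil
}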
func (rl *ReceptorLogger) Info(format string, v ...interface{}) { rl.Log(InfoLevel, format, v...) } // SanitizedInfo provides general purpose statements useful to end user. func (rl *ReceptorLogger) SanitizedInfo(format string, v ...interface{}) { rl.SanitizedLog(InfoLevel, format, v...) } // Debug contains extra information helpful to developers. func (rl *ReceptorLogger) Debug(format string, v ...interface{}) { rl.Log(DebugLevel, format, v...) } // Debug payload data. func (rl *ReceptorLogger) DebugPayload(payloadDebug int, payload string, workUnitID string, connectionType string) { var payloadMessage string var workunitIDMessage string var connectionTypeMessage string switch payloadDebug { case 3: payloadMessage = fmt.Sprintf(" with a payload of: %s", payload) fallthrough case 2: if workUnitID != "" { workunitIDMessage = fmt.Sprintf(" with work unit %s", workUnitID) } else { workunitIDMessage = ", work unit not created yet" } fallthrough case 1: if connectionType != "" { connectionTypeMessage = fmt.Sprintf("Reading from %s", connectionType) } default: } rl.Debug(fmt.Sprintf("PACKET TRACING ENABLED: %s%s%s", connectionTypeMessage, workunitIDMessage, payloadMessage)) //nolint:govet } // SanitizedDebug contains extra information helpful to developers. func (rl *ReceptorLogger) SanitizedDebug(format string, v ...interface{}) { rl.SanitizedLog(DebugLevel, format, v...) } // Trace outputs detailed packet traversal. func (rl *ReceptorLogger) Trace(format string, v ...interface{}) { if showTrace { rl.SetPrefix("TRACE") rl.Log(logLevel, format, v...) } } // SanitizedTrace outputs detailed packet traversal. func (rl *ReceptorLogger) SanitizedTrace(format string, v ...interface{}) { if showTrace { rl.SetPrefix("TRACE") rl.SanitizedLog(logLevel, format, v...) } } // Log adds a prefix and prints a given log message. func (rl *ReceptorLogger) Log(level int, format string, v ...interface{}) { if logger != nil { logger(level, format, v...) return } var prefix string logLevelName, err := LogLevelToName(level) if err != nil { rl.Error("Log entry received with invalid level: %s\n", fmt.Sprintf(format, v...)) return } if rl.GetPrefix() != "" { prefix = rl.GetPrefix() + " " + strings.ToUpper(logLevelName) + " " } else { prefix = strings.ToUpper(logLevelName) + " " } if logLevel >= level { rl.Logger.SetPrefix(prefix) rl.Logger.Printf(format, v...) } } // SanitizedLog adds a prefix and prints a given log message. func (rl *ReceptorLogger) SanitizedLog(level int, format string, v ...interface{}) { if logger != nil { logger(level, format, v...) return } var prefix string logLevelName, err := LogLevelToName(level) if err != nil { message := fmt.Sprintf(format, v...) sanMessage := strings.ReplaceAll(message, "\n", "") rl.Error("Log entry received with invalid level: %s\n", sanMessage) return } if rl.GetPrefix() != "" { prefix = rl.GetPrefix() + " " + strings.ToUpper(logLevelName) + " " } else { prefix = strings.ToUpper(logLevelName) + " " } if logLevel >= level { message := fmt.Sprintf(format, v...) sanMessage := strings.ReplaceAll(message, "\n", "") rl.Logger.SetPrefix(prefix) rl.Logger.Print(sanMessage) } } func (rl *ReceptorLogger) SetPrefix(prefix string) { rl.m.Lock() defer rl.m.Unlock() rl.Prefix = prefix } func (rl *ReceptorLogger) GetPrefix() string { rl.m.Lock() defer rl.m.Unlock() return rl.Prefix } // GetLogLevelByName is a helper function for returning level associated with log // level string. 
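// --- Illustrative sketch, not part of logger.go: DebugPayload emits
// progressively more detail as payloadDebug rises from 1 (connection type
// only) to 3 (full payload), as exercised in logger_test.go. Output only
// appears when the global level is at least DebugLevel.
func exampleDebugPayload(rl *ReceptorLogger) {
	rl.DebugPayload(1, "", "", "unix socket")
	rl.DebugPayload(2, "", "1234", "unix socket")
	rl.DebugPayload(3, "some payload", "1234", "unix socket")
}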
func (rl *ReceptorLogger) GetLogLevelByName(logName string) (int, error) { var err error if val, hasKey := logLevelMap[strings.ToLower(logName)]; hasKey { return val, nil } err = fmt.Errorf("%s is not a valid log level name", logName) return 0, err } // LogLevelToName takes an int and returns the corresponding log level name. func (rl *ReceptorLogger) LogLevelToName(logLevel int) (string, error) { var err error for k, v := range logLevelMap { if v == logLevel { return k, nil } } err = fmt.Errorf("%d is not a valid log level", logLevel) return "", err } type LoglevelCfg struct { Level string `description:"Log level: Error, Warning, Info or Debug" barevalue:"yes" default:"error"` } func (cfg LoglevelCfg) Init() error { var err error val, err := GetLogLevelByName(cfg.Level) if err != nil { return err } SetGlobalLogLevel(val) return nil } type TraceCfg struct{} func (cfg TraceCfg) Prepare() error { return nil } func init() { version := viper.GetInt("version") if version > 1 { return } logLevel = InfoLevel showTrace = false log.SetOutput(os.Stdout) log.SetFlags(log.Ldate | log.Ltime) cmdline.RegisterConfigTypeForApp("receptor-logging", "log-level", "Specifies the verbosity level for command output", LoglevelCfg{}, cmdline.Singleton) cmdline.RegisterConfigTypeForApp("receptor-logging", "trace", "Enables packet tracing output", TraceCfg{}, cmdline.Singleton) } receptor-1.5.3/pkg/logger/logger_test.go000066400000000000000000000107351475465740700203010ustar00rootroot00000000000000package logger_test import ( "bytes" "fmt" "os" "testing" "github.com/ansible/receptor/pkg/logger" ) func TestGetLogLevelByName(t *testing.T) { receptorLogger := logger.NewReceptorLogger("") testCases := []struct { name string error bool }{ {name: "error"}, {name: "warning"}, {name: "info"}, {name: "debug"}, } for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { _, err := receptorLogger.GetLogLevelByName(testCase.name) if err != nil { t.Error(err) } }) } } func TestGetLogLevelByNameWithError(t *testing.T) { receptorLogger := logger.NewReceptorLogger("") _, err := receptorLogger.GetLogLevelByName("does not exist") if err == nil { t.Error("should have error") } } func TestLogLevelToName(t *testing.T) { receptorLogger := logger.NewReceptorLogger("") testCases := []struct { level int }{ {level: 1}, {level: 2}, {level: 3}, {level: 4}, } for _, testCase := range testCases { name := fmt.Sprintf("level: %d", testCase.level) t.Run(name, func(t *testing.T) { _, err := receptorLogger.LogLevelToName(testCase.level) if err != nil { t.Error(err) } }) } } func TestLogLevelToNameWithError(t *testing.T) { receptorLogger := logger.NewReceptorLogger("") _, err := receptorLogger.LogLevelToName(123) if err == nil { t.Error("should have error") } } func TestDebugPayload(t *testing.T) { logFilePath := "/tmp/test-output" logger.SetGlobalLogLevel(4) receptorLogger := logger.NewReceptorLogger("testDebugPayload") logFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) if err != nil { t.Error("error creating test-output file") } payload := "Testing debugPayload" workUnitID := "1234" connectionType := "unix socket" debugPayloadTestCases := []struct { name string debugPayload int payload string workUnitID string connectionType string expectedLog string }{ {name: "debugPayload no log", debugPayload: 0, payload: "", workUnitID: "", connectionType: "", expectedLog: ""}, {name: "debugPayload log level 1", debugPayload: 1, payload: "", workUnitID: "", connectionType: connectionType, expectedLog: 
fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v", connectionType)}, {name: "debugPayload log level 2 with workUnitID", debugPayload: 2, payload: "", workUnitID: workUnitID, connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v with work unit %v", connectionType, workUnitID)}, {name: "debugPayload log level 2 without workUnitID", debugPayload: 2, payload: "", workUnitID: "", connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v", connectionType)}, {name: "debugPayload log level 3 with workUnitID", debugPayload: 3, payload: payload, workUnitID: workUnitID, connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v with work unit %v with a payload of: %v", connectionType, workUnitID, payload)}, {name: "debugPayload log level 3 without workUnitID", debugPayload: 3, payload: payload, workUnitID: "", connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v, work unit not created yet with a payload of: %v", connectionType, payload)}, {name: "debugPayload log level 3 without workUnitID and payload is new line", debugPayload: 3, payload: "\n", workUnitID: "", connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v, work unit not created yet with a payload of: %v", connectionType, "\n")}, {name: "debugPayload log level 3 without workUnitID or payload", debugPayload: 3, payload: "", workUnitID: "", connectionType: connectionType, expectedLog: fmt.Sprintf("PACKET TRACING ENABLED: Reading from %v, work unit not created yet with a payload of: %v", connectionType, "")}, } for _, testCase := range debugPayloadTestCases { t.Run(testCase.name, func(t *testing.T) { receptorLogger.SetOutput(logFile) receptorLogger.DebugPayload(testCase.debugPayload, testCase.payload, testCase.workUnitID, testCase.connectionType) testOutput, err := os.ReadFile(logFilePath) if err != nil { t.Error("error reading test-output file") } if !bytes.Contains(testOutput, []byte(testCase.expectedLog)) { t.Errorf("failed to log correctly, expected: %v got %v", testCase.expectedLog, string(testOutput)) } if err := os.Truncate(logFilePath, 0); err != nil { t.Errorf("failed to truncate: %v", err) } }) } } receptor-1.5.3/pkg/netceptor/000077500000000000000000000000001475465740700161525ustar00rootroot00000000000000receptor-1.5.3/pkg/netceptor/addr.go000066400000000000000000000012711475465740700174140ustar00rootroot00000000000000package netceptor import "fmt" // Addr represents an endpoint address on the Netceptor network. type Addr struct { network string node string service string } // Network returns the network name. func (a Addr) Network() string { return a.network } // String formats this address as a string. func (a Addr) String() string { return fmt.Sprintf("%s:%s", a.node, a.service) } // SetNetwork sets the network variable. func (a *Addr) SetNetwork(network string) { a.network = network } // SetNetwork sets the node variable. func (a *Addr) SetNode(node string) { a.node = node } // SetNetwork sets the service variable. 
func (a *Addr) SetService(service string) { a.service = service } receptor-1.5.3/pkg/netceptor/addr_test.go000066400000000000000000000015451475465740700204570ustar00rootroot00000000000000package netceptor_test import ( "testing" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/netceptor/mock_netceptor" "go.uber.org/mock/gomock" ) func TestNetwork(t *testing.T) { networkResult := "netceptor-testNode1" strResult := "testNode2:testService" ctrl := gomock.NewController(t) mockNetceptor := mock_netceptor.NewMockNetcForPing(ctrl) mockNetceptor.EXPECT().NewAddr(gomock.Any(), gomock.Any()).Return(netceptor.Addr{}) addr := mockNetceptor.NewAddr("testNode2", "testService") addr.SetNetwork(networkResult) addr.SetNode("testNode2") addr.SetService("testService") network := addr.Network() str := addr.String() if network != networkResult { t.Errorf("Expected network to be %v, got %v", networkResult, network) } if str != strResult { t.Errorf("Expected network to be %v, got %v", strResult, str) } } receptor-1.5.3/pkg/netceptor/conn.go000066400000000000000000000340331475465740700174410ustar00rootroot00000000000000package netceptor import ( "context" "crypto/rand" "crypto/rsa" "crypto/tls" "crypto/x509" "crypto/x509/pkix" "encoding/pem" "fmt" "math/big" "net" "os" "strings" "sync" "time" "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/logging" "github.com/quic-go/quic-go/qlog" ) // MaxIdleTimeoutForQuicConnections for quic connections. The default is 30 which we have replicated here. // This value is set on both Dial and Listen connections as the quic library would take the smallest of either connection. var MaxIdleTimeoutForQuicConnections = 30 * time.Second // KeepAliveForQuicConnections is variablized to enable testing of the timeout. // If you are doing a heartbeat your connection wont timeout without severing the connection i.e. firewall. // Having this variablized allows the tests to set KeepAliveForQuicConnections = False so that things will properly fail. var KeepAliveForQuicConnections = true type acceptResult struct { conn net.Conn err error } // Listener implements the net.Listener interface via the Receptor network. type Listener struct { s *Netceptor pc PacketConner ql *quic.Listener acceptChan chan *acceptResult doneChan chan struct{} doneOnce *sync.Once } // Internal implementation of Listen and ListenAndAdvertise. 
func (s *Netceptor) listen(ctx context.Context, service string, tlscfg *tls.Config, advertise bool, adTags map[string]string) (*Listener, error) { if len(service) > 8 { return nil, fmt.Errorf("service name %s too long", service) } if service == "" { service = s.GetEphemeralService() } s.listenerLock.Lock() defer s.listenerLock.Unlock() _, isReserved := s.reservedServices[service] _, isListening := s.listenerRegistry[service] if isReserved || isListening { return nil, fmt.Errorf("service %s is already listening", service) } _ = s.AddNameHash(service) var connType byte if tlscfg == nil { connType = ConnTypeStream tlscfg = generateServerTLSConfig() } else { connType = ConnTypeStreamTLS tlscfg = tlscfg.Clone() tlscfg.NextProtos = []string{"netceptor"} if tlscfg.ClientAuth == tls.RequireAndVerifyClientCert { tlscfg.GetConfigForClient = func(hi *tls.ClientHelloInfo) (*tls.Config, error) { clientTLSCfg := tlscfg.Clone() remoteNode := strings.Split(hi.Conn.RemoteAddr().String(), ":")[0] clientTLSCfg.VerifyPeerCertificate = ReceptorVerifyFunc(tlscfg, [][]byte{}, remoteNode, ExpectedHostnameTypeReceptor, VerifyClient, s.Logger) return clientTLSCfg, nil } } } pc := &PacketConn{ s: s, localService: service, recvChan: make(chan *MessageData), advertise: advertise, adTags: adTags, connType: connType, hopsToLive: s.maxForwardingHops, } pc.StartUnreachable() s.Logger.Debug("%s added service %s to listener registry", s.nodeID, service) s.listenerRegistry[service] = pc cfg := &quic.Config{ Tracer: s.tracer, HandshakeIdleTimeout: 15 * time.Second, MaxIdleTimeout: MaxIdleTimeoutForQuicConnections, Allow0RTT: true, DisablePathMTUDiscovery: false, } statelessResetKey := make([]byte, 32) rand.Read(statelessResetKey) tr := quic.Transport{ Conn: pc, StatelessResetKey: (*quic.StatelessResetKey)(statelessResetKey), } _ = os.Setenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING", "1") ql, err := tr.Listen(tlscfg, cfg) if err != nil { return nil, err } if advertise { s.AddLocalServiceAdvertisement(service, connType, adTags) } doneChan := make(chan struct{}) go func() { select { case <-s.context.Done(): _ = ql.Close() case <-ctx.Done(): _ = ql.Close() case <-doneChan: return } }() li := &Listener{ s: s, pc: pc, ql: ql, acceptChan: make(chan *acceptResult), doneChan: doneChan, doneOnce: &sync.Once{}, } go li.acceptLoop(ctx) return li, nil } func (s *Netceptor) tracer(ctx context.Context, p logging.Perspective, connID quic.ConnectionID) *logging.ConnectionTracer { qlogPath := os.Getenv("QLOGDIR") if qlogPath != "" { role := "server" if p == logging.PerspectiveClient { role = "client" } filename := fmt.Sprintf("log_%x_%s.qlog", connID, role) f, err := os.Create(qlogPath + filename) if err != nil { s.Logger.Debug("failed to create qlog file at path: %s", qlogPath) return nil } defer func() { err := f.Close() if err != nil { s.GetLogger().Error("Error closing %s: %s", qlogPath+filename, err) } }() return qlog.NewConnectionTracer(f, p, connID) } else { return nil } } // Listen returns a stream listener compatible with Go's net.Listener. // If service is blank, generates and uses an ephemeral service name. func (s *Netceptor) Listen(service string, tlscfg *tls.Config) (*Listener, error) { return s.listen(s.context, service, tlscfg, false, nil) } // ListenAndAdvertise listens for stream connections on a service and also advertises it via broadcasts. 
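// --- Illustrative sketch, not part of conn.go: the server side of the stream
// API, assuming an already-initialized *Netceptor value s (how s is
// constructed is outside this file). Passing a nil *tls.Config selects the
// generated insecure QUIC certificates defined later in this file.
func exampleListen(s *Netceptor) error {
	li, err := s.Listen("echo", nil)
	if err != nil {
		return err
	}
	defer li.Close()
	for {
		conn, err := li.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			defer c.Close()
			buf := make([]byte, 64)
			if n, err := c.Read(buf); err == nil {
				_, _ = c.Write(buf[:n]) // echo the message back
			}
		}(conn)
	}
}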
func (s *Netceptor) ListenAndAdvertise(service string, tlscfg *tls.Config, tags map[string]string) (*Listener, error) { return s.listen(s.context, service, tlscfg, true, tags) } func (li *Listener) sendResult(ctx context.Context, conn net.Conn, err error) { select { case <-ctx.Done(): return case li.acceptChan <- &acceptResult{ conn: conn, err: err, }: case <-li.doneChan: } } func (li *Listener) acceptLoop(ctx context.Context) { for { select { case <-ctx.Done(): return case <-li.doneChan: return default: } qc, err := li.ql.Accept(ctx) select { case <-li.doneChan: return default: } if err != nil { li.sendResult(ctx, nil, err) continue } go func() { ctx, cancel := context.WithTimeout(ctx, 60*time.Second) defer cancel() qs, err := qc.AcceptStream(ctx) select { case <-li.doneChan: _ = qc.CloseWithError(500, "Listener Closed") return default: } if os.IsTimeout(err) { _ = qc.CloseWithError(500, "Accept Timeout") return } else if err != nil { _ = qc.CloseWithError(500, fmt.Sprintf("AcceptStream Error: %s", err.Error())) li.sendResult(ctx, nil, err) return } buf := make([]byte, 1) n, err := qs.Read(buf) if err != nil { _ = qc.CloseWithError(500, fmt.Sprintf("Read Error: %s", err.Error())) li.sendResult(ctx, nil, err) return } if n != 1 || buf[0] != 0 { _ = qc.CloseWithError(500, "Read Data Error") li.sendResult(ctx, nil, fmt.Errorf("stream failed to initialize")) return } doneChan := make(chan struct{}, 1) cctx, ccancel := context.WithCancel(li.s.context) conn := &Conn{ s: li.s, pc: li.pc, qc: qc, qs: qs, doneChan: doneChan, doneOnce: &sync.Once{}, ctx: cctx, } rAddr, ok := conn.RemoteAddr().(Addr) if ok { go monitorUnreachable(li.pc, doneChan, rAddr, ccancel) } go func() { select { case <-li.doneChan: _ = conn.Close() case <-cctx.Done(): _ = conn.Close() case <-doneChan: return } }() li.sendResult(ctx, conn, err) }() } } // Accept accepts a connection via the listener. func (li *Listener) Accept() (net.Conn, error) { select { case ar := <-li.acceptChan: return ar.conn, ar.err case <-li.doneChan: return nil, fmt.Errorf("listener closed") } } // Close closes the listener. func (li *Listener) Close() error { li.doneOnce.Do(func() { close(li.doneChan) }) perr := li.pc.Close() if qerr := li.ql.Close(); qerr != nil { return qerr } return perr } // Addr returns the local address of this listener. func (li *Listener) Addr() net.Addr { return li.pc.LocalAddr() } // Conn implements the net.Conn interface via the Receptor network. type Conn struct { s *Netceptor pc PacketConner qc quic.Connection qs quic.Stream doneChan chan struct{} doneOnce *sync.Once ctx context.Context } // Dial returns a stream connection compatible with Go's net.Conn. func (s *Netceptor) Dial(node string, service string, tlscfg *tls.Config) (*Conn, error) { return s.DialContext(context.Background(), node, service, tlscfg) } // DialContext is like Dial but uses a context to allow timeout or cancellation. 
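// --- Illustrative sketch, not part of conn.go: the client side, dialing a
// named service on a remote node with a timeout via DialContext. Assumes an
// initialized *Netceptor value s; the service name "echo" matches the
// listener sketch above.
func exampleDial(s *Netceptor, node string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	conn, err := s.DialContext(ctx, node, "echo", nil)
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if _, err := conn.Write([]byte("ping")); err != nil {
		return nil, err
	}
	buf := make([]byte, 64)
	n, err := conn.Read(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}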
func (s *Netceptor) DialContext(ctx context.Context, node string, service string, tlscfg *tls.Config) (*Conn, error) { _ = s.AddNameHash(node) _ = s.AddNameHash(service) pc, err := s.ListenPacket("") if err != nil { return nil, err } rAddr := s.NewAddr(node, service) cfg := &quic.Config{ Tracer: s.tracer, HandshakeIdleTimeout: 15 * time.Second, MaxIdleTimeout: MaxIdleTimeoutForQuicConnections, Allow0RTT: true, DisablePathMTUDiscovery: false, } if KeepAliveForQuicConnections { cfg.KeepAlivePeriod = MaxIdleTimeoutForQuicConnections / 2 } if tlscfg == nil { tlscfg = generateClientTLSConfig(s.NodeID()) } else { tlscfg = tlscfg.Clone() tlscfg.NextProtos = []string{"netceptor"} } okChan := make(chan struct{}) closeOnce := sync.Once{} pcClose := func() { closeOnce.Do(func() { _ = pc.Close() }) } cctx, ccancel := context.WithCancel(ctx) go func() { select { case <-okChan: return case <-cctx.Done(): pcClose() case <-s.context.Done(): pcClose() } }() doneChan := make(chan struct{}, 1) go monitorUnreachable(pc, doneChan, rAddr, ccancel) _ = os.Setenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING", "1") statelessResetKey := make([]byte, 32) rand.Read(statelessResetKey) tr := quic.Transport{ Conn: pc, StatelessResetKey: (*quic.StatelessResetKey)(statelessResetKey), } qc, err := tr.Dial(cctx, rAddr, tlscfg, cfg) if err != nil { close(okChan) pcClose() if cctx.Err() != nil { return nil, cctx.Err() } return nil, err } qs, err := qc.OpenStreamSync(cctx) if err != nil { close(okChan) _ = qc.CloseWithError(500, err.Error()) _ = pc.Close() if cctx.Err() != nil { return nil, cctx.Err() } return nil, err } // We need to write something to the stream to trigger the Accept() to happen _, err = qs.Write([]byte{0}) if err != nil { close(okChan) _ = qs.Close() _ = pc.Close() if cctx.Err() != nil { return nil, cctx.Err() } return nil, err } close(okChan) go func() { select { case <-qc.Context().Done(): _ = qs.Close() _ = pc.Close() case <-s.context.Done(): _ = qs.Close() _ = pc.Close() case <-doneChan: return } }() conn := &Conn{ s: s, pc: pc, qc: qc, qs: qs, doneChan: doneChan, doneOnce: &sync.Once{}, ctx: cctx, } return conn, nil } // monitorUnreachable receives unreachable messages from the underlying PacketConn, and ends the connection // if the remote service has gone away. func monitorUnreachable(pc PacketConner, doneChan chan struct{}, remoteAddr Addr, cancel context.CancelFunc) { msgCh := pc.SubscribeUnreachable(doneChan) if msgCh == nil { cancel() return } // read from channel until closed for msg := range msgCh { if msg.Problem == ProblemServiceUnknown && msg.ToNode == remoteAddr.node && msg.ToService == remoteAddr.service { pc.GetLogger().Warning("remote service %s to node %s is unreachable", msg.ToService, msg.ToNode) cancel() } } } // Read reads data from the connection. func (c *Conn) Read(b []byte) (n int, err error) { return c.qs.Read(b) } // CancelRead cancels a pending read operation. func (c *Conn) CancelRead() { c.qs.CancelRead(499) } // Write writes data to the connection. func (c *Conn) Write(b []byte) (n int, err error) { return c.qs.Write(b) } // Close closes the writer side of the connection. func (c *Conn) Close() error { c.doneOnce.Do(func() { close(c.doneChan) }) return c.qs.Close() } func (c *Conn) CloseConnection() error { c.pc.Cancel() c.doneOnce.Do(func() { close(c.doneChan) }) c.s.Logger.Debug("closing connection from service %s to %s", c.pc.LocalService(), c.RemoteAddr().String()) return c.qc.CloseWithError(0, "normal close") } // LocalAddr returns the local address of this connection. 
func (c *Conn) LocalAddr() net.Addr { return c.qc.LocalAddr() } // RemoteAddr returns the remote address of this connection. func (c *Conn) RemoteAddr() net.Addr { return c.qc.RemoteAddr() } // SetDeadline sets both read and write deadlines. func (c *Conn) SetDeadline(t time.Time) error { return c.qs.SetDeadline(t) } // SetReadDeadline sets the read deadline. func (c *Conn) SetReadDeadline(t time.Time) error { return c.qs.SetReadDeadline(t) } // SetWriteDeadline sets the write deadline. func (c *Conn) SetWriteDeadline(t time.Time) error { return c.qs.SetWriteDeadline(t) } const insecureCommonName = "netceptor-insecure-common-name" func generateServerTLSConfig() *tls.Config { key, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { panic(err) } template := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ CommonName: insecureCommonName, }, NotBefore: time.Now().Add(-1 * time.Minute), NotAfter: time.Now().Add(24 * time.Hour), } certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key) if err != nil { panic(err) } keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}) certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) tlsCert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { panic(err) } return &tls.Config{ Certificates: []tls.Certificate{tlsCert}, NextProtos: []string{"netceptor"}, MinVersion: tls.VersionTLS12, PreferServerCipherSuites: true, } } func verifyServerCertificate(rawCerts [][]byte, _ [][]*x509.Certificate) error { for i := 0; i < len(rawCerts); i++ { cert, err := x509.ParseCertificate(rawCerts[i]) if err != nil { continue } if cert.Subject.CommonName == insecureCommonName { return nil } } return fmt.Errorf("insecure connection to secure service") } func generateClientTLSConfig(host string) *tls.Config { return &tls.Config{ InsecureSkipVerify: true, VerifyPeerCertificate: verifyServerCertificate, NextProtos: []string{"netceptor"}, ServerName: host, MinVersion: tls.VersionTLS12, } } receptor-1.5.3/pkg/netceptor/external_backend.go000066400000000000000000000126471475465740700220040ustar00rootroot00000000000000package netceptor import ( "context" "fmt" "net" "sync" "time" "github.com/ansible/receptor/pkg/framer" "github.com/ansible/receptor/pkg/utils" "github.com/gorilla/websocket" ) // ExternalBackend is a backend implementation for the situation when non-Receptor code // is initiating connections, outside the control of a Receptor-managed accept loop. type ExternalBackend struct { ctx context.Context sessChan chan BackendSession } // MessageConn is an abstract connection that sends and receives whole messages (datagrams). type MessageConn interface { WriteMessage(ctx context.Context, data []byte) error ReadMessage(ctx context.Context, timeout time.Duration) ([]byte, error) SetReadDeadline(t time.Time) error Close() error } // netMessageConn implements MessageConn for Go net.Conn. type netMessageConn struct { conn net.Conn framer framer.Framer } // MessageConnFromNetConn returns a MessageConnection that wraps a net.Conn. func MessageConnFromNetConn(conn net.Conn) MessageConn { return &netMessageConn{ conn: conn, framer: framer.New(), } } // WriteMessage writes a message to the connection. 
func (mc *netMessageConn) WriteMessage(ctx context.Context, data []byte) error { if ctx.Err() != nil { return fmt.Errorf("session closed: %s", ctx.Err()) } buf := mc.framer.SendData(data) n, err := mc.conn.Write(buf) if err != nil { return err } if n != len(buf) { return fmt.Errorf("partial data sent") } return nil } // ReadMessage reads a message from the connection. func (mc *netMessageConn) ReadMessage(ctx context.Context, timeout time.Duration) ([]byte, error) { buf := make([]byte, utils.NormalBufferSize) err := mc.conn.SetReadDeadline(time.Now().Add(timeout)) if err != nil { return nil, err } for { if ctx.Err() != nil { return nil, fmt.Errorf("session closed: %s", ctx.Err()) } if mc.framer.MessageReady() { break } n, err := mc.conn.Read(buf) if n > 0 { mc.framer.RecvData(buf[:n]) } if nerr, ok := err.(net.Error); ok && nerr.Timeout() { return nil, ErrTimeout } if err != nil { return nil, err } } buf, err = mc.framer.GetMessage() if err != nil { return nil, err } return buf, nil } // SetReadDeadline sets the deadline by which a message must be read from the connection. func (mc *netMessageConn) SetReadDeadline(_ time.Time) error { panic("implement me") } // Close closes the connection. func (mc *netMessageConn) Close() error { return mc.conn.Close() } // websocketMessageConn implements MessageConn for Gorilla websocket.Conn. type websocketMessageConn struct { conn *websocket.Conn } // MessageConnFromWebsocketConn returns a MessageConnection that wraps a Gorilla websocket.Conn. func MessageConnFromWebsocketConn(conn *websocket.Conn) MessageConn { return &websocketMessageConn{ conn: conn, } } // WriteMessage writes a message to the connection. func (mc *websocketMessageConn) WriteMessage(ctx context.Context, data []byte) error { if ctx.Err() != nil { return fmt.Errorf("session closed: %s", ctx.Err()) } return mc.conn.WriteMessage(websocket.BinaryMessage, data) } // ReadMessage reads a message from the connection. func (mc *websocketMessageConn) ReadMessage(ctx context.Context, _ time.Duration) ([]byte, error) { if ctx.Err() != nil { return nil, fmt.Errorf("session closed: %s", ctx.Err()) } messageType, data, err := mc.conn.ReadMessage() if messageType != websocket.BinaryMessage { return nil, fmt.Errorf("received message of wrong type") } return data, err } // SetReadDeadline sets the deadline by which a message must be read from the connection. func (mc *websocketMessageConn) SetReadDeadline(t time.Time) error { return mc.conn.SetReadDeadline(t) } // Close closes the connection. func (mc *websocketMessageConn) Close() error { return mc.conn.Close() } // NewExternalBackend initializes a new ExternalBackend object. func NewExternalBackend() (*ExternalBackend, error) { return &ExternalBackend{}, nil } // Start launches the backend from Receptor's point of view, and waits for connections to happen. func (b *ExternalBackend) Start(ctx context.Context, _ *sync.WaitGroup) (chan BackendSession, error) { b.ctx = ctx b.sessChan = make(chan BackendSession) return b.sessChan, nil } // ExternalSession implements BackendSession for external backends. type ExternalSession struct { eb *ExternalBackend conn MessageConn shouldClose bool ctx context.Context cancel context.CancelFunc } // NewConnection is called by the external code when a new connection is available. The // connection will be closed when the session ends if closeConnWithSession is true. The // returned context will be cancelled after the connection closes. 
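// --- Illustrative sketch, not part of external_backend.go: external code
// accepting its own TCP connections and handing them to Receptor through an
// ExternalBackend. Assumes the session channel returned by Start is consumed
// by a Netceptor instance elsewhere (that wiring is outside this file);
// NewConnection blocks until the session is picked up.
func exampleExternalBackend(ctx context.Context, l net.Listener) error {
	b, err := NewExternalBackend()
	if err != nil {
		return err
	}
	sessChan, err := b.Start(ctx, &sync.WaitGroup{})
	if err != nil {
		return err
	}
	_ = sessChan // in real use, handed to the Netceptor core to run sessions
	for {
		c, err := l.Accept()
		if err != nil {
			return err
		}
		// Wrap the raw stream so it carries framed datagrams; passing true
		// means the session closes the connection when it ends.
		connCtx := b.NewConnection(MessageConnFromNetConn(c), true)
		_ = connCtx // cancelled once the connection closes
	}
}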
func (b *ExternalBackend) NewConnection(conn MessageConn, closeConnWithSession bool) context.Context { ctx, cancel := context.WithCancel(b.ctx) ebs := &ExternalSession{ eb: b, conn: conn, shouldClose: closeConnWithSession, ctx: ctx, cancel: cancel, } b.sessChan <- ebs return ctx } // Send sends data over the session. func (es *ExternalSession) Send(data []byte) error { return es.conn.WriteMessage(es.ctx, data) } // Recv receives data via the session. func (es *ExternalSession) Recv(timeout time.Duration) ([]byte, error) { return es.conn.ReadMessage(es.ctx, timeout) } // Close closes the session. func (es *ExternalSession) Close() error { es.cancel() var err error if es.shouldClose { err = es.conn.Close() } return err } receptor-1.5.3/pkg/netceptor/firewall_rules.go000066400000000000000000000110621475465740700215200ustar00rootroot00000000000000package netceptor import ( "fmt" "reflect" "regexp" "strings" ) type FirewallRuleData map[interface{}]interface{} type FirewallRule struct { Action string FromNode string ToNode string FromService string ToService string } func buildComp(field string, pattern string) CompareFunc { if pattern == "" { return nil } var comp CompareFunc if strings.HasPrefix(pattern, "/") { comp, _ = regexCompare(field, pattern) } else { comp, _ = stringCompare(field, pattern) } return comp } func (fr FirewallRule) BuildComps() []CompareFunc { var comps []CompareFunc fnc := buildComp("fromnode", fr.FromNode) if fnc != nil { comps = append(comps, fnc) } tnc := buildComp("tonode", fr.ToNode) if tnc != nil { comps = append(comps, tnc) } fsc := buildComp("fromservice", fr.FromService) if fsc != nil { comps = append(comps, fsc) } tsc := buildComp("toservice", fr.ToService) if tsc != nil { comps = append(comps, tsc) } return comps } // ParseFirewallRule takes a single string describing a firewall rule, and returns a FirewallRuleFunc function. func (frd FirewallRuleData) ParseFirewallRule() (FirewallRuleFunc, error) { rv := reflect.ValueOf(frd) if rv.Kind() != reflect.Map { return nil, fmt.Errorf("invalid firewall rule. see documentation for syntax") } fr := FirewallRule{} for _, key := range rv.MapKeys() { mkv := rv.MapIndex(key) key := key.Elem().String() switch mkv.Interface().(type) { case string: // expected default: return nil, fmt.Errorf("invalid firewall rule. %s must be a string", key) } val := mkv.Elem().String() switch strings.ToLower(key) { case "action": fr.Action = val case "fromnode": fr.FromNode = val case "tonode": fr.ToNode = val case "fromservice": fr.FromService = val case "toservice": fr.ToService = val default: return nil, fmt.Errorf("invalid filewall rule. unknown key: %s", key) } } comps := fr.BuildComps() fwr, err := firewallRule(comps, fr.Action) if err != nil { return nil, err } return fwr, nil } // ParseFirewallRules takes a slice of string describing firewall rules, and returns a slice of FirewallRuleFunc functions. 
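// --- Illustrative sketch, not part of firewall_rules.go: building a single
// rule that drops traffic from node "foo" to the "control" service, mirroring
// the cases in firewall_rules_test.go. Keys are matched case-insensitively,
// and values wrapped in /.../ are treated as anchored regular expressions.
func exampleFirewallRule() (FirewallResult, error) {
	frd := FirewallRuleData{
		"Action":    "drop",
		"FromNode":  "foo",
		"ToService": "control",
	}
	rule, err := frd.ParseFirewallRule()
	if err != nil {
		return FirewallResultContinue, err
	}
	// Matching traffic yields FirewallResultDrop; anything else falls through
	// with FirewallResultContinue to be checked against the next rule.
	return rule(&MessageData{FromNode: "foo", ToNode: "bar", ToService: "control"}), nil
}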
func ParseFirewallRules(rules []FirewallRuleData) ([]FirewallRuleFunc, error) { results := make([]FirewallRuleFunc, 0) for i, rule := range rules { result, err := rule.ParseFirewallRule() if err != nil { return nil, fmt.Errorf("error in rule %d: %s", i, err) } results = append(results, result) } return results, nil } type CompareFunc func(md *MessageData) bool func firewallRule(comparers []CompareFunc, action string) (FirewallRuleFunc, error) { var result FirewallResult switch strings.ToLower(action) { case "accept": result = FirewallResultAccept case "reject": result = FirewallResultReject case "drop": result = FirewallResultDrop default: return nil, fmt.Errorf("unknown action: %s", action) } if len(comparers) == 0 { return func(md *MessageData) FirewallResult { return result }, nil } return func(md *MessageData) FirewallResult { matched := true for _, comp := range comparers { matched = matched && comp(md) } if matched { return result } return FirewallResultContinue }, nil } func stringCompare(field string, value string) (CompareFunc, error) { switch strings.ToLower(field) { case "fromnode": return func(md *MessageData) bool { return md.FromNode == value }, nil case "fromservice": return func(md *MessageData) bool { return md.FromService == value }, nil case "tonode": return func(md *MessageData) bool { return md.ToNode == value }, nil case "toservice": return func(md *MessageData) bool { return md.ToService == value }, nil } return nil, fmt.Errorf("unknown field: %s", field) } func regexCompare(field string, value string) (CompareFunc, error) { if value[0] != '/' || value[len(value)-1] != '/' { return nil, fmt.Errorf("regex not enclosed in //") } value = fmt.Sprintf("^%s$", value[1:len(value)-1]) re, err := regexp.Compile(value) if err != nil { return nil, fmt.Errorf("regex failed to compile: %s", value) } switch strings.ToLower(field) { case "fromnode": return func(md *MessageData) bool { return re.MatchString(md.FromNode) }, nil case "fromservice": return func(md *MessageData) bool { return re.MatchString(md.FromService) }, nil case "tonode": return func(md *MessageData) bool { return re.MatchString(md.ToNode) }, nil case "toservice": return func(md *MessageData) bool { return re.MatchString(md.ToService) }, nil } return nil, fmt.Errorf("unknown field: %s", field) } receptor-1.5.3/pkg/netceptor/firewall_rules_test.go000066400000000000000000000034561475465740700225670ustar00rootroot00000000000000package netceptor import ( "testing" ) func TestFirewallRules(t *testing.T) { var frd FirewallRuleData // Rule #1 frd = FirewallRuleData{} frd["action"] = "accept" rule, err := frd.ParseFirewallRule() if err != nil { t.Fatal(err) } if rule(&MessageData{}) != FirewallResultAccept { t.Fatal("rule #1 did not return Accept") } // // Rule #2 frd = FirewallRuleData{} frd["Action"] = "drop" frd["FromNode"] = "foo" frd["ToNode"] = "bar" frd["ToService"] = "control" rule, err = frd.ParseFirewallRule() if err != nil { t.Fatal(err) } if rule(&MessageData{}) != FirewallResultContinue { t.Fatal("rule #2 did not return Continue") } if rule(&MessageData{ FromNode: "foo", ToNode: "bar", ToService: "control", }) != FirewallResultDrop { t.Fatal("rule #2 did not return Drop") } // Rule #3 frd = FirewallRuleData{} frd["fromnode"] = "/a.*b/" frd["action"] = "reject" rule, err = frd.ParseFirewallRule() if err != nil { t.Fatal(err) } if rule(&MessageData{}) != FirewallResultContinue { t.Fatal("rule #3 did not return Continue") } if rule(&MessageData{ FromNode: "appleb", }) != FirewallResultReject { t.Fatal("rule #3 
did not return Reject") } if rule(&MessageData{ FromNode: "Appleb", }) != FirewallResultContinue { t.Fatal("rule #3 did not return Continue") } // Rule #4 frd = FirewallRuleData{} frd["TONODE"] = "/(?i)a.*b/" frd["ACTION"] = "reject" rule, err = frd.ParseFirewallRule() if err != nil { t.Fatal(err) } if rule(&MessageData{}) != FirewallResultContinue { t.Fatal("rule #4 did not return Continue") } if rule(&MessageData{ ToNode: "appleb", }) != FirewallResultReject { t.Fatal("rule #4 did not return Reject") } if rule(&MessageData{ ToNode: "Appleb", }) != FirewallResultReject { t.Fatal("rule #4 did not return Reject") } } receptor-1.5.3/pkg/netceptor/mock_netceptor/000077500000000000000000000000001475465740700211665ustar00rootroot00000000000000receptor-1.5.3/pkg/netceptor/mock_netceptor/external_backend.go000066400000000000000000000061461475465740700250150ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/netceptor/external_backend.go // // Generated by this command: // // mockgen -source=pkg/netceptor/external_backend.go -destination=pkg/netceptor/mock_netceptor/external_backend.go // // Package mock_netceptor is a generated GoMock package. package mock_netceptor import ( context "context" reflect "reflect" time "time" gomock "go.uber.org/mock/gomock" ) // MockMessageConn is a mock of MessageConn interface. type MockMessageConn struct { ctrl *gomock.Controller recorder *MockMessageConnMockRecorder isgomock struct{} } // MockMessageConnMockRecorder is the mock recorder for MockMessageConn. type MockMessageConnMockRecorder struct { mock *MockMessageConn } // NewMockMessageConn creates a new mock instance. func NewMockMessageConn(ctrl *gomock.Controller) *MockMessageConn { mock := &MockMessageConn{ctrl: ctrl} mock.recorder = &MockMessageConnMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockMessageConn) EXPECT() *MockMessageConnMockRecorder { return m.recorder } // Close mocks base method. func (m *MockMessageConn) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockMessageConnMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockMessageConn)(nil).Close)) } // ReadMessage mocks base method. func (m *MockMessageConn) ReadMessage(ctx context.Context, timeout time.Duration) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadMessage", ctx, timeout) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // ReadMessage indicates an expected call of ReadMessage. func (mr *MockMessageConnMockRecorder) ReadMessage(ctx, timeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMessage", reflect.TypeOf((*MockMessageConn)(nil).ReadMessage), ctx, timeout) } // SetReadDeadline mocks base method. func (m *MockMessageConn) SetReadDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetReadDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetReadDeadline indicates an expected call of SetReadDeadline. func (mr *MockMessageConnMockRecorder) SetReadDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadDeadline", reflect.TypeOf((*MockMessageConn)(nil).SetReadDeadline), t) } // WriteMessage mocks base method. 
func (m *MockMessageConn) WriteMessage(ctx context.Context, data []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteMessage", ctx, data) ret0, _ := ret[0].(error) return ret0 } // WriteMessage indicates an expected call of WriteMessage. func (mr *MockMessageConnMockRecorder) WriteMessage(ctx, data any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteMessage", reflect.TypeOf((*MockMessageConn)(nil).WriteMessage), ctx, data) } receptor-1.5.3/pkg/netceptor/mock_netceptor/netceptor.go000066400000000000000000000073171475465740700235300ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/netceptor/netceptor.go // // Generated by this command: // // mockgen -source=pkg/netceptor/netceptor.go -destination=pkg/netceptor/mock_netceptor/netceptor.go // // Package mock_netceptor is a generated GoMock package. package mock_netceptor import ( context "context" reflect "reflect" sync "sync" time "time" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockBackend is a mock of Backend interface. type MockBackend struct { ctrl *gomock.Controller recorder *MockBackendMockRecorder isgomock struct{} } // MockBackendMockRecorder is the mock recorder for MockBackend. type MockBackendMockRecorder struct { mock *MockBackend } // NewMockBackend creates a new mock instance. func NewMockBackend(ctrl *gomock.Controller) *MockBackend { mock := &MockBackend{ctrl: ctrl} mock.recorder = &MockBackendMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockBackend) EXPECT() *MockBackendMockRecorder { return m.recorder } // Start mocks base method. func (m *MockBackend) Start(arg0 context.Context, arg1 *sync.WaitGroup) (chan netceptor.BackendSession, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start", arg0, arg1) ret0, _ := ret[0].(chan netceptor.BackendSession) ret1, _ := ret[1].(error) return ret0, ret1 } // Start indicates an expected call of Start. func (mr *MockBackendMockRecorder) Start(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockBackend)(nil).Start), arg0, arg1) } // MockBackendSession is a mock of BackendSession interface. type MockBackendSession struct { ctrl *gomock.Controller recorder *MockBackendSessionMockRecorder isgomock struct{} } // MockBackendSessionMockRecorder is the mock recorder for MockBackendSession. type MockBackendSessionMockRecorder struct { mock *MockBackendSession } // NewMockBackendSession creates a new mock instance. func NewMockBackendSession(ctrl *gomock.Controller) *MockBackendSession { mock := &MockBackendSession{ctrl: ctrl} mock.recorder = &MockBackendSessionMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockBackendSession) EXPECT() *MockBackendSessionMockRecorder { return m.recorder } // Close mocks base method. func (m *MockBackendSession) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockBackendSessionMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockBackendSession)(nil).Close)) } // Recv mocks base method. 
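// An illustrative test sketch (added, not part of the generated source),
// assuming a *testing.T value "t" and imports of "time",
// "go.uber.org/mock/gomock" and this mock package:
//
//	ctrl := gomock.NewController(t)
//	defer ctrl.Finish()
//	sess := mock_netceptor.NewMockBackendSession(ctrl)
//	sess.EXPECT().Recv(gomock.Any()).Return([]byte("hello"), nil)
//	data, err := sess.Recv(time.Second)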
func (m *MockBackendSession) Recv(arg0 time.Duration) ([]byte, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Recv", arg0) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // Recv indicates an expected call of Recv. func (mr *MockBackendSessionMockRecorder) Recv(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Recv", reflect.TypeOf((*MockBackendSession)(nil).Recv), arg0) } // Send mocks base method. func (m *MockBackendSession) Send(arg0 []byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Send", arg0) ret0, _ := ret[0].(error) return ret0 } // Send indicates an expected call of Send. func (mr *MockBackendSessionMockRecorder) Send(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockBackendSession)(nil).Send), arg0) } receptor-1.5.3/pkg/netceptor/mock_netceptor/packetconn.go000066400000000000000000000407201475465740700236450ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/netceptor/packetconn.go // // Generated by this command: // // mockgen -source=pkg/netceptor/packetconn.go -destination=pkg/netceptor/mock_netceptor/packetconn.go // // Package mock_netceptor is a generated GoMock package. package mock_netceptor import ( context "context" net "net" reflect "reflect" sync "sync" time "time" logger "github.com/ansible/receptor/pkg/logger" netceptor "github.com/ansible/receptor/pkg/netceptor" utils "github.com/ansible/receptor/pkg/utils" gomock "go.uber.org/mock/gomock" ) // MockPacketConner is a mock of PacketConner interface. type MockPacketConner struct { ctrl *gomock.Controller recorder *MockPacketConnerMockRecorder isgomock struct{} } // MockPacketConnerMockRecorder is the mock recorder for MockPacketConner. type MockPacketConnerMockRecorder struct { mock *MockPacketConner } // NewMockPacketConner creates a new mock instance. func NewMockPacketConner(ctrl *gomock.Controller) *MockPacketConner { mock := &MockPacketConner{ctrl: ctrl} mock.recorder = &MockPacketConnerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockPacketConner) EXPECT() *MockPacketConnerMockRecorder { return m.recorder } // Cancel mocks base method. func (m *MockPacketConner) Cancel() *context.CancelFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cancel") ret0, _ := ret[0].(*context.CancelFunc) return ret0 } // Cancel indicates an expected call of Cancel. func (mr *MockPacketConnerMockRecorder) Cancel() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cancel", reflect.TypeOf((*MockPacketConner)(nil).Cancel)) } // Close mocks base method. func (m *MockPacketConner) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockPacketConnerMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockPacketConner)(nil).Close)) } // GetHopsToLive mocks base method. func (m *MockPacketConner) GetHopsToLive() byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHopsToLive") ret0, _ := ret[0].(byte) return ret0 } // GetHopsToLive indicates an expected call of GetHopsToLive. 
func (mr *MockPacketConnerMockRecorder) GetHopsToLive() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHopsToLive", reflect.TypeOf((*MockPacketConner)(nil).GetHopsToLive)) } // GetLogger mocks base method. func (m *MockPacketConner) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockPacketConnerMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockPacketConner)(nil).GetLogger)) } // GetReadDeadline mocks base method. func (m *MockPacketConner) GetReadDeadline() time.Time { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetReadDeadline") ret0, _ := ret[0].(time.Time) return ret0 } // GetReadDeadline indicates an expected call of GetReadDeadline. func (mr *MockPacketConnerMockRecorder) GetReadDeadline() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReadDeadline", reflect.TypeOf((*MockPacketConner)(nil).GetReadDeadline)) } // LocalAddr mocks base method. func (m *MockPacketConner) LocalAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LocalAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // LocalAddr indicates an expected call of LocalAddr. func (mr *MockPacketConnerMockRecorder) LocalAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddr", reflect.TypeOf((*MockPacketConner)(nil).LocalAddr)) } // LocalService mocks base method. func (m *MockPacketConner) LocalService() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LocalService") ret0, _ := ret[0].(string) return ret0 } // LocalService indicates an expected call of LocalService. func (mr *MockPacketConnerMockRecorder) LocalService() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalService", reflect.TypeOf((*MockPacketConner)(nil).LocalService)) } // ReadFrom mocks base method. func (m *MockPacketConner) ReadFrom(p []byte) (int, net.Addr, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFrom", p) ret0, _ := ret[0].(int) ret1, _ := ret[1].(net.Addr) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ReadFrom indicates an expected call of ReadFrom. func (mr *MockPacketConnerMockRecorder) ReadFrom(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFrom", reflect.TypeOf((*MockPacketConner)(nil).ReadFrom), p) } // SetDeadline mocks base method. func (m *MockPacketConner) SetDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetDeadline indicates an expected call of SetDeadline. func (mr *MockPacketConnerMockRecorder) SetDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDeadline", reflect.TypeOf((*MockPacketConner)(nil).SetDeadline), t) } // SetHopsToLive mocks base method. func (m *MockPacketConner) SetHopsToLive(hopsToLive byte) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetHopsToLive", hopsToLive) } // SetHopsToLive indicates an expected call of SetHopsToLive. 
func (mr *MockPacketConnerMockRecorder) SetHopsToLive(hopsToLive any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHopsToLive", reflect.TypeOf((*MockPacketConner)(nil).SetHopsToLive), hopsToLive) } // SetReadDeadline mocks base method. func (m *MockPacketConner) SetReadDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetReadDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetReadDeadline indicates an expected call of SetReadDeadline. func (mr *MockPacketConnerMockRecorder) SetReadDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadDeadline", reflect.TypeOf((*MockPacketConner)(nil).SetReadDeadline), t) } // SetWriteDeadline mocks base method. func (m *MockPacketConner) SetWriteDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetWriteDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetWriteDeadline indicates an expected call of SetWriteDeadline. func (mr *MockPacketConnerMockRecorder) SetWriteDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteDeadline", reflect.TypeOf((*MockPacketConner)(nil).SetWriteDeadline), t) } // StartUnreachable mocks base method. func (m *MockPacketConner) StartUnreachable() { m.ctrl.T.Helper() m.ctrl.Call(m, "StartUnreachable") } // StartUnreachable indicates an expected call of StartUnreachable. func (mr *MockPacketConnerMockRecorder) StartUnreachable() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartUnreachable", reflect.TypeOf((*MockPacketConner)(nil).StartUnreachable)) } // SubscribeUnreachable mocks base method. func (m *MockPacketConner) SubscribeUnreachable(doneChan chan struct{}) chan netceptor.UnreachableNotification { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubscribeUnreachable", doneChan) ret0, _ := ret[0].(chan netceptor.UnreachableNotification) return ret0 } // SubscribeUnreachable indicates an expected call of SubscribeUnreachable. func (mr *MockPacketConnerMockRecorder) SubscribeUnreachable(doneChan any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubscribeUnreachable", reflect.TypeOf((*MockPacketConner)(nil).SubscribeUnreachable), doneChan) } // WriteTo mocks base method. func (m *MockPacketConner) WriteTo(p []byte, addr net.Addr) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTo", p, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // WriteTo indicates an expected call of WriteTo. func (mr *MockPacketConnerMockRecorder) WriteTo(p, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTo", reflect.TypeOf((*MockPacketConner)(nil).WriteTo), p, addr) } // MockNetcForPacketConn is a mock of NetcForPacketConn interface. type MockNetcForPacketConn struct { ctrl *gomock.Controller recorder *MockNetcForPacketConnMockRecorder isgomock struct{} } // MockNetcForPacketConnMockRecorder is the mock recorder for MockNetcForPacketConn. type MockNetcForPacketConnMockRecorder struct { mock *MockNetcForPacketConn } // NewMockNetcForPacketConn creates a new mock instance. 
func NewMockNetcForPacketConn(ctrl *gomock.Controller) *MockNetcForPacketConn { mock := &MockNetcForPacketConn{ctrl: ctrl} mock.recorder = &MockNetcForPacketConnMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetcForPacketConn) EXPECT() *MockNetcForPacketConnMockRecorder { return m.recorder } // AddLocalServiceAdvertisement mocks base method. func (m *MockNetcForPacketConn) AddLocalServiceAdvertisement(service string, connType byte, tags map[string]string) { m.ctrl.T.Helper() m.ctrl.Call(m, "AddLocalServiceAdvertisement", service, connType, tags) } // AddLocalServiceAdvertisement indicates an expected call of AddLocalServiceAdvertisement. func (mr *MockNetcForPacketConnMockRecorder) AddLocalServiceAdvertisement(service, connType, tags any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddLocalServiceAdvertisement", reflect.TypeOf((*MockNetcForPacketConn)(nil).AddLocalServiceAdvertisement), service, connType, tags) } // AddNameHash mocks base method. func (m *MockNetcForPacketConn) AddNameHash(name string) uint64 { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AddNameHash", name) ret0, _ := ret[0].(uint64) return ret0 } // AddNameHash indicates an expected call of AddNameHash. func (mr *MockNetcForPacketConnMockRecorder) AddNameHash(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddNameHash", reflect.TypeOf((*MockNetcForPacketConn)(nil).AddNameHash), name) } // Context mocks base method. func (m *MockNetcForPacketConn) Context() context.Context { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Context") ret0, _ := ret[0].(context.Context) return ret0 } // Context indicates an expected call of Context. func (mr *MockNetcForPacketConnMockRecorder) Context() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockNetcForPacketConn)(nil).Context)) } // GetEphemeralService mocks base method. func (m *MockNetcForPacketConn) GetEphemeralService() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetEphemeralService") ret0, _ := ret[0].(string) return ret0 } // GetEphemeralService indicates an expected call of GetEphemeralService. func (mr *MockNetcForPacketConnMockRecorder) GetEphemeralService() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEphemeralService", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetEphemeralService)) } // GetListenerLock mocks base method. func (m *MockNetcForPacketConn) GetListenerLock() *sync.RWMutex { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetListenerLock") ret0, _ := ret[0].(*sync.RWMutex) return ret0 } // GetListenerLock indicates an expected call of GetListenerLock. func (mr *MockNetcForPacketConnMockRecorder) GetListenerLock() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListenerLock", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetListenerLock)) } // GetListenerRegistry mocks base method. func (m *MockNetcForPacketConn) GetListenerRegistry() map[string]*netceptor.PacketConn { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetListenerRegistry") ret0, _ := ret[0].(map[string]*netceptor.PacketConn) return ret0 } // GetListenerRegistry indicates an expected call of GetListenerRegistry. 
func (mr *MockNetcForPacketConnMockRecorder) GetListenerRegistry() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetListenerRegistry", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetListenerRegistry)) } // GetLogger mocks base method. func (m *MockNetcForPacketConn) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockNetcForPacketConnMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetLogger)) } // GetNetworkName mocks base method. func (m *MockNetcForPacketConn) GetNetworkName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNetworkName") ret0, _ := ret[0].(string) return ret0 } // GetNetworkName indicates an expected call of GetNetworkName. func (mr *MockNetcForPacketConnMockRecorder) GetNetworkName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNetworkName", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetNetworkName)) } // GetUnreachableBroker mocks base method. func (m *MockNetcForPacketConn) GetUnreachableBroker() *utils.Broker { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetUnreachableBroker") ret0, _ := ret[0].(*utils.Broker) return ret0 } // GetUnreachableBroker indicates an expected call of GetUnreachableBroker. func (mr *MockNetcForPacketConnMockRecorder) GetUnreachableBroker() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnreachableBroker", reflect.TypeOf((*MockNetcForPacketConn)(nil).GetUnreachableBroker)) } // MaxForwardingHops mocks base method. func (m *MockNetcForPacketConn) MaxForwardingHops() byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MaxForwardingHops") ret0, _ := ret[0].(byte) return ret0 } // MaxForwardingHops indicates an expected call of MaxForwardingHops. func (mr *MockNetcForPacketConnMockRecorder) MaxForwardingHops() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxForwardingHops", reflect.TypeOf((*MockNetcForPacketConn)(nil).MaxForwardingHops)) } // NodeID mocks base method. func (m *MockNetcForPacketConn) NodeID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NodeID") ret0, _ := ret[0].(string) return ret0 } // NodeID indicates an expected call of NodeID. func (mr *MockNetcForPacketConnMockRecorder) NodeID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockNetcForPacketConn)(nil).NodeID)) } // RemoveLocalServiceAdvertisement mocks base method. func (m *MockNetcForPacketConn) RemoveLocalServiceAdvertisement(service string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RemoveLocalServiceAdvertisement", service) ret0, _ := ret[0].(error) return ret0 } // RemoveLocalServiceAdvertisement indicates an expected call of RemoveLocalServiceAdvertisement. func (mr *MockNetcForPacketConnMockRecorder) RemoveLocalServiceAdvertisement(service any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveLocalServiceAdvertisement", reflect.TypeOf((*MockNetcForPacketConn)(nil).RemoveLocalServiceAdvertisement), service) } // SendMessageWithHopsToLive mocks base method. 
func (m *MockNetcForPacketConn) SendMessageWithHopsToLive(fromService, toNode, toService string, data []byte, hopsToLive byte) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SendMessageWithHopsToLive", fromService, toNode, toService, data, hopsToLive) ret0, _ := ret[0].(error) return ret0 } // SendMessageWithHopsToLive indicates an expected call of SendMessageWithHopsToLive. func (mr *MockNetcForPacketConnMockRecorder) SendMessageWithHopsToLive(fromService, toNode, toService, data, hopsToLive any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMessageWithHopsToLive", reflect.TypeOf((*MockNetcForPacketConn)(nil).SendMessageWithHopsToLive), fromService, toNode, toService, data, hopsToLive) } receptor-1.5.3/pkg/netceptor/mock_netceptor/ping.go000066400000000000000000000126611475465740700224600ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/netceptor/ping.go // // Generated by this command: // // mockgen -source=pkg/netceptor/ping.go -destination=pkg/netceptor/mock_netceptor/ping.go // // Package mock_netceptor is a generated GoMock package. package mock_netceptor import ( context "context" reflect "reflect" time "time" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockNetcForPing is a mock of NetcForPing interface. type MockNetcForPing struct { ctrl *gomock.Controller recorder *MockNetcForPingMockRecorder isgomock struct{} } // MockNetcForPingMockRecorder is the mock recorder for MockNetcForPing. type MockNetcForPingMockRecorder struct { mock *MockNetcForPing } // NewMockNetcForPing creates a new mock instance. func NewMockNetcForPing(ctrl *gomock.Controller) *MockNetcForPing { mock := &MockNetcForPing{ctrl: ctrl} mock.recorder = &MockNetcForPingMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetcForPing) EXPECT() *MockNetcForPingMockRecorder { return m.recorder } // Context mocks base method. func (m *MockNetcForPing) Context() context.Context { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Context") ret0, _ := ret[0].(context.Context) return ret0 } // Context indicates an expected call of Context. func (mr *MockNetcForPingMockRecorder) Context() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockNetcForPing)(nil).Context)) } // ListenPacket mocks base method. func (m *MockNetcForPing) ListenPacket(service string) (netceptor.PacketConner, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListenPacket", service) ret0, _ := ret[0].(netceptor.PacketConner) ret1, _ := ret[1].(error) return ret0, ret1 } // ListenPacket indicates an expected call of ListenPacket. func (mr *MockNetcForPingMockRecorder) ListenPacket(service any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenPacket", reflect.TypeOf((*MockNetcForPing)(nil).ListenPacket), service) } // NewAddr mocks base method. func (m *MockNetcForPing) NewAddr(target, service string) netceptor.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewAddr", target, service) ret0, _ := ret[0].(netceptor.Addr) return ret0 } // NewAddr indicates an expected call of NewAddr. 
func (mr *MockNetcForPingMockRecorder) NewAddr(target, service any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddr", reflect.TypeOf((*MockNetcForPing)(nil).NewAddr), target, service) } // NodeID mocks base method. func (m *MockNetcForPing) NodeID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NodeID") ret0, _ := ret[0].(string) return ret0 } // NodeID indicates an expected call of NodeID. func (mr *MockNetcForPingMockRecorder) NodeID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockNetcForPing)(nil).NodeID)) } // MockNetcForTraceroute is a mock of NetcForTraceroute interface. type MockNetcForTraceroute struct { ctrl *gomock.Controller recorder *MockNetcForTracerouteMockRecorder isgomock struct{} } // MockNetcForTracerouteMockRecorder is the mock recorder for MockNetcForTraceroute. type MockNetcForTracerouteMockRecorder struct { mock *MockNetcForTraceroute } // NewMockNetcForTraceroute creates a new mock instance. func NewMockNetcForTraceroute(ctrl *gomock.Controller) *MockNetcForTraceroute { mock := &MockNetcForTraceroute{ctrl: ctrl} mock.recorder = &MockNetcForTracerouteMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetcForTraceroute) EXPECT() *MockNetcForTracerouteMockRecorder { return m.recorder } // Context mocks base method. func (m *MockNetcForTraceroute) Context() context.Context { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Context") ret0, _ := ret[0].(context.Context) return ret0 } // Context indicates an expected call of Context. func (mr *MockNetcForTracerouteMockRecorder) Context() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockNetcForTraceroute)(nil).Context)) } // MaxForwardingHops mocks base method. func (m *MockNetcForTraceroute) MaxForwardingHops() byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MaxForwardingHops") ret0, _ := ret[0].(byte) return ret0 } // MaxForwardingHops indicates an expected call of MaxForwardingHops. func (mr *MockNetcForTracerouteMockRecorder) MaxForwardingHops() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MaxForwardingHops", reflect.TypeOf((*MockNetcForTraceroute)(nil).MaxForwardingHops)) } // Ping mocks base method. func (m *MockNetcForTraceroute) Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Ping", ctx, target, hopsToLive) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(string) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // Ping indicates an expected call of Ping. func (mr *MockNetcForTracerouteMockRecorder) Ping(ctx, target, hopsToLive any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockNetcForTraceroute)(nil).Ping), ctx, target, hopsToLive) } receptor-1.5.3/pkg/netceptor/netceptor.go000066400000000000000000001647661475465740700205300ustar00rootroot00000000000000// Package netceptor is the networking layer of Receptor. 
package netceptor import ( "bytes" "context" "crypto/sha256" "crypto/sha512" "crypto/tls" "crypto/x509" "encoding/binary" "encoding/json" "fmt" "io" "math" "math/rand" "reflect" "strings" "sync" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/randstr" "github.com/ansible/receptor/pkg/tickrunner" "github.com/ansible/receptor/pkg/utils" priorityQueue "github.com/jupp0r/go-priority-queue" "github.com/minio/highwayhash" ) // defaultMTU is the largest message sendable over the Netceptor network. const defaultMTU = 16384 // defaultRouteUpdateTime is the interval at which regular route updates will be sent. const defaultRouteUpdateTime = 10 * time.Second // defaultServiceAdTime is the interval at which regular service advertisements will be sent. const defaultServiceAdTime = 60 * time.Second // defaultSeenUpdateExpireTime is the age after which routing update IDs can be discarded. const defaultSeenUpdateExpireTime = 1 * time.Hour // defaultMaxForwardingHops is the maximum number of times that Netceptor will forward a data packet. const defaultMaxForwardingHops = 30 // defaultMaxConnectionIdleTime is the maximum time a connection can go without data before we consider it failed. const defaultMaxConnectionIdleTime = 2*defaultRouteUpdateTime + 1*time.Second // MainInstance is the global instance of Netceptor instantiated by the command-line main() function. var MainInstance *Netceptor // ErrorFunc is a function parameter used to process errors. The boolean parameter // indicates whether the error is fatal (i.e. the associated process is going to exit). type ErrorFunc func(error, bool) // ErrTimeout is returned for an expired deadline. var ErrTimeout error = &TimeoutError{} // TimeoutError is returned for an expired deadline. type TimeoutError struct{} // Error returns a string describing the error. func (e *TimeoutError) Error() string { return "i/o timeout" } // Timeout returns true if this error was a timeout. func (e *TimeoutError) Timeout() bool { return true } // Temporary returns true if a retry is likely a good idea. func (e *TimeoutError) Temporary() bool { return true } // Backend is the interface for back-ends that the Receptor network can run over. type Backend interface { Start(context.Context, *sync.WaitGroup) (chan BackendSession, error) } // BackendSession is the interface for a single session of a back-end. // Backends must be DATAGRAM ORIENTED, meaning that Recv() must return // whole packets sent by Send(). If the underlying protocol is stream // oriented, then the backend must deal with any required buffering. type BackendSession interface { Send([]byte) error Recv(time.Duration) ([]byte, error) // Must return netceptor.ErrTimeout if the timeout is exceeded Close() error } // FirewallRuleFunc is a function that takes a message and returns a firewall decision. type FirewallRuleFunc func(*MessageData) FirewallResult // FirewallResult enumerates the actions that can be taken as a result of a firewall rule. type FirewallResult int const ( // FirewallResultContinue continues processing further rules (no result). FirewallResultContinue FirewallResult = iota // FirewallResultAccept accepts the message for normal processing. FirewallResultAccept // FirewallResultReject denies the message, sending an unreachable message to the originator. FirewallResultReject // FirewallResultDrop denies the message silently, leaving the originator to time out. FirewallResultDrop ) // Netceptor is the main object of the Receptor mesh network protocol. 
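// A brief construction sketch (added for illustration, not part of the
// original source); New applies the default operational constants defined
// above:
//
//	ctx := context.Background()
//	n := netceptor.New(ctx, "node1")
//	defer n.Shutdown()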
type Netceptor struct { nodeID string mtu int routeUpdateTime time.Duration serviceAdTime time.Duration seenUpdateExpireTime time.Duration maxForwardingHops byte maxConnectionIdleTime time.Duration workCommands []WorkCommand workCommandsLock *sync.RWMutex epoch uint64 sequence uint64 sequenceLock *sync.RWMutex connLock *sync.RWMutex connections map[string]*connInfo knownNodeLock *sync.RWMutex knownNodeInfo map[string]*nodeInfo seenUpdatesLock *sync.RWMutex seenUpdates map[string]time.Time knownConnectionCosts map[string]map[string]float64 routingTableLock *sync.RWMutex routingTable map[string]string routingPathCosts map[string]float64 listenerLock *sync.RWMutex listenerRegistry map[string]*PacketConn sendRouteFloodChan chan time.Duration updateRoutingTableChan chan time.Duration context context.Context cancelFunc context.CancelFunc hashLock *sync.RWMutex nameHashes map[uint64]string reservedServices map[string]func(*MessageData) error serviceAdsLock *sync.RWMutex serviceAdsReceived map[string]map[string]*ServiceAdvertisement sendServiceAdsChan chan time.Duration backendWaitGroup sync.WaitGroup backendCount int backendCancel []context.CancelFunc networkName string serverTLSConfigs map[string]*tls.Config clientTLSConfigs map[string]*tls.Config clientPinnedFingerprints map[string][][]byte unreachableBroker *utils.Broker routingUpdateBroker *utils.Broker firewallLock *sync.RWMutex firewallRules []FirewallRuleFunc Logger *logger.ReceptorLogger } // ConnStatus holds information about a single connection in the Status struct. type ConnStatus struct { NodeID string Cost float64 } // Status is the struct returned by Netceptor.Status(). It represents a public // view of the internal status of the Netceptor object. type Status struct { NodeID string Connections []*ConnStatus RoutingTable map[string]string Advertisements []*ServiceAdvertisement KnownConnectionCosts map[string]map[string]float64 } const ( // MsgTypeData is a normal data-containing message. MsgTypeData = 0 // MsgTypeRoute is a routing update. MsgTypeRoute = 1 // MsgTypeServiceAdvertisement is an advertisement for a service. MsgTypeServiceAdvertisement = 2 // MsgTypeReject indicates a rejection (closure) of a backend connection. MsgTypeReject = 3 ) const ( // ProblemServiceUnknown occurs when a message arrives for a non-listening service. ProblemServiceUnknown = "service unknown" // ProblemExpiredInTransit occurs when a message's HopsToLive expires in transit. ProblemExpiredInTransit = "message expired" // ProblemRejected occurs when a packet is rejected by a firewall rule. ProblemRejected = "blocked by firewall" ) // MessageData contains a single message packet from the network. type MessageData struct { FromNode string FromService string ToNode string ToService string HopsToLive byte Data []byte } type connInfo struct { ReadChan chan []byte WriteChan chan []byte Context context.Context CancelFunc context.CancelFunc Cost float64 lastReceivedData time.Time lastReceivedLock *sync.RWMutex logger *logger.ReceptorLogger } type nodeInfo struct { Epoch uint64 Sequence uint64 } type routingUpdate struct { NodeID string UpdateID string UpdateEpoch uint64 UpdateSequence uint64 Connections map[string]float64 ForwardingNode string SuspectedDuplicate uint64 } const ( // ConnTypeDatagram indicates a packetconn (datagram) service listener. ConnTypeDatagram = 0 // ConnTypeStream indicates a conn (stream) service listener, without a user-defined TLS. 
ConnTypeStream = 1 // ConnTypeStreamTLS indicates the service listens on a packetconn connection, with a user-defined TLS. ConnTypeStreamTLS = 2 ) // WorkCommand tracks available work types and whether they verify work submissions. type WorkCommand struct { WorkType string // Secure true means receptor will verify the signature of the work submit payload Secure bool } // ServiceAdvertisement is the data associated with a service advertisement. type ServiceAdvertisement struct { NodeID string Service string Time time.Time ConnType byte Tags map[string]string WorkCommands []WorkCommand } // serviceAdvertisementFull is the whole message from the network. type serviceAdvertisementFull struct { *ServiceAdvertisement Cancel bool } // UnreachableMessage is the on-the-wire data associated with an unreachable message. type UnreachableMessage struct { FromNode string ToNode string FromService string ToService string Problem string } // UnreachableNotification includes additional information returned from SubscribeUnreachable. type UnreachableNotification struct { UnreachableMessage ReceivedFromNode string } var ( networkNames = make([]string, 0) networkNamesLock = sync.Mutex{} ) // makeNetworkName returns a network name that is unique within global scope. func makeNetworkName(nodeID string) string { networkNamesLock.Lock() defer networkNamesLock.Unlock() nameCounter := 1 proposedName := fmt.Sprintf("netceptor-%s", nodeID) for { good := true for i := range networkNames { if networkNames[i] == proposedName { good = false break } } if good { networkNames = append(networkNames, proposedName) return proposedName } nameCounter++ proposedName = fmt.Sprintf("netceptor-%s-%d", nodeID, nameCounter) } } // NewWithConsts constructs a new Receptor network protocol instance, specifying operational constants. 
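// An illustrative call (added, not part of the original source) that keeps
// most values equivalent to the package defaults while lengthening the route
// update interval; the chosen numbers are example assumptions:
//
//	n := netceptor.NewWithConsts(context.Background(), "node1",
//		16384,          // MTU
//		30*time.Second, // route update time
//		60*time.Second, // service ad time
//		time.Hour,      // seen update expire time
//		30,             // max forwarding hops
//		61*time.Second, // max connection idle time
//	)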
func NewWithConsts(ctx context.Context, nodeID string, mtu int, routeUpdateTime time.Duration, serviceAdTime time.Duration, seenUpdateExpireTime time.Duration, maxForwardingHops byte, maxConnectionIdleTime time.Duration, ) *Netceptor { s := Netceptor{ nodeID: nodeID, mtu: mtu, routeUpdateTime: routeUpdateTime, serviceAdTime: serviceAdTime, seenUpdateExpireTime: seenUpdateExpireTime, maxForwardingHops: maxForwardingHops, maxConnectionIdleTime: maxConnectionIdleTime, epoch: uint64(time.Now().Unix()*(1<<24)) + uint64(rand.Intn(1<<24)), sequence: 0, sequenceLock: &sync.RWMutex{}, connLock: &sync.RWMutex{}, connections: make(map[string]*connInfo), knownNodeLock: &sync.RWMutex{}, knownNodeInfo: make(map[string]*nodeInfo), seenUpdatesLock: &sync.RWMutex{}, seenUpdates: make(map[string]time.Time), knownConnectionCosts: make(map[string]map[string]float64), routingTableLock: &sync.RWMutex{}, routingTable: make(map[string]string), routingPathCosts: make(map[string]float64), listenerLock: &sync.RWMutex{}, listenerRegistry: make(map[string]*PacketConn), sendRouteFloodChan: nil, updateRoutingTableChan: nil, hashLock: &sync.RWMutex{}, nameHashes: make(map[uint64]string), serviceAdsLock: &sync.RWMutex{}, serviceAdsReceived: make(map[string]map[string]*ServiceAdvertisement), sendServiceAdsChan: nil, backendWaitGroup: sync.WaitGroup{}, backendCount: 0, backendCancel: nil, networkName: makeNetworkName(nodeID), clientTLSConfigs: make(map[string]*tls.Config), clientPinnedFingerprints: make(map[string][][]byte), serverTLSConfigs: make(map[string]*tls.Config), firewallLock: &sync.RWMutex{}, workCommandsLock: &sync.RWMutex{}, Logger: logger.NewReceptorLogger(""), } s.reservedServices = map[string]func(*MessageData) error{ "ping": s.handlePing, "unreach": s.handleUnreachable, } if ctx == nil { ctx = context.Background() } s.clientTLSConfigs["default"] = &tls.Config{ MinVersion: tls.VersionTLS12, } s.AddNameHash(nodeID) s.context, s.cancelFunc = context.WithCancel(ctx) s.unreachableBroker = utils.NewBroker(s.context, reflect.TypeOf(UnreachableNotification{})) s.routingUpdateBroker = utils.NewBroker(s.context, reflect.TypeOf(map[string]string{})) s.updateRoutingTableChan = tickrunner.Run(s.context, s.updateRoutingTable, time.Hour*24, time.Millisecond*100) s.sendRouteFloodChan = tickrunner.Run(s.context, func() { s.sendRoutingUpdate(0) }, s.routeUpdateTime, time.Millisecond*100) if s.serviceAdTime > 0 { s.sendServiceAdsChan = tickrunner.Run(s.context, s.sendServiceAds, s.serviceAdTime, time.Second*5) } else { s.sendServiceAdsChan = make(chan time.Duration) go func() { for { select { case <-s.sendServiceAdsChan: // do nothing case <-s.context.Done(): return } } }() } go s.monitorConnectionAging() go s.expireSeenUpdates() return &s } // New constructs a new Receptor network protocol instance. func New(ctx context.Context, nodeID string) *Netceptor { return NewWithConsts(ctx, nodeID, defaultMTU, defaultRouteUpdateTime, defaultServiceAdTime, defaultSeenUpdateExpireTime, defaultMaxForwardingHops, defaultMaxConnectionIdleTime) } // NewAddr generates a Receptor network address from a node ID and service name. func (s *Netceptor) NewAddr(node string, service string) Addr { return Addr{ network: s.networkName, node: node, service: service, } } // Context returns the context for this Netceptor instance. func (s *Netceptor) Context() context.Context { return s.context } // Shutdown shuts down a Netceptor instance. func (s *Netceptor) Shutdown() { s.cancelFunc() } // NetceptorDone returns the channel for the netceptor context. 
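// A small shutdown-coordination sketch (added for illustration), assuming an
// existing *netceptor.Netceptor instance "n":
//
//	go func() {
//		<-n.NetceptorDone()
//		fmt.Println("netceptor instance has shut down")
//	}()
//	n.Shutdown()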
func (s *Netceptor) NetceptorDone() <-chan struct{} { return s.context.Done() } // NodeID returns the local Node ID of this Netceptor instance. func (s *Netceptor) NodeID() string { return s.nodeID } // MTU returns the configured MTU of this Netceptor instance. func (s *Netceptor) MTU() int { return s.mtu } // RouteUpdateTime returns the configured RouteUpdateTime of this Netceptor instance. func (s *Netceptor) RouteUpdateTime() time.Duration { return s.routeUpdateTime } // ServiceAdTime returns the configured ServiceAdTime of this Netceptor instance. func (s *Netceptor) ServiceAdTime() time.Duration { return s.serviceAdTime } // SeenUpdateExpireTime returns the configured SeenUpdateExpireTime of this Netceptor instance. func (s *Netceptor) SeenUpdateExpireTime() time.Duration { return s.seenUpdateExpireTime } // MaxForwardingHops returns the configured MaxForwardingHops of this Netceptor instance. func (s *Netceptor) MaxForwardingHops() byte { return s.maxForwardingHops } // MaxConnectionIdleTime returns the configured MaxConnectionIdleTime of this Netceptor instance. func (s *Netceptor) MaxConnectionIdleTime() time.Duration { return s.maxConnectionIdleTime } // GetLogger returns the logger of this Netceptor instance. func (s *Netceptor) GetLogger() *logger.ReceptorLogger { return s.Logger } // GetListenerRegistry returns listener registry map. func (s *Netceptor) GetListenerRegistry() map[string]*PacketConn { return s.listenerRegistry } // GetNetworkName returns networkName. func (s *Netceptor) GetNetworkName() string { return s.networkName } // GetListenerLock returns listenerLock. func (s *Netceptor) GetListenerLock() *sync.RWMutex { return s.listenerLock } // GetUnreachableBroker returns unreachableBroker. func (s *Netceptor) GetUnreachableBroker() *utils.Broker { return s.unreachableBroker } // Sets the MaxConnectionIdleTime object on the Netceptor instance. func (s *Netceptor) SetMaxConnectionIdleTime(userDefinedMaxIdleConnectionTimeout string) error { // before we instantiate a new instance of Netceptor, let's verify that the user defined maxidleconnectiontimeout value is parseable duration, err := time.ParseDuration(userDefinedMaxIdleConnectionTimeout) if err != nil { return fmt.Errorf("failed to parse MaxIdleConnectionTimeout from configuration file -- valid examples include '1.5h', '30m', '30m10s'") } // we don't want the user defined timeout to be less than the defaultMaxConnectionIdleTime constant if duration < defaultMaxConnectionIdleTime { return fmt.Errorf("user defined maxIdleConnectionTimeout [%d] is less than the default default timeout [%d]", duration, defaultMaxConnectionIdleTime) } s.maxConnectionIdleTime = duration return nil } type BackendInfo struct { connectionCost float64 nodeCost map[string]float64 allowedPeers []string } // BackendConnectionCost is a modifier for AddBackend, which sets the global connection cost. func BackendConnectionCost(cost float64) func(*BackendInfo) { return func(bi *BackendInfo) { bi.connectionCost = cost } } // BackendNodeCost is a modifier for AddBackend, which sets the per-node connection costs. func BackendNodeCost(nodeCost map[string]float64) func(*BackendInfo) { return func(bi *BackendInfo) { bi.nodeCost = nodeCost } } // BackendAllowedPeers is a modifier for AddBackend, which sets the list of peers allowed to connect. func BackendAllowedPeers(peers []string) func(*BackendInfo) { return func(bi *BackendInfo) { bi.allowedPeers = peers } } // AddBackend adds a backend to the Netceptor system. 
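// An illustrative sketch (added, not part of the original source), assuming
// "n" is a *netceptor.Netceptor and "b" implements the Backend interface;
// the functional options are the modifiers defined above:
//
//	err := n.AddBackend(b,
//		netceptor.BackendConnectionCost(2.0),
//		netceptor.BackendNodeCost(map[string]float64{"slow-node": 5.0}),
//		netceptor.BackendAllowedPeers([]string{"peer-a", "peer-b"}),
//	)
//	if err != nil {
//		// handle error
//	}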
func (s *Netceptor) AddBackend(backend Backend, modifiers ...func(*BackendInfo)) error { bi := &BackendInfo{ connectionCost: 1.0, nodeCost: nil, allowedPeers: nil, } for _, mod := range modifiers { mod(bi) } ctxBackend, cancel := context.WithCancel(s.context) s.backendCancel = append(s.backendCancel, cancel) // Start() runs a go routine that attempts establish a session over this // backend. For listeners, each time a peer dials this backend, sessChan is // written to, resulting in multiple ongoing sessions at once. sessChan, err := backend.Start(ctxBackend, &s.backendWaitGroup) if err != nil { return err } s.backendWaitGroup.Add(1) s.backendCount++ // Outer go routine -- this go routine waits for new sessions to be written to the sessChan and // starts the runProtocol() for that session go func() { runProtocolWg := sync.WaitGroup{} defer func() { // First wait for all session protocols to finish (the inner go routines) // for this backend before exiting this outer go routine. // It is important that the inner go routine is on a separate wait group // from the outer go routine. runProtocolWg.Wait() s.backendWaitGroup.Done() }() for { select { case sess, ok := <-sessChan: if ok { runProtocolWg.Add(1) // Inner go routine -- start the runProtocol loop for the new session // that was just passed to sessChan (which was written to from the // Start() method above) go func() { defer runProtocolWg.Done() err := s.runProtocol(ctxBackend, sess, bi) if err != nil { s.Logger.SanitizedError("Backend error: %s\n", err) } }() } else { return } case <-ctxBackend.Done(): return } } }() return nil } // BackendWait waits for the backend wait group. func (s *Netceptor) BackendWait() { s.backendWaitGroup.Wait() } // BackendDone calls Done on the backendWaitGroup. func (s *Netceptor) BackendDone() { s.backendWaitGroup.Done() } // BackendCount returns the number of backends that ever registered with this Netceptor. func (s *Netceptor) BackendCount() int { return s.backendCount } // CancelBackends stops all backends by calling a context cancel. func (s *Netceptor) CancelBackends() { s.Logger.Debug("Canceling backends") for i := range s.backendCancel { // a context cancel function s.backendCancel[i]() } s.BackendWait() s.backendCancel = nil s.backendCount = 0 } // Status returns the current state of the Netceptor object. 
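// A read-only inspection sketch (added for illustration), assuming an existing
// *netceptor.Netceptor instance "n":
//
//	st := n.Status()
//	fmt.Printf("node %s has %d connection(s)\n", st.NodeID, len(st.Connections))
//	for dest, via := range st.RoutingTable {
//		fmt.Printf("  %s is reached via %s\n", dest, via)
//	}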
func (s *Netceptor) Status() Status { s.connLock.RLock() conns := make([]*ConnStatus, 0) for conn := range s.connections { conns = append(conns, &ConnStatus{ NodeID: conn, Cost: s.connections[conn].Cost, }) } s.connLock.RUnlock() s.routingTableLock.RLock() routes := make(map[string]string) for k, v := range s.routingTable { routes[k] = v } s.routingTableLock.RUnlock() s.serviceAdsLock.RLock() serviceAds := make([]*ServiceAdvertisement, 0) for n := range s.serviceAdsReceived { for _, ad := range s.serviceAdsReceived[n] { adCopy := *ad if adCopy.NodeID == s.nodeID { adCopy.Time = time.Now() s.workCommandsLock.RLock() if len(s.workCommands) > 0 { adCopy.WorkCommands = s.workCommands } s.workCommandsLock.RUnlock() } serviceAds = append(serviceAds, &adCopy) } } s.serviceAdsLock.RUnlock() s.knownNodeLock.RLock() knownConnectionCosts := make(map[string]map[string]float64) for k1, v1 := range s.knownConnectionCosts { knownConnectionCosts[k1] = make(map[string]float64) for k2, v2 := range v1 { knownConnectionCosts[k1][k2] = v2 } } s.knownNodeLock.RUnlock() return Status{ NodeID: s.nodeID, Connections: conns, RoutingTable: routes, Advertisements: serviceAds, KnownConnectionCosts: knownConnectionCosts, } } // PathCost returns the cost to a given remote node, or an error if the node doesn't exist. func (s *Netceptor) PathCost(nodeID string) (float64, error) { s.routingTableLock.RLock() defer s.routingTableLock.RUnlock() cost, ok := s.routingPathCosts[nodeID] if !ok { return 0, fmt.Errorf("node not found") } return cost, nil } // AddFirewallRules adds firewall rules, optionally clearing existing rules first. func (s *Netceptor) AddFirewallRules(rules []FirewallRuleFunc, clearExisting bool) error { s.firewallLock.Lock() defer s.firewallLock.Unlock() if clearExisting { s.firewallRules = nil } s.firewallRules = append(s.firewallRules, rules...) return nil } func (s *Netceptor) AddLocalServiceAdvertisement(service string, connType byte, tags map[string]string) { s.serviceAdsLock.Lock() n, ok := s.serviceAdsReceived[s.nodeID] if !ok { n = make(map[string]*ServiceAdvertisement) s.serviceAdsReceived[s.nodeID] = n } n[service] = &ServiceAdvertisement{ NodeID: s.nodeID, Service: service, Time: time.Now(), ConnType: connType, Tags: tags, } s.serviceAdsLock.Unlock() select { case <-s.context.Done(): return case s.sendServiceAdsChan <- 0: default: } } func (s *Netceptor) RemoveLocalServiceAdvertisement(service string) error { s.serviceAdsLock.Lock() defer s.serviceAdsLock.Unlock() n, ok := s.serviceAdsReceived[s.nodeID] connType := n[service].ConnType if ok { delete(n, service) } sa := &serviceAdvertisementFull{ ServiceAdvertisement: &ServiceAdvertisement{ NodeID: s.nodeID, Service: service, Time: time.Now(), ConnType: connType, Tags: nil, }, Cancel: true, } data, err := s.translateStructToNetwork(MsgTypeServiceAdvertisement, sa) if err != nil { return err } s.flood(data, "") return nil } // Send a single service broadcast. func (s *Netceptor) sendServiceAd(si *ServiceAdvertisement) error { s.Logger.Debug("Sending service advertisement: %v\n", si) sf := serviceAdvertisementFull{ ServiceAdvertisement: si, Cancel: false, } data, err := s.translateStructToNetwork(MsgTypeServiceAdvertisement, sf) if err != nil { return err } s.flood(data, "") return nil } // Send advertisements for all advertised services. 
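// For orientation (added note, not part of the original source): this routine
// runs on the serviceAdTime ticker and is also nudged through
// sendServiceAdsChan when a local advertisement is registered, for example:
//
//	n.AddLocalServiceAdvertisement("myservice", netceptor.ConnTypeDatagram,
//		map[string]string{"type": "Example Service"})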
func (s *Netceptor) sendServiceAds() { ads := make([]ServiceAdvertisement, 0) s.listenerLock.RLock() for sn := range s.listenerRegistry { if s.listenerRegistry[sn].advertise { sa := ServiceAdvertisement{ NodeID: s.nodeID, Service: sn, Time: time.Now(), ConnType: s.listenerRegistry[sn].connType, Tags: s.listenerRegistry[sn].adTags, } if svcType, ok := sa.Tags["type"]; ok { if svcType == "Control Service" { s.workCommandsLock.RLock() if len(s.workCommands) > 0 { sa.WorkCommands = s.workCommands } s.workCommandsLock.RUnlock() } } ads = append(ads, sa) } } s.listenerLock.RUnlock() for i := range ads { err := s.sendServiceAd(&ads[i]) if err != nil { s.Logger.Error("Error sending service advertisement: %s\n", err) } } } // Watches connections and expires any that haven't seen traffic in too long. func (s *Netceptor) monitorConnectionAging() { for { select { case <-time.After(5 * time.Second): timedOut := make(map[string]context.CancelFunc, 0) s.connLock.RLock() for conn := range s.connections { connInfo := s.connections[conn] connInfo.lastReceivedLock.RLock() if time.Since(connInfo.lastReceivedData) > s.maxConnectionIdleTime { timedOut[conn] = s.connections[conn].CancelFunc } connInfo.lastReceivedLock.RUnlock() } s.connLock.RUnlock() for conn := range timedOut { s.Logger.Warning("Timing out connection %s, idle for the past %s\n", conn, s.maxConnectionIdleTime) timedOut[conn]() } case <-s.context.Done(): return } } } // Expires old updates from the seenUpdates table. func (s *Netceptor) expireSeenUpdates() { for { select { case <-time.After(s.seenUpdateExpireTime / 2): thresholdTime := time.Now().Add(-s.seenUpdateExpireTime) s.seenUpdatesLock.Lock() for id := range s.seenUpdates { if s.seenUpdates[id].Before(thresholdTime) { delete(s.seenUpdates, id) } } s.seenUpdatesLock.Unlock() case <-s.context.Done(): return } } } // Re-calculates the next-hop table based on current knowledge of the network. func (s *Netceptor) updateRoutingTable() { s.knownNodeLock.RLock() defer s.knownNodeLock.RUnlock() s.Logger.Debug("Re-calculating routing table\n") // Dijkstra's algorithm Q := priorityQueue.New() Q.Insert(s.nodeID, 0.0) cost := make(map[string]float64) prev := make(map[string]string) for node := range s.knownConnectionCosts { if node == s.nodeID { cost[node] = 0.0 } else { cost[node] = math.MaxFloat64 } prev[node] = "" Q.Insert(node, cost[node]) } for Q.Len() > 0 { nodeIf, _ := Q.Pop() node := fmt.Sprintf("%v", nodeIf) for neighbor, edgeCost := range s.knownConnectionCosts[node] { pathCost := cost[node] + edgeCost if pathCost < cost[neighbor] { cost[neighbor] = pathCost prev[neighbor] = node Q.Insert(neighbor, pathCost) } } } s.routingTableLock.Lock() defer s.routingTableLock.Unlock() s.routingTable = make(map[string]string) for dest := range s.knownConnectionCosts { p := dest for { if prev[p] == s.nodeID { s.routingTable[dest] = p break } else if prev[p] == "" { break } p = prev[p] } } s.routingPathCosts = cost routingTableCopy := make(map[string]string) for k, v := range s.routingTable { routingTableCopy[k] = v } go s.routingUpdateBroker.Publish(routingTableCopy) s.printRoutingTable() } // SubscribeRoutingUpdates subscribes for messages when the routing table is changed. 
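// A consumption sketch (added for illustration), assuming an existing
// *netceptor.Netceptor instance "n"; the returned channel is closed when the
// instance shuts down:
//
//	updates := n.SubscribeRoutingUpdates()
//	go func() {
//		for table := range updates {
//			fmt.Printf("routing table now has %d entries\n", len(table))
//		}
//	}()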
func (s *Netceptor) SubscribeRoutingUpdates() chan map[string]string { iChan := s.routingUpdateBroker.Subscribe() uChan := make(chan map[string]string) go func() { for { select { case msgIf, ok := <-iChan: if !ok { close(uChan) return } msg, ok := msgIf.(map[string]string) if !ok { continue } select { case uChan <- msg: case <-s.context.Done(): close(uChan) return } case <-s.context.Done(): close(uChan) return } } }() return uChan } // Forwards a message to all neighbors, possibly excluding one. func (s *Netceptor) flood(message []byte, excludeConn string) { s.connLock.RLock() defer s.connLock.RUnlock() for conn, ci := range s.connections { if conn != excludeConn { go func(conn string, ci *connInfo) { select { case ci.WriteChan <- message: case <-ci.Context.Done(): s.Logger.Debug("connInfo for connection %s cancelled during flood write", conn) } }(conn, ci) } } } // GetServerTLSConfig retrieves a server TLS config by name. func (s *Netceptor) GetServerTLSConfig(name string) (*tls.Config, error) { if name == "" { return nil, nil } sc, ok := s.serverTLSConfigs[name] if !ok { return nil, fmt.Errorf("unknown TLS config %s", name) } return sc.Clone(), nil } // AddWorkCommand records a work command so it can be included in service announcements. func (s *Netceptor) AddWorkCommand(command string, secure bool) error { if command == "" { return fmt.Errorf("must provide a name") } wC := WorkCommand{WorkType: command, Secure: secure} s.workCommandsLock.Lock() defer s.workCommandsLock.Unlock() s.workCommands = append(s.workCommands, wC) return nil } // SetServerTLSConfig stores a server TLS config by name. func (s *Netceptor) SetServerTLSConfig(name string, config *tls.Config) error { if name == "" { return fmt.Errorf("must provide a name") } s.serverTLSConfigs[name] = config return nil } // ReceptorCertNameError is the error produced when Receptor certificate name verification fails. type ReceptorCertNameError struct { ValidNodes []string ExpectedNode string } func (rce ReceptorCertNameError) Error() string { if len(rce.ValidNodes) == 0 { return fmt.Sprintf("x509: certificate is not valid for any Receptor node IDs, but wanted to match %s", rce.ExpectedNode) } var plural string if len(rce.ValidNodes) > 1 { plural = "s" } return fmt.Sprintf("x509: certificate is valid for Receptor node ID%s %s, not %s", plural, strings.Join(rce.ValidNodes, ", "), rce.ExpectedNode) } // VerifyType indicates whether we are verifying a server or client. type VerifyType int const ( // VerifyServer indicates we are the client, verifying a server. VerifyServer VerifyType = 1 // VerifyClient indicates we are the server, verifying a client. VerifyClient = 2 ) // ExpectedHostnameType indicates whether we are connecting to a DNS hostname or a Receptor Node ID. type ExpectedHostnameType int const ( // ExpectedHostnameTypeDNS indicates we are expecting a DNS style hostname. ExpectedHostnameTypeDNS ExpectedHostnameType = 1 // ExpectedHostnameTypeReceptor indicates we are expecting a Receptor node ID. ExpectedHostnameTypeReceptor = 2 ) // ReceptorVerifyFunc generates a function that verifies a Receptor node ID. 
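// --- Illustrative usage sketch (editor's addition, not part of the original receptor source) ---
// Named TLS configs are registered once and later looked up by listeners and backends. The
// names and values below are placeholders for illustration only; the crypto/tls import is
// already present in this package, and GetServerTLSConfig returns a Clone() the caller may
// safely modify.
func exampleRegisterServerTLS(s *Netceptor) error {
	cfg := &tls.Config{MinVersion: tls.VersionTLS12}
	if err := s.SetServerTLSConfig("example-server-tls", cfg); err != nil {
		return err
	}
	cloned, err := s.GetServerTLSConfig("example-server-tls")
	if err != nil {
		return err
	}
	_ = cloned
	return nil
}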
func ReceptorVerifyFunc(tlscfg *tls.Config, pinnedFingerprints [][]byte, expectedHostname string, expectedHostnameType ExpectedHostnameType, verifyType VerifyType, logger *logger.ReceptorLogger, ) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error { if len(rawCerts) == 0 { logger.Error("RVF failed: peer certificate missing") return fmt.Errorf("RVF failed: peer certificate missing") } certs := make([]*x509.Certificate, len(rawCerts)) for i, asn1Data := range rawCerts { cert, err := x509.ParseCertificate(asn1Data) if err != nil { logger.Error("RVF failed to parse: %s", err) return fmt.Errorf("failed to parse certificate from server: " + err.Error()) //nolint:govet } certs[i] = cert } var opts x509.VerifyOptions switch verifyType { case VerifyServer: opts = x509.VerifyOptions{ Intermediates: x509.NewCertPool(), Roots: tlscfg.RootCAs, CurrentTime: time.Now(), KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, } if expectedHostnameType == ExpectedHostnameTypeDNS && expectedHostname != "" { opts.DNSName = expectedHostname } case VerifyClient: opts = x509.VerifyOptions{ Intermediates: x509.NewCertPool(), Roots: tlscfg.ClientCAs, CurrentTime: time.Now(), KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, } if expectedHostnameType == ExpectedHostnameTypeDNS && expectedHostname != "" { opts.DNSName = expectedHostname } default: logger.Error("RVF failed: invalid verification type: must be client or server") return fmt.Errorf("RVF failed: invalid verification type: must be client or server") } if len(pinnedFingerprints) > 0 { var sha224sum []byte var sha256sum []byte var sha384sum []byte var sha512sum []byte fingerprintOK := false for _, fing := range pinnedFingerprints { fingLenFound := false for _, s := range []struct { len int sum *[]byte sumFunc func(data []byte) []byte }{ {28, &sha224sum, func(data []byte) []byte { sum := sha256.Sum224(data) return sum[:] }}, {32, &sha256sum, func(data []byte) []byte { sum := sha256.Sum256(data) return sum[:] }}, {48, &sha384sum, func(data []byte) []byte { sum := sha512.Sum384(data) return sum[:] }}, {64, &sha512sum, func(data []byte) []byte { sum := sha512.Sum512(data) return sum[:] }}, } { if len(fing) == s.len { fingLenFound = true if *s.sum == nil { *s.sum = s.sumFunc(certs[0].Raw) } if bytes.Equal(fing, *s.sum) { fingerprintOK = true break } } } if !fingLenFound { logger.Error("RVF failed: pinned certificate must be sha224, sha256, sha384 or sha512") return fmt.Errorf("RVF failed: pinned certificate must be sha224, sha256, sha384 or sha512") } } if !fingerprintOK { logger.Error("RVF failed: presented certificate does not match any pinned fingerprint") return fmt.Errorf("RVF failed: presented certificate does not match any pinned fingerprint") } } for _, cert := range certs[1:] { opts.Intermediates.AddCert(cert) } var err error _, err = certs[0].Verify(opts) if err != nil { logger.Error("RVF failed verify: %s\nRootCAs: %v\nServerName: %s", err, tlscfg.RootCAs, tlscfg.ServerName) return err } if expectedHostnameType == ExpectedHostnameTypeReceptor { found, receptorNames, err := utils.ParseReceptorNamesFromCert(certs[0], expectedHostname, logger) if err != nil { return err } if !found { logger.Error("RVF ReceptorNameError: expected %s but found %s", expectedHostname, strings.Join(receptorNames, ", ")) return ReceptorCertNameError{ValidNodes: receptorNames, ExpectedNode: expectedHostname} } } return nil } } // GetClientTLSConfig retrieves a client TLS 
config by name. Supported host name types // are dns and receptor. func (s *Netceptor) GetClientTLSConfig(name string, expectedHostName string, expectedHostNameType ExpectedHostnameType) (*tls.Config, error) { if name == "" { return nil, nil } tlscfg, ok := s.clientTLSConfigs[name] if !ok { return nil, fmt.Errorf("unknown TLS config %s", name) } var pinnedFingerprints [][]byte pinnedFingerprints, ok = s.clientPinnedFingerprints[name] if !ok { return nil, fmt.Errorf("pinned fingerprints missing for %s", name) } tlscfg = tlscfg.Clone() if !tlscfg.InsecureSkipVerify { tlscfg.VerifyPeerCertificate = ReceptorVerifyFunc(tlscfg, pinnedFingerprints, expectedHostName, expectedHostNameType, VerifyServer, s.Logger) switch expectedHostNameType { case ExpectedHostnameTypeDNS: tlscfg.ServerName = expectedHostName case ExpectedHostnameTypeReceptor: tlscfg.InsecureSkipVerify = true } } return tlscfg, nil } // SetClientTLSConfig stores a client TLS config by name. func (s *Netceptor) SetClientTLSConfig(name string, config *tls.Config, pinnedFingerprints [][]byte) error { if name == "" { return fmt.Errorf("must provide a name") } s.clientTLSConfigs[name] = config s.clientPinnedFingerprints[name] = pinnedFingerprints return nil } // All-zero seed for deterministic highwayhash. var zerokey = make([]byte, 32) // Hash a name and add it to the lookup table. func (s *Netceptor) AddNameHash(name string) uint64 { if strings.EqualFold(name, "localhost") { name = s.nodeID } h, _ := highwayhash.New64(zerokey) _, _ = h.Write([]byte(name)) hv := h.Sum64() s.hashLock.Lock() defer s.hashLock.Unlock() if _, ok := s.nameHashes[hv]; !ok { s.nameHashes[hv] = name } return hv } // Looks up a name given a hash received from the network. func (s *Netceptor) GetNameFromHash(namehash uint64) (string, error) { s.hashLock.RLock() defer s.hashLock.RUnlock() name, ok := s.nameHashes[namehash] if !ok { return "", fmt.Errorf("hash not found") } return name, nil } // Given a string, returns a fixed-length buffer right-padded with null (0) bytes. func stringFromFixedLenBytes(bytes []byte) string { p := len(bytes) - 1 for p >= 0 && bytes[p] == 0 { p-- } if p < 0 { return "" } return string(bytes[:p+1]) } // Given a fixed-length buffer, returns a string excluding any null (0) bytes on the right. func fixedLenBytesFromString(s string, l int) []byte { bytes := make([]byte, l) copy(bytes, s) return bytes } // Translates an incoming message from wire protocol to MessageData object. func (s *Netceptor) translateDataToMessage(data []byte) (*MessageData, error) { if len(data) < 36 { return nil, fmt.Errorf("data too short to be a valid message") } fromNode, err := s.GetNameFromHash(binary.BigEndian.Uint64(data[4:12])) if err != nil { return nil, err } toNode, err := s.GetNameFromHash(binary.BigEndian.Uint64(data[12:20])) if err != nil { return nil, err } fromService := stringFromFixedLenBytes(data[20:28]) toService := stringFromFixedLenBytes(data[28:36]) md := &MessageData{ FromNode: fromNode, FromService: fromService, ToNode: toNode, ToService: toService, HopsToLive: data[1], Data: data[36:], } return md, nil } // Translates an outgoing message from a MessageData object to wire protocol. 
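// --- Editor's note and sketch (not part of the original receptor source) ---
// As translateDataToMessage above implies, a data message carries a fixed 36-byte header:
//   byte  0      message type (MsgTypeData)
//   byte  1      hops to live
//   bytes 2-3    reserved (zero)
//   bytes 4-11   64-bit hash of the sending node name
//   bytes 12-19  64-bit hash of the destination node name
//   bytes 20-27  sending service name, null-padded
//   bytes 28-35  destination service name, null-padded
//   bytes 36+    payload
// The hypothetical in-package helper below round-trips a message through the two translate
// functions to show that symmetry; its name is invented for illustration.
func exampleRoundTripWireFormat(s *Netceptor) error {
	md := &MessageData{
		FromNode:    s.nodeID,
		FromService: "svc1",
		ToNode:      s.nodeID,
		ToService:   "svc2",
		HopsToLive:  s.maxForwardingHops,
		Data:        []byte("payload"),
	}
	wire, err := s.translateDataFromMessage(md)
	if err != nil {
		return err
	}
	back, err := s.translateDataToMessage(wire)
	if err != nil {
		return err
	}
	if back.ToService != md.ToService || string(back.Data) != string(md.Data) {
		return fmt.Errorf("round trip mismatch")
	}
	return nil
}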
func (s *Netceptor) translateDataFromMessage(msg *MessageData) ([]byte, error) { buf := &bytes.Buffer{} buf.Write([]byte{MsgTypeData, msg.HopsToLive, 0, 0}) binary.Write(buf, binary.BigEndian, s.AddNameHash(msg.FromNode)) binary.Write(buf, binary.BigEndian, s.AddNameHash(msg.ToNode)) buf.Write(fixedLenBytesFromString(msg.FromService, 8)) buf.Write(fixedLenBytesFromString(msg.ToService, 8)) buf.Write(msg.Data) return buf.Bytes(), nil } // Forwards a message to its next hop. func (s *Netceptor) forwardMessage(md *MessageData) error { if md.HopsToLive <= 0 { if md.FromService != "unreach" { _ = s.sendUnreachable(md.FromNode, &UnreachableMessage{ FromNode: md.FromNode, ToNode: md.ToNode, FromService: md.FromService, ToService: md.ToService, Problem: ProblemExpiredInTransit, }) } return nil } s.routingTableLock.RLock() nextHop, ok := s.routingTable[md.ToNode] s.routingTableLock.RUnlock() if !ok { return fmt.Errorf("no route to node") } s.connLock.RLock() c, ok := s.connections[nextHop] s.connLock.RUnlock() if !ok || c.WriteChan == nil { return fmt.Errorf("no connection to next hop") } message, err := s.translateDataFromMessage(md) if err != nil { return err } // decrement HopsToLive message[1]-- s.Logger.Trace(" Forwarding data length %d via %s\n", len(md.Data), nextHop) select { case <-c.Context.Done(): return fmt.Errorf("connInfo cancelled while forwarding message") case c.WriteChan <- message: } return nil } // Generates and sends a message over the Receptor network, specifying HopsToLive. func (s *Netceptor) SendMessageWithHopsToLive(fromService string, toNode string, toService string, data []byte, hopsToLive byte) error { if len(fromService) > 8 || len(toService) > 8 { return fmt.Errorf("service name too long") } if strings.EqualFold(toNode, "localhost") { toNode = s.nodeID } md := &MessageData{ FromNode: s.nodeID, FromService: fromService, ToNode: toNode, ToService: toService, HopsToLive: hopsToLive, Data: data, } s.Logger.Trace("--- Sending data length %d from %s:%s to %s:%s\n", len(md.Data), md.FromNode, md.FromService, md.ToNode, md.ToService) return s.handleMessageData(md) } // Generates and sends a message over the Receptor network. func (s *Netceptor) sendMessage(fromService string, toNode string, toService string, data []byte) error { return s.SendMessageWithHopsToLive(fromService, toNode, toService, data, s.maxForwardingHops) } // Returns an unused random service name to use as the equivalent of a TCP/IP ephemeral port number. func (s *Netceptor) GetEphemeralService() string { s.listenerLock.RLock() defer s.listenerLock.RUnlock() for { service := randstr.RandomString(8) _, ok := s.reservedServices[service] if ok { continue } _, ok = s.listenerRegistry[service] if ok { continue } return service } } // Prints the routing table. // The caller must already hold at least a read lock on known connections and routing. 
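// --- Illustrative usage sketch (editor's addition, not part of the original receptor source) ---
// SendMessageWithHopsToLive lets a caller cap the number of forwarding hops below the node
// default; once the count is exhausted in transit, forwardMessage above reports the message as
// unreachable. The service and node names here are placeholders and stay within the
// 8-character service-name limit enforced above.
func exampleSendWithLimitedHops(s *Netceptor, toNode string) error {
	// Allow at most 3 forwarding hops before the message expires in transit.
	return s.SendMessageWithHopsToLive("myservic", toNode, "control", []byte("hello"), 3)
}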
func (s *Netceptor) printRoutingTable() { logLevel, _ := s.Logger.GetLogLevelByName("Info") if s.Logger.GetLogLevel() < logLevel { return } s.Logger.Log(logLevel, "Known Connections:\n") for conn := range s.knownConnectionCosts { sb := &strings.Builder{} _, _ = fmt.Fprintf(sb, " %s: ", conn) for peer := range s.knownConnectionCosts[conn] { _, _ = fmt.Fprintf(sb, "%s(%.2f) ", peer, s.knownConnectionCosts[conn][peer]) } _, _ = fmt.Fprintf(sb, "\n") s.Logger.Log(logLevel, sb.String()) //nolint:govet } s.Logger.Log(logLevel, "Routing Table:\n") for node := range s.routingTable { s.Logger.Log(logLevel, " %s via %s\n", node, s.routingTable[node]) } } // Constructs a routing update message. func (s *Netceptor) makeRoutingUpdate(suspectedDuplicate uint64) *routingUpdate { s.connLock.RLock() defer s.connLock.RUnlock() s.sequenceLock.Lock() defer s.sequenceLock.Unlock() s.sequence++ conns := make(map[string]float64) for conn := range s.connections { conns[conn] = s.connections[conn].Cost } update := &routingUpdate{ NodeID: s.nodeID, UpdateID: randstr.RandomString(8), UpdateEpoch: s.epoch, UpdateSequence: s.sequence, Connections: conns, ForwardingNode: s.nodeID, SuspectedDuplicate: suspectedDuplicate, } return update } // Translates an arbitrary struct to a network message. func (s *Netceptor) translateStructToNetwork(messageType byte, content interface{}) ([]byte, error) { contentBytes, err := json.Marshal(content) if err != nil { return nil, err } return append([]byte{messageType}, contentBytes...), nil } // Sends a routing update to all neighbors. func (s *Netceptor) sendRoutingUpdate(suspectedDuplicate uint64) { s.connLock.RLock() connCount := len(s.connections) s.connLock.RUnlock() if connCount == 0 { return } ru := s.makeRoutingUpdate(suspectedDuplicate) sb := make([]string, 0) for conn := range ru.Connections { sb = append(sb, fmt.Sprintf("%s(%.2f)", conn, ru.Connections[conn])) } if suspectedDuplicate == 0 { s.Logger.Debug("Sending routing update %s. Connections: %s\n", ru.UpdateID, strings.Join(sb, " ")) } else { s.Logger.Warning("Sending duplicate node notification %s. Connections: %s\n", ru.UpdateID, strings.Join(sb, " ")) } message, err := s.translateStructToNetwork(MsgTypeRoute, ru) if err != nil { return } s.flood(message, "") } // Processes a routing update received from a connection. func (s *Netceptor) handleRoutingUpdate(ri *routingUpdate, recvConn string) { if ri.NodeID == "" { // Our peer is still trying to initialize return } if ri.NodeID == s.nodeID { if ri.UpdateEpoch == s.epoch { return } if ri.SuspectedDuplicate == s.epoch { // We are the duplicate! s.Logger.Error("We are a duplicate node with ID %s and epoch %d. 
Shutting down.\n", s.nodeID, s.epoch) s.Shutdown() return } if ri.UpdateEpoch > s.epoch { // Update has our node ID but a newer epoch - so if clocks are in sync they are a duplicate s.Logger.SanitizedError("Duplicate node ID %s detected via %s\n", ri.NodeID, recvConn) // Send routing update noting our suspicion s.sendRoutingUpdate(ri.UpdateEpoch) return } return } s.seenUpdatesLock.Lock() _, ok := s.seenUpdates[ri.UpdateID] if ok { s.seenUpdatesLock.Unlock() return } s.seenUpdates[ri.UpdateID] = time.Now() s.seenUpdatesLock.Unlock() if ri.SuspectedDuplicate != 0 { s.Logger.SanitizedWarning("Node %s with epoch %d sent update %s suspecting a duplicate node with epoch %d\n", ri.NodeID, ri.UpdateEpoch, ri.UpdateID, ri.SuspectedDuplicate) s.knownNodeLock.Lock() ni, ok := s.knownNodeInfo[ri.NodeID] if ok { if ni.Epoch == ri.SuspectedDuplicate { s.knownNodeInfo[ri.NodeID].Epoch = ri.UpdateEpoch s.knownNodeInfo[ri.NodeID].Sequence = ri.UpdateSequence } } s.knownNodeLock.Unlock() } else { s.Logger.SanitizedDebug("Received routing update %s from %s via %s\n", ri.UpdateID, ri.NodeID, recvConn) s.knownNodeLock.Lock() ni, ok := s.knownNodeInfo[ri.NodeID] if ok { if ri.UpdateEpoch < ni.Epoch { s.knownNodeLock.Unlock() return } if ri.UpdateEpoch == ni.Epoch && ri.UpdateSequence <= ni.Sequence { s.knownNodeLock.Unlock() return } } else { select { case <-s.context.Done(): s.knownNodeLock.Unlock() return case s.sendRouteFloodChan <- 0: } ni = &nodeInfo{} } ni.Epoch = ri.UpdateEpoch ni.Sequence = ri.UpdateSequence changed := false if !reflect.DeepEqual(ri.Connections, s.knownConnectionCosts[ri.NodeID]) { changed = true } _, ok = s.knownNodeInfo[ri.NodeID] if !ok { _ = s.AddNameHash(ri.NodeID) } s.knownNodeInfo[ri.NodeID] = ni if changed { s.knownConnectionCosts[ri.NodeID] = make(map[string]float64) for k, v := range ri.Connections { s.knownConnectionCosts[ri.NodeID][k] = v } for conn := range s.knownConnectionCosts { if conn == s.nodeID { continue } _, ok = ri.Connections[conn] if !ok { delete(s.knownConnectionCosts[conn], ri.NodeID) } } } s.knownNodeLock.Unlock() if changed { select { case <-s.context.Done(): return case s.updateRoutingTableChan <- 100 * time.Millisecond: } } } ri.ForwardingNode = s.nodeID message, err := s.translateStructToNetwork(MsgTypeRoute, ri) if err != nil { return } s.flood(message, recvConn) } // Handles a ping request. func (s *Netceptor) handlePing(md *MessageData) error { return s.sendMessage("ping", md.FromNode, md.FromService, []byte{}) } // Handles an unreachable response. func (s *Netceptor) handleUnreachable(md *MessageData) error { unrMsg := UnreachableMessage{} err := json.Unmarshal(md.Data, &unrMsg) if err != nil { return err } unrData := UnreachableNotification{ UnreachableMessage: unrMsg, ReceivedFromNode: md.FromNode, } s.Logger.Warning("Received unreachable message from %s (service %s) to %s (service %s): ttl %v, data %s", md.FromNode, md.FromService, md.ToNode, md.ToService, md.HopsToLive, unrMsg.Problem) return s.unreachableBroker.Publish(unrData) } // Sends an unreachable response. func (s *Netceptor) sendUnreachable(toNode string, message *UnreachableMessage) error { bytes, err := json.Marshal(message) if err != nil { return err } err = s.sendMessage("unreach", toNode, "unreach", bytes) if err != nil { return err } return nil } // Dispatches a message to a reserved service. Returns true if handled, false otherwise. 
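// --- Illustrative usage sketch (editor's addition, not part of the original receptor source) ---
// "ping" is one of the reserved services dispatched below: handlePing above echoes an empty
// datagram back to the sender. The hypothetical helper pings a remote node from an ephemeral
// local service and waits briefly for the reply; it assumes the time import already present
// in this package.
func examplePing(s *Netceptor, target string) error {
	pc, err := s.ListenPacket("") // blank service name requests an ephemeral service
	if err != nil {
		return err
	}
	defer pc.Close()
	if _, err := pc.WriteTo([]byte{}, s.NewAddr(target, "ping")); err != nil {
		return err
	}
	if err := pc.SetReadDeadline(time.Now().Add(2 * time.Second)); err != nil {
		return err
	}
	buf := make([]byte, 64)
	_, addr, err := pc.ReadFrom(buf)
	if err != nil {
		return err // ErrTimeout if no reply arrived before the deadline
	}
	s.Logger.Debug("ping reply received from %s\n", addr)
	return nil
}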
func (s *Netceptor) dispatchReservedService(md *MessageData) (bool, error) { svc, ok := s.reservedServices[md.ToService] if ok { return true, svc(md) } return false, nil } // Handles incoming data and dispatches it to a service listener. func (s *Netceptor) handleMessageData(md *MessageData) error { // Check firewall rules for this packet s.firewallLock.RLock() result := FirewallResultAccept for _, rule := range s.firewallRules { result = rule(md) if result != FirewallResultContinue { break } } s.firewallLock.RUnlock() switch result { case FirewallResultAccept: // do nothing case FirewallResultDrop: return nil case FirewallResultReject: if md.FromService != "unreach" { _ = s.sendUnreachable(md.FromNode, &UnreachableMessage{ FromNode: md.FromNode, ToNode: md.ToNode, FromService: md.FromService, ToService: md.ToService, Problem: ProblemRejected, }) } return nil } // If the destination is local, then dispatch the message to a service if md.ToNode == s.nodeID { handled, err := s.dispatchReservedService(md) if err != nil { return err } if handled { return nil } s.listenerLock.RLock() pc, ok := s.listenerRegistry[md.ToService] if !ok || pc.context.Err() != nil { s.listenerLock.RUnlock() if md.FromNode == s.nodeID { return fmt.Errorf(ProblemServiceUnknown) //nolint:staticcheck } _ = s.sendUnreachable(md.FromNode, &UnreachableMessage{ FromNode: md.FromNode, ToNode: md.ToNode, FromService: md.FromService, ToService: md.ToService, Problem: ProblemServiceUnknown, }) return nil } s.listenerLock.RUnlock() select { case <-pc.context.Done(): close(pc.recvChan) return nil case pc.recvChan <- md: } return nil } // The destination is non-local, so forward the message. return s.forwardMessage(md) } // GetServiceInfo returns the advertising info, if any, for a service on a node. func (s *Netceptor) GetServiceInfo(nodeID string, service string) (*ServiceAdvertisement, bool) { s.serviceAdsLock.RLock() defer s.serviceAdsLock.RUnlock() n, ok := s.serviceAdsReceived[nodeID] if !ok { return nil, false } svc, ok := n[service] if !ok { return nil, false } svcCopy := *svc return &svcCopy, true } // Handles an incoming service advertisement. func (s *Netceptor) handleServiceAdvertisement(data []byte, receivedFrom string) error { if data[0] != MsgTypeServiceAdvertisement { return fmt.Errorf("message is the wrong type") } si := &serviceAdvertisementFull{} err := json.Unmarshal(data[1:], si) if err != nil { return err } s.Logger.SanitizedDebug("Received service advertisement from %s\n", si.NodeID) s.serviceAdsLock.Lock() defer s.serviceAdsLock.Unlock() n, ok := s.serviceAdsReceived[si.NodeID] if !ok { n = make(map[string]*ServiceAdvertisement) s.serviceAdsReceived[si.NodeID] = n } curSvc, keepCur := n[si.Service] if keepCur { if si.Time.After(curSvc.Time) { keepCur = false } } if keepCur { return nil } if si.Cancel { delete(s.serviceAdsReceived[si.NodeID], si.Service) if len(s.serviceAdsReceived[si.NodeID]) == 0 { delete(s.serviceAdsReceived, si.NodeID) } } else { s.serviceAdsReceived[si.NodeID][si.Service] = si.ServiceAdvertisement } s.flood(data, receivedFrom) return nil } // Goroutine to send data from the backend to the connection's ReadChan. 
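// --- Illustrative usage sketch (editor's addition, not part of the original receptor source) ---
// handleMessageData above evaluates firewall rules in order until one returns something other
// than FirewallResultContinue. The hypothetical rule set below drops traffic addressed to a
// placeholder "secret" service and rejects (with an unreachable response) traffic from a
// placeholder "badnode" node; everything else is accepted.
func exampleInstallFirewall(s *Netceptor) error {
	rules := []FirewallRuleFunc{
		func(md *MessageData) FirewallResult {
			if md.ToService == "secret" {
				return FirewallResultDrop
			}
			return FirewallResultContinue
		},
		func(md *MessageData) FirewallResult {
			if md.FromNode == "badnode" {
				return FirewallResultReject
			}
			return FirewallResultAccept
		},
	}
	return s.AddFirewallRules(rules, true) // true clears any previously installed rules
}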
func (ci *connInfo) protoReader(sess BackendSession) { for { buf, err := sess.Recv(1 * time.Second) if err == ErrTimeout { continue } if err != nil { if err != io.EOF && ci.Context.Err() == nil { ci.logger.Error("Backend receiving error %s\n", err) } ci.CancelFunc() return } ci.lastReceivedLock.Lock() ci.lastReceivedData = time.Now() ci.lastReceivedLock.Unlock() select { case <-ci.Context.Done(): return case ci.ReadChan <- buf: } } } // Goroutine to send data from the connection's WriteChan to the backend. func (ci *connInfo) protoWriter(sess BackendSession) { for { select { case <-ci.Context.Done(): return case message, more := <-ci.WriteChan: if !more { return } err := sess.Send(message) if err != nil { if ci.Context.Err() == nil { ci.logger.Error("Backend sending error %s\n", err) } ci.CancelFunc() return } } } } // Continuously sends routing updates to let the other end know who we are on initial connection. func (s *Netceptor) sendInitialConnectMessage(ci *connInfo, initDoneChan chan bool) { count := 0 for { ri, err := s.translateStructToNetwork(MsgTypeRoute, s.makeRoutingUpdate(0)) if err != nil { s.Logger.Error("Error Sending initial connection message: %s\n", err) return } s.Logger.Debug("Sending initial connection message\n") select { case ci.WriteChan <- ri: case <-ci.Context.Done(): return } count++ if count > 10 { s.Logger.Warning("Giving up on connection initialization\n") ci.CancelFunc() return } select { case <-s.context.Done(): return case <-time.After(1 * time.Second): continue case <-ci.Context.Done(): return case <-initDoneChan: s.Logger.Debug("Stopping initial updates\n") return } } } func (s *Netceptor) sendRejectMessage(ci *connInfo) { rejMsg, err := s.translateStructToNetwork(MsgTypeReject, make([]string, 0)) if err == nil { select { case <-ci.Context.Done(): case ci.WriteChan <- rejMsg: } } } func (s *Netceptor) sendAndLogConnectionRejection(remoteNodeID string, ci *connInfo, reason string) error { s.sendRejectMessage(ci) return fmt.Errorf("%s: rejected connection with node %s because %s", s.nodeID, remoteNodeID, reason) } func (s *Netceptor) removeConnection(remoteNodeID string) { if remoteNodeID != "" { s.connLock.Lock() delete(s.connections, remoteNodeID) s.connLock.Unlock() s.knownNodeLock.Lock() _, ok := s.knownConnectionCosts[remoteNodeID] if ok { delete(s.knownConnectionCosts[remoteNodeID], s.nodeID) } _, ok = s.knownConnectionCosts[s.nodeID] if ok { delete(s.knownConnectionCosts[s.nodeID], remoteNodeID) } s.knownNodeLock.Unlock() } } // Main Netceptor protocol loop. 
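// --- Illustrative sketch (editor's addition, not part of the original receptor source) ---
// The protocol loop below is fed by a BackendSession. The snippet mirrors the pattern the unit
// tests use to hand it a transport: two nodes joined through ExternalBackends over an
// in-process socket pair (github.com/prep/socketpair, which is not imported in this file).
// Error handling is elided for brevity.
//
//	n1 := New(context.Background(), "node1")
//	n2 := New(context.Background(), "node2")
//	b1, _ := NewExternalBackend()
//	b2, _ := NewExternalBackend()
//	_ = n1.AddBackend(b1)
//	_ = n2.AddBackend(b2)
//	c1, c2, _ := socketpair.New("unix")
//	b1.NewConnection(MessageConnFromNetConn(c1), true)
//	b2.NewConnection(MessageConnFromNetConn(c2), true)
//	// ... exchange traffic, then shut down:
//	n1.Shutdown()
//	n2.Shutdown()
//	n1.BackendWait()
//	n2.BackendWait()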
func (s *Netceptor) runProtocol(ctx context.Context, sess BackendSession, bi *BackendInfo) error { if bi.connectionCost <= 0.0 { return fmt.Errorf("connection cost must be positive") } established := false remoteEstablished := false remoteNodeID := "" connectionCost := bi.connectionCost defer func() { _ = sess.Close() if established { select { case s.sendRouteFloodChan <- 0: case <-ctx.Done(): // ctx is a child of s.context return } select { case s.updateRoutingTableChan <- 0: case <-ctx.Done(): return } } }() ci := &connInfo{ ReadChan: make(chan []byte), WriteChan: make(chan []byte), Cost: connectionCost, lastReceivedLock: &sync.RWMutex{}, logger: s.Logger, } ci.Context, ci.CancelFunc = context.WithCancel(ctx) go ci.protoReader(sess) go ci.protoWriter(sess) initDoneChan := make(chan bool) go s.sendInitialConnectMessage(ci, initDoneChan) for { select { case data := <-ci.ReadChan: msgType := data[0] if established { switch msgType { case MsgTypeData: message, err := s.translateDataToMessage(data) if err != nil { s.Logger.Error("Error translating data to message struct: %s\n", err) continue } s.Logger.Trace("--- Received data length %d from %s:%s to %s:%s via %s\n", len(message.Data), message.FromNode, message.FromService, message.ToNode, message.ToService, remoteNodeID) err = s.handleMessageData(message) if err != nil { s.Logger.Error("Error handling message data: %s\n", err) } case MsgTypeRoute: ri := &routingUpdate{} err := json.Unmarshal(data[1:], ri) if err != nil { s.Logger.Error("Error unpacking routing update: %s\n", err) continue } if ri.ForwardingNode != remoteNodeID { s.removeConnection(remoteNodeID) return s.sendAndLogConnectionRejection(remoteNodeID, ci, fmt.Sprintf("remote node ID changed unexpectedly from %s to %s", remoteNodeID, ri.NodeID)) } if ri.NodeID == remoteNodeID { // This is an update from our direct connection, so do some extra verification remoteCost, ok := ri.Connections[s.nodeID] if !ok { if remoteEstablished { s.removeConnection(remoteNodeID) return s.sendAndLogConnectionRejection(remoteNodeID, ci, "remote node no longer lists us as a connection") } // This is a late initialization request from the remote node, so don't process it as a routing update. 
continue } remoteEstablished = true if ok && remoteCost != connectionCost { s.removeConnection(remoteNodeID) return s.sendAndLogConnectionRejection(remoteNodeID, ci, "we disagree about the connection cost") } } s.handleRoutingUpdate(ri, remoteNodeID) case MsgTypeServiceAdvertisement: err := s.handleServiceAdvertisement(data, remoteNodeID) if err != nil { s.Logger.Error("Error handling service advertisement: %s\n", err) continue } case MsgTypeReject: s.Logger.Warning("Received a rejection message from peer.") s.removeConnection(remoteNodeID) return fmt.Errorf("remote node rejected the connection") default: s.Logger.Warning("Unknown message type\n") } } else { // Connection not established if msgType == MsgTypeRoute { ri := &routingUpdate{} err := json.Unmarshal(data[1:], ri) if err != nil { s.Logger.Error("Error unpacking routing update: %s\n", err) continue } remoteNodeID = ri.ForwardingNode // Decide whether the remote node is acceptable if remoteNodeID == s.nodeID { return s.sendAndLogConnectionRejection(remoteNodeID, ci, "it tried to connect using our own node ID") } remoteNodeAccepted := true if bi.allowedPeers != nil { remoteNodeAccepted = false for i := range bi.allowedPeers { if bi.allowedPeers[i] == remoteNodeID { remoteNodeAccepted = true break } } } if !remoteNodeAccepted { return s.sendAndLogConnectionRejection(remoteNodeID, ci, "it is not in the allowed peers list") } remoteNodeCost, ok := bi.nodeCost[remoteNodeID] if ok { ci.Cost = remoteNodeCost connectionCost = remoteNodeCost } s.connLock.Lock() for conn := range s.connections { if remoteNodeID == conn { remoteNodeAccepted = false break } } if !remoteNodeAccepted { s.connLock.Unlock() return s.sendAndLogConnectionRejection(remoteNodeID, ci, "it connected using a node ID we are already connected to") } s.connections[remoteNodeID] = ci s.connLock.Unlock() // Establish the connection select { case initDoneChan <- true: case <-ctx.Done(): s.removeConnection(remoteNodeID) return nil case <-ci.Context.Done(): s.removeConnection(remoteNodeID) return nil } s.Logger.SanitizedInfo("Connection established with %s\n", remoteNodeID) s.AddNameHash(remoteNodeID) s.knownNodeLock.Lock() _, ok = s.knownConnectionCosts[s.nodeID] if !ok { s.knownConnectionCosts[s.nodeID] = make(map[string]float64) } s.knownConnectionCosts[s.nodeID][remoteNodeID] = connectionCost _, ok = s.knownConnectionCosts[remoteNodeID] if !ok { s.knownConnectionCosts[remoteNodeID] = make(map[string]float64) } s.knownConnectionCosts[remoteNodeID][s.nodeID] = connectionCost s.knownNodeLock.Unlock() select { case s.sendRouteFloodChan <- 0: case <-ctx.Done(): s.removeConnection(remoteNodeID) return nil case <-ci.Context.Done(): s.removeConnection(remoteNodeID) return nil } select { case s.updateRoutingTableChan <- 0: case <-ctx.Done(): return nil case <-ci.Context.Done(): return nil } established = true } else if msgType == MsgTypeReject { s.Logger.Warning("Received a rejection message from peer.") s.removeConnection(remoteNodeID) return fmt.Errorf("remote node rejected the connection") } } case <-ci.Context.Done(): s.removeConnection(remoteNodeID) return nil } } } receptor-1.5.3/pkg/netceptor/netceptor_test.go000066400000000000000000000505541475465740700215540ustar00rootroot00000000000000package netceptor import ( "context" "fmt" "log" "os" "strings" "sync" "testing" "time" "github.com/ansible/receptor/tests/utils" "github.com/prep/socketpair" "github.com/quic-go/quic-go" "github.com/quic-go/quic-go/logging" ) type logWriter struct { t *testing.T node1count int node1Lock 
sync.RWMutex node2count int node2Lock sync.RWMutex } func (lw *logWriter) Write(p []byte) (n int, err error) { s := strings.Trim(string(p), "\n") if strings.HasPrefix(s, "ERROR") { if !strings.Contains(s, "maximum number of forwarding hops") { fmt.Print(s) lw.t.Fatal(s) return } } else if strings.HasPrefix(s, "TRACE") { if strings.Contains(s, "via node1") { lw.node1Lock.Lock() lw.node1count++ lw.node1Lock.Unlock() } else if strings.Contains(s, "via node2") { lw.node2Lock.Lock() lw.node2count++ lw.node2Lock.Unlock() } } lw.t.Log(s) return len(p), nil } func TestHopCountLimit(t *testing.T) { lw := &logWriter{ t: t, } log.SetOutput(lw) defer func() { log.SetOutput(os.Stdout) }() // Create two Netceptor nodes using external backends n1 := New(context.Background(), "node1") n1.Logger.SetOutput(lw) n1.Logger.SetShowTrace(true) b1, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = n1.AddBackend(b1) if err != nil { t.Fatal(err) } n2 := New(context.Background(), "node2") n2.Logger.SetOutput(lw) n2.Logger.SetShowTrace(true) b2, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = n2.AddBackend(b2) if err != nil { t.Fatal(err) } // Create a Unix socket pair and use it to connect the backends c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } // Subscribe for node list updates nCh1 := n1.SubscribeRoutingUpdates() nCh2 := n2.SubscribeRoutingUpdates() // Connect the two nodes b1.NewConnection(MessageConnFromNetConn(c1), true) b2.NewConnection(MessageConnFromNetConn(c2), true) // Wait for the nodes to establish routing to each other var routes1 map[string]string var routes2 map[string]string timeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for { select { case <-timeout.Done(): t.Fatal("timed out waiting for nodes to connect") case routes1 = <-nCh1: case routes2 = <-nCh2: } if routes1 != nil && routes2 != nil { _, ok := routes1["node2"] if ok { _, ok := routes2["node1"] if ok { break } } } } // Inject a fake node3 that both nodes think the other node has a route to n1.AddNameHash("node3") n1.routingTableLock.Lock() n1.routingTable["node3"] = "node2" n1.routingTableLock.Unlock() n2.AddNameHash("node3") n2.routingTableLock.Lock() n2.routingTable["node3"] = "node1" n2.routingTableLock.Unlock() // Send a message to node3, which should bounce back and forth until max hops is reached pc, err := n1.ListenPacket("test") if err != nil { t.Fatal(err) } _, err = pc.WriteTo([]byte("hello"), n1.NewAddr("node3", "test")) if err != nil { t.Fatal(err) } // If the hop count limit is not working, the connections will never become inactive timeout, cancel = context.WithTimeout(context.Background(), 2*time.Second) defer cancel() for { c, ok := n1.connections["node2"] if !ok { t.Fatal("node2 disappeared from node1's connections") } c.lastReceivedLock.RLock() lastReceivedData := c.lastReceivedData c.lastReceivedLock.RUnlock() if time.Since(lastReceivedData) > 250*time.Millisecond { break } select { case <-timeout.Done(): t.Fatal(timeout.Err()) case <-time.After(125 * time.Millisecond): } } // Make sure we actually succeeded in creating a routing loop lw.node1Lock.RLock() node1Count := lw.node1count lw.node1Lock.RUnlock() lw.node2Lock.RLock() node2Count := lw.node2count lw.node2Lock.RUnlock() if node1Count < 10 || node2Count < 10 { t.Fatal("test did not create a routing loop") } n1.Shutdown() n2.Shutdown() n1.BackendWait() n2.BackendWait() } func TestLotsOfPings(t *testing.T) { numBackboneNodes := 3 numLeafNodesPerBackbone := 3 nodes := 
[]*Netceptor{} for i := 0; i < numBackboneNodes; i++ { nodes = append(nodes, New(context.Background(), fmt.Sprintf("backbone_%d", i))) } for i := 0; i < numBackboneNodes; i++ { for j := 0; j < i; j++ { b1, err := NewExternalBackend() if err == nil { err = nodes[i].AddBackend(b1) } if err != nil { t.Fatal(err) } b2, err := NewExternalBackend() if err == nil { err = nodes[j].AddBackend(b2) } if err != nil { t.Fatal(err) } c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } b1.NewConnection(MessageConnFromNetConn(c1), true) b2.NewConnection(MessageConnFromNetConn(c2), true) } } for i := 0; i < numBackboneNodes; i++ { for j := 0; j < numLeafNodesPerBackbone; j++ { b1, err := NewExternalBackend() if err == nil { err = nodes[i].AddBackend(b1) } if err != nil { t.Fatal(err) } newNode := New(context.Background(), fmt.Sprintf("leaf_%d_%d", i, j)) nodes = append(nodes, newNode) b2, err := NewExternalBackend() if err == nil { err = newNode.AddBackend(b2) } if err != nil { t.Fatal(err) } c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } b1.NewConnection(MessageConnFromNetConn(c1), true) b2.NewConnection(MessageConnFromNetConn(c2), true) } } responses := make([][]bool, len(nodes)) responseLocks := make([][]sync.RWMutex, len(nodes)) for i := range nodes { responses[i] = make([]bool, len(nodes)) responseLocks[i] = make([]sync.RWMutex, len(nodes)) } errorChan := make(chan error) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) wg := sync.WaitGroup{} for i := range nodes { for j := range nodes { // Need to make copies of these variables to avoid a data race i2 := i j2 := j wg.Add(2) go func(sender *Netceptor, recipient *Netceptor, response *bool) { pc, err := sender.ListenPacket("") if err != nil { errorChan <- err return } go func() { defer wg.Done() for { buf := make([]byte, 1024) err := pc.SetReadDeadline(time.Now().Add(1 * time.Second)) if err != nil { errorChan <- fmt.Errorf("error in SetReadDeadline: %s", err) return } _, addr, err := pc.ReadFrom(buf) if ctx.Err() != nil { return } if err != nil { continue } ncAddr, ok := addr.(Addr) if !ok { errorChan <- fmt.Errorf("addr was not a Netceptor address") return } if ncAddr.node != recipient.nodeID { errorChan <- fmt.Errorf("received response from wrong node") return } t.Logf("%s received response from %s", sender.nodeID, recipient.nodeID) responseLocks[i2][j2].Lock() *response = true responseLocks[i2][j2].Unlock() } }() go func() { defer wg.Done() buf := []byte("test") rAddr := sender.NewAddr(recipient.nodeID, "ping") for { _, _ = pc.WriteTo(buf, rAddr) select { case <-ctx.Done(): return case <-time.After(100 * time.Millisecond): } responseLocks[i2][j2].RLock() r := *response responseLocks[i2][j2].RUnlock() if r { return } } }() }(nodes[i], nodes[j], &responses[i][j]) } } wg.Add(1) go func() { defer wg.Done() for { good := true for i := range nodes { for j := range nodes { responseLocks[i][j].RLock() r := responses[i][j] responseLocks[i][j].RUnlock() if !r { good = false break } } if !good { break } } if good { t.Log("all pings received") cancel() return } select { case <-ctx.Done(): return case <-time.After(100 * time.Millisecond): } } }() t.Log("waiting for done") select { case err := <-errorChan: t.Fatal(err) case <-ctx.Done(): } t.Log("waiting for waitgroup") wg.Wait() t.Log("shutting down") for i := range nodes { go nodes[i].Shutdown() } t.Log("waiting for backends") for i := range nodes { nodes[i].BackendWait() } } func TestDuplicateNodeDetection(t *testing.T) { // Create Netceptor nodes 
netsize := 4 nodes := make([]*Netceptor, netsize) backends := make([]*ExternalBackend, netsize) routingChans := make([]chan map[string]string, netsize) logWriter := utils.NewTestLogWriter() defer func() { t.Log(logWriter.String()) }() for i := 0; i < netsize; i++ { nodes[i] = New(context.Background(), fmt.Sprintf("node%d", i)) nodes[i].Logger.SetOutput(logWriter) routingChans[i] = nodes[i].SubscribeRoutingUpdates() var err error backends[i], err = NewExternalBackend() if err != nil { t.Fatal(err) } err = nodes[i].AddBackend(backends[i]) if err != nil { t.Fatal(err) } } // Connect the nodes in a circular network for i := 0; i < netsize; i++ { c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } peer := (i + 1) % netsize backends[i].NewConnection(MessageConnFromNetConn(c1), true) backends[peer].NewConnection(MessageConnFromNetConn(c2), true) } // Wait for the nodes to establish routing to each other knownRoutes := make([]map[string]string, netsize) knownRoutesLock := sync.RWMutex{} for i := 0; i < netsize; i++ { knownRoutes[i] = make(map[string]string) go func(i int) { for { select { case routes := <-routingChans[i]: knownRoutesLock.Lock() knownRoutes[i] = routes knownRoutesLock.Unlock() case <-nodes[i].context.Done(): return } } }(i) } timeout, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() for { select { case <-timeout.Done(): t.Fatal("timed out waiting for nodes to connect") case <-time.After(200 * time.Millisecond): } for i := 0; i < netsize; i++ { peer := (i + 1) % 3 knownRoutesLock.RLock() _, ok := knownRoutes[i][fmt.Sprintf("node%d", peer)] if !ok { knownRoutesLock.RUnlock() continue } _, ok = knownRoutes[peer][fmt.Sprintf("node%d", i)] if !ok { knownRoutesLock.RUnlock() continue } knownRoutesLock.RUnlock() } break } // Make sure the new node gets a more recent timestamp than the old one time.Sleep(1 * time.Second) // Create and connect a new node with a duplicate name n := New(context.Background(), "node0") n.Logger.SetOutput(logWriter) n.Logger.Info("Duplicate node0 has epoch %d\n", n.epoch) b, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = n.AddBackend(b) if err != nil { t.Fatal(err) } c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } b.NewConnection(MessageConnFromNetConn(c1), true) backends[netsize/2].NewConnection(MessageConnFromNetConn(c2), true) // Wait for duplicate node to self-terminate backendCloseChan := make(chan struct{}) go func() { n.BackendWait() close(backendCloseChan) }() select { case <-backendCloseChan: case <-time.After(10 * time.Second): t.Fatal("timed out waiting for duplicate node to terminate") } // Force close the connection to the connected node _ = c2.Close() // Shut down the rest of the network for i := 0; i < netsize; i++ { nodes[i].Shutdown() } for i := 0; i < netsize; i++ { nodes[i].BackendWait() } if !strings.Contains(logWriter.String(), "We are a duplicate node") { t.Fatalf("Did not find expected log message from duplicate node.") } } func TestFirewalling(t *testing.T) { lw := &logWriter{ t: t, } log.SetOutput(lw) defer func() { log.SetOutput(os.Stdout) }() // Create two Netceptor nodes using external backends n1 := New(context.Background(), "node1") n1.Logger.SetOutput(lw) n1.Logger.SetShowTrace(true) b1, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = n1.AddBackend(b1) if err != nil { t.Fatal(err) } n2 := New(context.Background(), "node2") n2.Logger.SetOutput(lw) n2.Logger.SetShowTrace(true) b2, err := NewExternalBackend() if err != nil { 
t.Fatal(err) } err = n2.AddBackend(b2) if err != nil { t.Fatal(err) } // Add a firewall to node 1 that rejects messages whose data is "bad" err = n1.AddFirewallRules([]FirewallRuleFunc{ func(md *MessageData) FirewallResult { if string(md.Data) == "bad" { return FirewallResultReject } return FirewallResultAccept }, }, true) if err != nil { t.Fatal(err) } // Create a Unix socket pair and use it to connect the backends c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } // Subscribe for node list updates nCh1 := n1.SubscribeRoutingUpdates() nCh2 := n2.SubscribeRoutingUpdates() // Connect the two nodes b1.NewConnection(MessageConnFromNetConn(c1), true) b2.NewConnection(MessageConnFromNetConn(c2), true) // Wait for the nodes to establish routing to each other var routes1 map[string]string var routes2 map[string]string timeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for { select { case <-timeout.Done(): t.Fatal("timed out waiting for nodes to connect") case routes1 = <-nCh1: case routes2 = <-nCh2: } if routes1 != nil && routes2 != nil { _, ok := routes1["node2"] if ok { _, ok := routes2["node1"] if ok { break } } } } // Set up packet connection pc1, err := n1.ListenPacket("testsvc") if err != nil { t.Fatal(err) } pc2, err := n2.ListenPacket("") if err != nil { t.Fatal(err) } // Subscribe for unreachable messages doneChan := make(chan struct{}) unreach2chan := pc2.SubscribeUnreachable(doneChan) // Save received unreachable messages to a variable var lastUnreachMsg *UnreachableNotification lastUnreachLock := sync.RWMutex{} go func() { <-timeout.Done() close(doneChan) }() go func() { for unreach := range unreach2chan { unreach := unreach lastUnreachLock.Lock() lastUnreachMsg = &unreach lastUnreachLock.Unlock() } }() // Send a good message lastUnreachMsg = nil _, err = pc2.WriteTo([]byte("good"), n2.NewAddr("node1", "testsvc")) if err != nil { t.Fatal(err) } err = pc1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) if err != nil { t.Fatal(err) } buf := make([]byte, 256) n, _, err := pc1.ReadFrom(buf) if err != nil { t.Fatal(err) } if string(buf[:n]) != "good" { t.Fatal("incorrect message received") } time.Sleep(100 * time.Millisecond) if lastUnreachMsg != nil { t.Fatalf("unexpected unreachable message received: %v", lastUnreachMsg) } // Send a bad message _, err = pc2.WriteTo([]byte("bad"), n2.NewAddr("node1", "testsvc")) if err != nil { t.Fatal(err) } err = pc1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) if err != nil { t.Fatal(err) } _, _, err = pc1.ReadFrom(buf) if err != ErrTimeout { if err == nil { err = fmt.Errorf("received message that should have been firewalled") } t.Fatal(err) } time.Sleep(100 * time.Millisecond) lastUnreachLock.RLock() lum := lastUnreachMsg //nolint:ifshort lastUnreachLock.RUnlock() if lum == nil { t.Fatal("did not receive expected unreachable message") } // Shut down the network n1.Shutdown() n2.Shutdown() n1.BackendWait() n2.BackendWait() } func TestAllowedPeers(t *testing.T) { lw := &logWriter{ t: t, } log.SetOutput(lw) defer func() { log.SetOutput(os.Stdout) }() // Create two Netceptor nodes using external backends n1 := New(context.Background(), "node1") n1.Logger.SetOutput(lw) n1.Logger.SetShowTrace(true) b1, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = n1.AddBackend(b1) if err != nil { t.Fatal(err) } n2 := New(context.Background(), "node2") n2.Logger.SetOutput(lw) n2.Logger.SetShowTrace(true) b2, err := NewExternalBackend() if err != nil { t.Fatal(err) } err = 
n2.AddBackend(b2) if err != nil { t.Fatal(err) } // Add a firewall to node 1 that rejects messages whose data is "bad" err = n1.AddFirewallRules([]FirewallRuleFunc{ func(md *MessageData) FirewallResult { if string(md.Data) == "bad" { return FirewallResultReject } return FirewallResultAccept }, }, true) if err != nil { t.Fatal(err) } // Create a Unix socket pair and use it to connect the backends c1, c2, err := socketpair.New("unix") if err != nil { t.Fatal(err) } // Subscribe for node list updates nCh1 := n1.SubscribeRoutingUpdates() nCh2 := n2.SubscribeRoutingUpdates() // Connect the two nodes b1.NewConnection(MessageConnFromNetConn(c1), true) b2.NewConnection(MessageConnFromNetConn(c2), true) // Wait for the nodes to establish routing to each other var routes1 map[string]string var routes2 map[string]string timeout, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() for { select { case <-timeout.Done(): t.Fatal("timed out waiting for nodes to connect") case routes1 = <-nCh1: case routes2 = <-nCh2: } if routes1 != nil && routes2 != nil { _, ok := routes1["node2"] if ok { _, ok := routes2["node1"] if ok { break } } } } // Set up packet connection pc1, err := n1.ListenPacket("testsvc") if err != nil { t.Fatal(err) } pc2, err := n2.ListenPacket("") if err != nil { t.Fatal(err) } // Subscribe for unreachable messages doneChan := make(chan struct{}) unreach2chan := pc2.SubscribeUnreachable(doneChan) // Save received unreachable messages to a variable var lastUnreachMsg *UnreachableNotification lastUnreachLock := sync.RWMutex{} go func() { <-timeout.Done() close(doneChan) }() go func() { for unreach := range unreach2chan { unreach := unreach lastUnreachLock.Lock() lastUnreachMsg = &unreach lastUnreachLock.Unlock() } }() // Send a good message lastUnreachMsg = nil _, err = pc2.WriteTo([]byte("good"), n2.NewAddr("node1", "testsvc")) if err != nil { t.Fatal(err) } err = pc1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) if err != nil { t.Fatal(err) } buf := make([]byte, 256) n, _, err := pc1.ReadFrom(buf) if err != nil { t.Fatal(err) } if string(buf[:n]) != "good" { t.Fatal("incorrect message received") } time.Sleep(100 * time.Millisecond) if lastUnreachMsg != nil { t.Fatalf("unexpected unreachable message received: %v", lastUnreachMsg) } // Send a bad message _, err = pc2.WriteTo([]byte("bad"), n2.NewAddr("node1", "testsvc")) if err != nil { t.Fatal(err) } err = pc1.SetReadDeadline(time.Now().Add(100 * time.Millisecond)) if err != nil { t.Fatal(err) } _, _, err = pc1.ReadFrom(buf) if err != ErrTimeout { if err == nil { err = fmt.Errorf("received message that should have been firewalled") } t.Fatal(err) } time.Sleep(100 * time.Millisecond) lastUnreachLock.RLock() lum := lastUnreachMsg //nolint:ifshort lastUnreachLock.RUnlock() if lum == nil { t.Fatal("did not receive expected unreachable message") } // Shut down the network n1.Shutdown() n2.Shutdown() n1.BackendWait() n2.BackendWait() } func TestSetMaxConnectionIdleTime(t *testing.T) { t.Parallel() node := New(context.Background(), "node1") defer node.Shutdown() err := node.SetMaxConnectionIdleTime("60s") if err != nil { t.Fatal(err) } time, _ := time.ParseDuration("60s") if node.MaxConnectionIdleTime() != time { t.Fatal("setter behaved incorrectly") } } func TestSetBadMaxConnectionIdleTime(t *testing.T) { t.Parallel() node := New(context.Background(), "node1") defer node.Shutdown() err := node.SetMaxConnectionIdleTime("60d") if err == nil { t.Fatal("this should have failed out, as we're passing in an 
invalid date-string to SetMaxConnectionIdleTime") } } func TestTooShortSetMaxConnectionIdleTime(t *testing.T) { t.Parallel() node := New(context.Background(), "node1") defer node.Shutdown() err := node.SetMaxConnectionIdleTime("60us") if err == nil { t.Fatal("this should have failed out, as we're passing in an invalid time object that violates the logic in SetMaxConnectionIdleTime") } } func TestTracerReturnsNewConnectionTracer(t *testing.T) { t.Parallel() s := New(context.Background(), "node1") p := logging.PerspectiveClient os.Setenv("QLOGDIR", "/tmp/") trace := s.tracer(s.context, p, quic.ConnectionID{}) if trace == nil { t.Fatalf("tracer should return a newConnectionTracer when QLOGDIR environment variable is defined but got %v", trace) } os.Unsetenv("QLOGDIR") } func TestTracerDoesNotReturnsNewConnectionTracer(t *testing.T) { t.Parallel() s := New(context.Background(), "node1") p := logging.PerspectiveClient os.Unsetenv("QLOGDIR") trace := s.tracer(s.context, p, quic.ConnectionID{}) if trace != nil { t.Fatalf("tracer should return nil when QLOGDIR environment variable is not defined but got %v", trace) } } receptor-1.5.3/pkg/netceptor/packetconn.go000066400000000000000000000207301475465740700206300ustar00rootroot00000000000000package netceptor import ( "context" "fmt" "net" "reflect" "sync" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/utils" ) type PacketConner interface { SetHopsToLive(hopsToLive byte) GetHopsToLive() byte SubscribeUnreachable(doneChan chan struct{}) chan UnreachableNotification ReadFrom(p []byte) (int, net.Addr, error) WriteTo(p []byte, addr net.Addr) (n int, err error) LocalAddr() net.Addr Close() error SetDeadline(t time.Time) error SetReadDeadline(t time.Time) error GetReadDeadline() time.Time SetWriteDeadline(t time.Time) error Cancel() *context.CancelFunc LocalService() string GetLogger() *logger.ReceptorLogger StartUnreachable() } type NetcForPacketConn interface { GetEphemeralService() string AddNameHash(name string) uint64 AddLocalServiceAdvertisement(service string, connType byte, tags map[string]string) SendMessageWithHopsToLive(fromService string, toNode string, toService string, data []byte, hopsToLive byte) error RemoveLocalServiceAdvertisement(service string) error GetLogger() *logger.ReceptorLogger NodeID() string GetNetworkName() string GetListenerLock() *sync.RWMutex GetListenerRegistry() map[string]*PacketConn GetUnreachableBroker() *utils.Broker MaxForwardingHops() byte Context() context.Context } // PacketConn implements the net.PacketConn interface via the Receptor network. type PacketConn struct { s NetcForPacketConn localService string recvChan chan *MessageData readDeadlineMutex sync.Mutex readDeadline time.Time advertise bool adTags map[string]string connType byte hopsToLive byte unreachableSubs *utils.Broker context context.Context cancel context.CancelFunc } func NewPacketConnWithConst(s NetcForPacketConn, service string, advertise bool, adtags map[string]string, connTypeDatagram byte) *PacketConn { npc := &PacketConn{ s: s, localService: service, recvChan: make(chan *MessageData), advertise: advertise, adTags: adtags, connType: connTypeDatagram, hopsToLive: s.MaxForwardingHops(), } npc.StartUnreachable() s.GetListenerRegistry()[service] = npc return npc } func NewPacketConn(s NetcForPacketConn, service string, connTypeDatagram byte) *PacketConn { return NewPacketConnWithConst(s, service, false, nil, connTypeDatagram) } // ListenPacket returns a datagram connection compatible with Go's net.PacketConn. 
// If service is blank, generates and uses an ephemeral service name. func (s *Netceptor) ListenPacket(service string) (PacketConner, error) { if len(service) > 8 { return nil, fmt.Errorf("service name %s too long", service) } if service == "" { service = s.GetEphemeralService() } s.listenerLock.Lock() defer s.listenerLock.Unlock() _, isReserved := s.reservedServices[service] _, isListening := s.listenerRegistry[service] if isReserved || isListening { return nil, fmt.Errorf("service %s is already listening", service) } _ = s.AddNameHash(service) pc := NewPacketConn(s, service, ConnTypeDatagram) return pc, nil } // ListenPacketAndAdvertise returns a datagram listener, and also broadcasts service // advertisements to the Receptor network as long as the listener remains open. func (s *Netceptor) ListenPacketAndAdvertise(service string, tags map[string]string) (PacketConner, error) { if len(service) > 8 { return nil, fmt.Errorf("service name %s too long", service) } if service == "" { service = s.GetEphemeralService() } s.listenerLock.Lock() defer s.listenerLock.Unlock() _, isReserved := s.reservedServices[service] _, isListening := s.listenerRegistry[service] if isReserved || isListening { return nil, fmt.Errorf("service %s is already listening and advertising", service) } pc := NewPacketConnWithConst(s, service, true, tags, ConnTypeDatagram) s.AddLocalServiceAdvertisement(service, ConnTypeDatagram, tags) return pc, nil } func (pc *PacketConn) Cancel() *context.CancelFunc { return &pc.cancel } func (pc *PacketConn) GetLogger() *logger.ReceptorLogger { return pc.s.GetLogger() } // startUnreachable starts monitoring the netceptor unreachable channel and forwarding relevant messages. func (pc *PacketConn) StartUnreachable() { pc.context, pc.cancel = context.WithCancel(pc.s.Context()) pc.unreachableSubs = utils.NewBroker(pc.context, reflect.TypeOf(UnreachableNotification{})) iChan := pc.s.GetUnreachableBroker().Subscribe() go func() { <-pc.context.Done() pc.s.GetUnreachableBroker().Unsubscribe(iChan) }() go func() { for msgIf := range iChan { msg, ok := msgIf.(UnreachableNotification) if !ok { continue } FromNode := msg.FromNode FromService := msg.FromService if FromNode == pc.s.NodeID() && FromService == pc.localService { _ = pc.unreachableSubs.Publish(msg) } } }() } // SubscribeUnreachable subscribes for unreachable messages relevant to this PacketConn. func (pc *PacketConn) SubscribeUnreachable(doneChan chan struct{}) chan UnreachableNotification { iChan := pc.unreachableSubs.Subscribe() if iChan == nil { return nil } uChan := make(chan UnreachableNotification) // goroutine 1 // if doneChan is selected, this will unsubscribe the channel, which should // eventually close out the go routine 2 go func() { select { case <-doneChan: pc.unreachableSubs.Unsubscribe(iChan) case <-pc.context.Done(): } }() // goroutine 2 // this will exit when either the broker closes iChan, or the broker // returns via pc.context.Done() go func() { for { msgIf, ok := <-iChan if !ok { close(uChan) return } msg, ok := msgIf.(UnreachableNotification) if !ok { continue } uChan <- msg } }() return uChan } // ReadFrom reads a packet from the network and returns its data and address. 
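// --- Illustrative usage sketch (editor's addition, not part of the original receptor source) ---
// ReadFrom, defined below, blocks until a datagram arrives unless a read deadline is set, in
// which case it returns ErrTimeout once the deadline passes. The hypothetical helper polls a
// placeholder service once with a short deadline; it assumes the time import already present
// in this package.
func examplePollOnce(s *Netceptor) {
	pc, err := s.ListenPacket("myservic")
	if err != nil {
		return
	}
	defer pc.Close()
	_ = pc.SetReadDeadline(time.Now().Add(500 * time.Millisecond))
	buf := make([]byte, 4096)
	n, from, err := pc.ReadFrom(buf)
	switch {
	case err == ErrTimeout:
		// nothing arrived before the deadline; a caller could reset it and try again
	case err == nil:
		pc.GetLogger().Info("received %d bytes from %s\n", n, from)
	}
}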
func (pc *PacketConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) { var m *MessageData if pc.GetReadDeadline().IsZero() { select { case m = <-pc.recvChan: case <-pc.context.Done(): return 0, nil, fmt.Errorf("connection context closed") } } else { select { case m = <-pc.recvChan: case <-time.After(time.Until(pc.GetReadDeadline())): return 0, nil, ErrTimeout } } if m == nil { return 0, nil, fmt.Errorf("connection closed") } nCopied := copy(p, m.Data) fromAddr := Addr{ network: pc.s.GetNetworkName(), node: m.FromNode, service: m.FromService, } return nCopied, fromAddr, nil } // WriteTo writes a packet to an address on the network. func (pc *PacketConn) WriteTo(p []byte, addr net.Addr) (n int, err error) { ncaddr, ok := addr.(Addr) if !ok { return 0, fmt.Errorf("attempt to write to non-netceptor address") } err = pc.s.SendMessageWithHopsToLive(pc.localService, ncaddr.node, ncaddr.service, p, pc.hopsToLive) if err != nil { return 0, err } return len(p), nil } // SetHopsToLive sets the HopsToLive value for future outgoing packets on this connection. func (pc *PacketConn) SetHopsToLive(hopsToLive byte) { pc.hopsToLive = hopsToLive } func (pc *PacketConn) GetHopsToLive() byte { return pc.hopsToLive } // LocalService returns the local service name of the connection. func (pc *PacketConn) LocalService() string { return pc.localService } // LocalAddr returns the local address the connection is bound to. func (pc *PacketConn) LocalAddr() net.Addr { return Addr{ network: pc.s.GetNetworkName(), node: pc.s.NodeID(), service: pc.localService, } } // Close closes the connection. func (pc *PacketConn) Close() error { pc.s.GetListenerLock().Lock() defer pc.s.GetListenerLock().Unlock() delete(pc.s.GetListenerRegistry(), pc.localService) if pc.cancel != nil { pc.cancel() } if pc.advertise { err := pc.s.RemoveLocalServiceAdvertisement(pc.localService) if err != nil { return err } } return nil } // SetDeadline sets both the read and write deadlines. func (pc *PacketConn) SetDeadline(t time.Time) error { pc.readDeadlineMutex.Lock() pc.readDeadline = t pc.readDeadlineMutex.Unlock() return nil } // SetReadDeadline sets the read deadline. func (pc *PacketConn) SetReadDeadline(t time.Time) error { pc.readDeadlineMutex.Lock() pc.readDeadline = t pc.readDeadlineMutex.Unlock() return nil } func (pc *PacketConn) GetReadDeadline() time.Time { pc.readDeadlineMutex.Lock() readDeadline := pc.readDeadline pc.readDeadlineMutex.Unlock() return readDeadline } // SetWriteDeadline sets the write deadline. func (pc *PacketConn) SetWriteDeadline(_ time.Time) error { // Write deadline doesn't mean anything because Write() implementation is non-blocking. return nil } receptor-1.5.3/pkg/netceptor/packetconn_test.go000066400000000000000000000247751475465740700217040ustar00rootroot00000000000000package netceptor_test import ( "context" "errors" "reflect" "sync" "testing" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/netceptor/mock_netceptor" "github.com/ansible/receptor/pkg/utils" "go.uber.org/mock/gomock" ) const ( expectNoErrorReturnString = "Expected no error, but got: %v" closeErrorString = "Close Error" ) // checkPacketConn checks for TestNewPacketConn and TestListenPacket tests. 
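// --- Illustrative sketch (editor's addition, not part of the original receptor source) ---
// A hypothetical test along these lines could exercise the advertisement bookkeeping:
// ListenPacketAndAdvertise registers a local service advertisement, which GetServiceInfo
// should then report for the local node. The test name, service name, and tag values are
// invented for illustration.
func TestAdvertisedServiceLookupSketch(t *testing.T) {
	netc := netceptor.New(context.Background(), "node")
	defer netc.Shutdown()
	pc, err := netc.ListenPacketAndAdvertise("webhook", map[string]string{"type": "demo"})
	if err != nil {
		t.Fatal(err)
	}
	defer pc.Close()
	ad, ok := netc.GetServiceInfo("node", "webhook")
	if !ok || ad.Service != "webhook" {
		t.Fatal("expected the local advertisement to be visible via GetServiceInfo")
	}
}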
func checkPacketConn(t *testing.T, expectedErr string, failedTestString string, err error) { if expectedErr == "" && err != nil { t.Errorf(failedTestString, err) } if expectedErr != "" && err != nil && err.Error() != expectedErr { t.Errorf(failedTestString, err) } if expectedErr != "" && err == nil { t.Errorf(failedTestString, err) } } // TestNewPacketConn tests the NewPacketConn method. func TestNewPacketConn(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() ctx := context.Background() mockNetceptorForPacketConn := mock_netceptor.NewMockNetcForPacketConn(ctrl) mockNetceptorForPacketConn.EXPECT().MaxForwardingHops().Return(byte(1)) mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) mockNetceptorForPacketConn.EXPECT().GetListenerRegistry().Return(map[string]*netceptor.PacketConn{}) t.Run("NewPacketConn Success", func(t *testing.T) { pc := netceptor.NewPacketConn(mockNetceptorForPacketConn, "test", 0) if pc == nil { t.Error("Expected new PacketConn, got nil") } }) } // TestListenPacket tests the ListenPacket method. func TestListenPacket(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() listenPacketTestCases := []struct { name string service string expectedErr string failedTestString string }{ {"Success", "test", "", expectNoErrorReturnString}, {"Service Too Long Error", "123456789", "service name 123456789 too long", "Expected service name too long error, but got %v"}, {"Service Already Listening Error", "ping", "service ping is already listening", "Expected service ping is already listening, but got: %v"}, } for _, testCase := range listenPacketTestCases { ctx := context.Background() netc := netceptor.New(ctx, "node") t.Run(testCase.name, func(t *testing.T) { _, err := netc.ListenPacket(testCase.service) checkPacketConn(t, testCase.expectedErr, testCase.failedTestString, err) }) } } // TestListenPacketAndAdvertise test the ListenPacketAndAdvertise method. func TestListenPacketAndAdvertise(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() listenPacketTestCases := []struct { name string service string tags map[string]string expectedErr string failedTestString string }{ {"Success", "test", map[string]string{}, "", expectNoErrorReturnString}, {"Success empty service", "", map[string]string{}, "", expectNoErrorReturnString}, {"Service Too Long Error", "123456789", map[string]string{}, "service name 123456789 too long", "Expected service name too long error, but got %v"}, {"Service Already Listening Error", "ping", map[string]string{}, "service ping is already listening and advertising", "Expected service ping is already listening and advertising, but got: %v"}, } for _, testCase := range listenPacketTestCases { ctx := context.Background() netc := netceptor.New(ctx, "node") t.Run(testCase.name, func(t *testing.T) { _, err := netc.ListenPacketAndAdvertise(testCase.service, testCase.tags) checkPacketConn(t, testCase.expectedErr, testCase.failedTestString, err) }) } } // TestPacketConn tests both NewPacketConnWithConst and NewPacketConn methods. 
func TestPacketConn(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockNetceptorForPacketConn := mock_netceptor.NewMockNetcForPacketConn(ctrl) packetConnTestCases := []struct { name string service string expectedCall func(ctx context.Context) funcCall func(pc netceptor.PacketConner) interface{} expectedReturnVal interface{} unexpectedReturnValMsg string }{ { "GetLocalService Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { return pc.LocalService() }, "test", "Expected GetLocalService to be test, but got %v", }, { "GetLogger Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetLogger().Return(logger.NewReceptorLogger("test")) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))).Times(4) }, func(pc netceptor.PacketConner) interface{} { return pc.GetLogger().Logger.Prefix() }, "test", "Expected Logger prefix to be test, but got %v", }, { "ReadFrom Error", "", func(ctx context.Context) { newCtx, ctxCancel := context.WithCancel(context.Background()) time.AfterFunc(time.Microsecond*200, ctxCancel) mockNetceptorForPacketConn.EXPECT().Context().Return(newCtx) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(newCtx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { _, _, err := pc.ReadFrom([]byte{}) return err.Error() }, "connection context closed", "Expected ReadFrom error to be connection context closed, but got %v", }, { "SetHopsToLive Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { pc.SetHopsToLive(byte(2)) return pc.GetHopsToLive() }, byte(2), "Expected hopsToLive to be 2, but got %v", }, { "LocalAddr Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) mockNetceptorForPacketConn.EXPECT().GetNetworkName().Return("test") mockNetceptorForPacketConn.EXPECT().NodeID().Return("test") }, func(pc netceptor.PacketConner) interface{} { return pc.LocalAddr().Network() }, "test", "Expected LocalAddr Network to be test, but got %v", }, { "Close Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) mockNetceptorForPacketConn.EXPECT().GetListenerLock().Return(&sync.RWMutex{}).Times(2) mockNetceptorForPacketConn.EXPECT().GetListenerRegistry().Return(map[string]*netceptor.PacketConn{}) }, func(pc netceptor.PacketConner) interface{} { return pc.Close() }, nil, expectNoErrorReturnString, }, { closeErrorString, "test", func(ctx context.Context) { 
mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().RemoveLocalServiceAdvertisement("test").Return(errors.New(closeErrorString)) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) mockNetceptorForPacketConn.EXPECT().GetListenerLock().Return(&sync.RWMutex{}).Times(2) mockNetceptorForPacketConn.EXPECT().GetListenerRegistry().Return(map[string]*netceptor.PacketConn{}) }, func(pc netceptor.PacketConner) interface{} { return pc.Close().Error() }, closeErrorString, "Expected error to be Close Error, but got %v", }, { "SetDeadline Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { return pc.SetDeadline(time.Now().Add(time.Millisecond * 100)) }, nil, expectNoErrorReturnString, }, { "SetReadDeadline Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { return pc.SetReadDeadline(time.Now().Add(time.Millisecond * 100)) }, nil, expectNoErrorReturnString, }, { "SetWriteDeadline Success", "test", func(ctx context.Context) { mockNetceptorForPacketConn.EXPECT().Context().Return(context.Background()) mockNetceptorForPacketConn.EXPECT().GetUnreachableBroker().Return(utils.NewBroker(ctx, reflect.TypeOf(netceptor.UnreachableNotification{}))) }, func(pc netceptor.PacketConner) interface{} { return pc.SetWriteDeadline(time.Now().Add(time.Millisecond * 100)) }, nil, expectNoErrorReturnString, }, } for _, testCase := range packetConnTestCases { t.Run(testCase.name, func(t *testing.T) { ctx := context.Background() mockNetceptorForPacketConn.EXPECT().MaxForwardingHops().Return(byte(1)) mockNetceptorForPacketConn.EXPECT().GetListenerRegistry().Return(map[string]*netceptor.PacketConn{}) testCase.expectedCall(ctx) var returnVal interface{} var pc *netceptor.PacketConn if testCase.name != closeErrorString { pc = netceptor.NewPacketConn(mockNetceptorForPacketConn, testCase.service, 0) } else { pc = netceptor.NewPacketConnWithConst(mockNetceptorForPacketConn, testCase.service, true, map[string]string{}, byte(0)) } returnVal = testCase.funcCall(pc) if returnVal != testCase.expectedReturnVal { t.Errorf(testCase.unexpectedReturnValMsg, returnVal) } ctx.Done() ctrl.Finish() }) } } receptor-1.5.3/pkg/netceptor/ping.go000066400000000000000000000071641475465740700174460ustar00rootroot00000000000000package netceptor import ( "context" "fmt" "strings" "time" ) // NetcForPing should include all methods of Netceptor needed by the Ping function. type NetcForPing interface { ListenPacket(service string) (PacketConner, error) NewAddr(target string, service string) Addr NodeID() string Context() context.Context } // Ping calls SendPing to send a single test packet and waits for a reply or error. func (s *Netceptor) Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) { return SendPing(ctx, s, target, hopsToLive) } // SendPing sends a single test packet and waits for a reply or error.
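// A usage sketch via the Netceptor.Ping wrapper (illustrative only; "nc" is
// assumed to be an existing *Netceptor and "target-node" is a placeholder
// node ID):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
//    defer cancel()
//    rtt, from, err := nc.Ping(ctx, "target-node", nc.MaxForwardingHops())
//    if err != nil {
//        // unreachable, timed out, or cancelled
//    }
//    _ = rtt  // round-trip time of the reply
//    _ = from // node that answered (normally the target itself)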
func SendPing(ctx context.Context, s NetcForPing, target string, hopsToLive byte) (time.Duration, string, error) { pc, err := s.ListenPacket("") if err != nil { return 0, "", err } ctxPing, ctxCancel := context.WithCancel(ctx) defer func() { ctxCancel() _ = pc.Close() }() pc.SetHopsToLive(hopsToLive) doneChan := make(chan struct{}) unrCh := pc.SubscribeUnreachable(doneChan) defer close(doneChan) type errorResult struct { err error fromNode string } errorChan := make(chan errorResult) go func() { for msg := range unrCh { errorChan <- errorResult{ err: fmt.Errorf(msg.Problem), //nolint:govet fromNode: msg.ReceivedFromNode, } } }() startTime := time.Now() replyChan := make(chan string) go func() { buf := make([]byte, 8) _, addr, err := pc.ReadFrom(buf) fromNode := "" if addr != nil { fromNode = addr.String() fromNode = strings.TrimSuffix(fromNode, ":ping") } if err == nil { select { case replyChan <- fromNode: case <-ctxPing.Done(): case <-s.Context().Done(): } } else { select { case errorChan <- errorResult{ err: err, fromNode: fromNode, }: case <-ctx.Done(): case <-s.Context().Done(): } } }() _, err = pc.WriteTo([]byte{}, s.NewAddr(target, "ping")) if err != nil { return time.Since(startTime), s.NodeID(), err } select { case errRes := <-errorChan: return time.Since(startTime), errRes.fromNode, errRes.err case remote := <-replyChan: return time.Since(startTime), remote, nil case <-time.After(10 * time.Second): return time.Since(startTime), "", fmt.Errorf("timeout") case <-ctxPing.Done(): return time.Since(startTime), "", fmt.Errorf("user cancelled") case <-s.Context().Done(): return time.Since(startTime), "", fmt.Errorf("netceptor shutdown") } } type NetcForTraceroute interface { MaxForwardingHops() byte Ping(ctx context.Context, target string, hopsToLive byte) (time.Duration, string, error) Context() context.Context } // TracerouteResult is the result of one hop of a traceroute. type TracerouteResult struct { From string Time time.Duration Err error } func (s *Netceptor) Traceroute(ctx context.Context, target string) <-chan *TracerouteResult { return CreateTraceroute(ctx, s, target) } // CreateTraceroute returns a channel which will receive a series of hops between this node and the target. 
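// A usage sketch via the Netceptor.Traceroute wrapper (illustrative only;
// "nc", "ctx", and "target-node" are placeholders):
//
//    for hop := range nc.Traceroute(ctx, "target-node") {
//        if hop.Err != nil {
//            fmt.Printf("traceroute error: %v\n", hop.Err)
//            break
//        }
//        fmt.Printf("%s in %s\n", hop.From, hop.Time)
//    }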
func CreateTraceroute(ctx context.Context, s NetcForTraceroute, target string) <-chan *TracerouteResult { results := make(chan *TracerouteResult) go func() { defer close(results) for i := 0; i <= int(s.MaxForwardingHops()); i++ { pingTime, pingRemote, err := s.Ping(ctx, target, byte(i)) res := &TracerouteResult{ From: pingRemote, Time: pingTime, } if err != nil && err.Error() != ProblemExpiredInTransit { res.Err = err } select { case results <- res: case <-ctx.Done(): return case <-s.Context().Done(): return } if res.Err != nil || err == nil { return } } }() return results } receptor-1.5.3/pkg/netceptor/tlsconfig.go000066400000000000000000000203261475465740700204740ustar00rootroot00000000000000package netceptor import ( "crypto/tls" "crypto/x509" "encoding/hex" "encoding/pem" "fmt" "os" "strings" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // ************************************************************************** // Command line // ************************************************************************** var configSection = &cmdline.ConfigSection{ Description: "Commands that configure resources used by other commands:", Order: 5, } func decodeFingerprints(fingerprints []string) ([][]byte, error) { fingerprintBytes := make([][]byte, 0, len(fingerprints)) for _, fingStr := range fingerprints { fingBytes, err := hex.DecodeString(strings.ReplaceAll(fingStr, ":", "")) if err != nil { return nil, fmt.Errorf("error decoding fingerprint") } if len(fingBytes) != 32 && len(fingBytes) != 64 { return nil, fmt.Errorf("fingerprints must be 32 or 64 bytes for sha256 or sha512") } fingerprintBytes = append(fingerprintBytes, fingBytes) } return fingerprintBytes, nil } func checkCertificatesMatchNodeID(certbytes []byte, n *Netceptor, certName string, certPath string) error { block, _ := pem.Decode(certbytes) if block == nil { return fmt.Errorf("failed to parse certificate PEM") } parsedCert, err := x509.ParseCertificate(block.Bytes) if err != nil { return err } found, receptorNames, err := utils.ParseReceptorNamesFromCert(parsedCert, n.nodeID, n.Logger) if err != nil { return err } if !found { return fmt.Errorf("nodeID=%s not found in certificate name(s); names found=%s; cfg section=%s; server cert=%s", n.nodeID, fmt.Sprint(receptorNames), certName, certPath) } return nil } func baseTLS(minTLS13 bool) *tls.Config { var tlscfg *tls.Config if minTLS13 { tlscfg = &tls.Config{ PreferServerCipherSuites: true, MinVersion: tls.VersionTLS13, } } else { tlscfg = &tls.Config{ MinVersion: tls.VersionTLS12, CurvePreferences: []tls.CurveID{tls.CurveP521, tls.CurveP384, tls.CurveP256}, PreferServerCipherSuites: true, CipherSuites: []uint16{ tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_RSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, }, } } return tlscfg } // TLSServerConfig stores the configuration options for a TLS server.
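// A configuration sketch (illustrative only; the file paths are placeholders,
// and "n" is assumed to be the *Netceptor whose node ID must appear in the
// certificate unless SkipReceptorNamesCheck is set):
//
//    cfg := TLSServerConfig{
//        Name:              "server-tls",
//        Cert:              "/etc/receptor/tls/node.crt",
//        Key:               "/etc/receptor/tls/node.key",
//        ClientCAs:         "/etc/receptor/tls/ca.crt",
//        RequireClientCert: true,
//    }
//    tlscfg, err := cfg.PrepareTLSServerConfig(n)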
type TLSServerConfig struct { Name string `required:"true" description:"Name of this TLS server configuration"` Cert string `required:"true" description:"Server certificate filename"` Key string `required:"true" description:"Server private key filename"` RequireClientCert bool `required:"false" description:"Require client certificates" default:"false"` ClientCAs string `required:"false" description:"Filename of CA bundle to verify client certs with"` PinnedClientCert []string `required:"false" description:"Pinned fingerprint of required client certificate"` SkipReceptorNamesCheck bool `required:"false" description:"Skip verifying ReceptorNames OIDs in certificate at startup" default:"false"` MinTLS13 bool `required:"false" description:"Set minimum TLS version to 1.3. Otherwise the minimum is 1.2" default:"false"` } func (cfg TLSServerConfig) PrepareTLSServerConfig(n *Netceptor) (*tls.Config, error) { tlscfg := baseTLS(cfg.MinTLS13) certBytes, err := os.ReadFile(cfg.Cert) if err != nil { return nil, err } keybytes, err := os.ReadFile(cfg.Key) if err != nil { return nil, err } cert, err := tls.X509KeyPair(certBytes, keybytes) if err != nil { return nil, err } // check server crt to ensure that the receptor NodeID is in the client certificate as an OID if !cfg.SkipReceptorNamesCheck { if err := checkCertificatesMatchNodeID(certBytes, n, cfg.Name, cfg.Cert); err != nil { return nil, err } } tlscfg.Certificates = []tls.Certificate{cert} if cfg.ClientCAs != "" { caBytes, err := os.ReadFile(cfg.ClientCAs) if err != nil { return nil, fmt.Errorf("error reading client CAs file: %s", err) } clientCAs := x509.NewCertPool() clientCAs.AppendCertsFromPEM(caBytes) tlscfg.ClientCAs = clientCAs } switch { case cfg.RequireClientCert: tlscfg.ClientAuth = tls.RequireAndVerifyClientCert case cfg.ClientCAs != "": tlscfg.ClientAuth = tls.VerifyClientCertIfGiven default: tlscfg.ClientAuth = tls.NoClientCert } var pinnedFingerprints [][]byte pinnedFingerprints, err = decodeFingerprints(cfg.PinnedClientCert) if err != nil { return nil, fmt.Errorf("error decoding fingerprints: %s", err) } if tlscfg.ClientAuth != tls.NoClientCert { tlscfg.VerifyPeerCertificate = ReceptorVerifyFunc(tlscfg, pinnedFingerprints, "", ExpectedHostnameTypeDNS, VerifyClient, n.Logger) } return tlscfg, nil } // Prepare creates the tls.config and stores it in the global map. func (cfg TLSServerConfig) Prepare() error { tlscfg, err := cfg.PrepareTLSServerConfig(MainInstance) if err != nil { return fmt.Errorf("error preparing tls server config: %s", err) } return MainInstance.SetServerTLSConfig(cfg.Name, tlscfg) } // TLSClientConfig stores the configuration options for a TLS client. type TLSClientConfig struct { Name string `required:"true" description:"Name of this TLS client configuration"` Cert string `required:"true" description:"Client certificate filename"` Key string `required:"true" description:"Client private key filename"` RootCAs string `required:"false" description:"Root CA bundle to use instead of system trust"` InsecureSkipVerify bool `required:"false" description:"Accept any server cert" default:"false"` PinnedServerCert []string `required:"false" description:"Pinned fingerprint of required server certificate"` SkipReceptorNamesCheck bool `required:"false" description:"if true, skip verifying ReceptorNames OIDs in certificate at startup"` MinTLS13 bool `required:"false" description:"Set minimum TLS version to 1.3. 
Otherwise the minimum is 1.2" default:"false"` } func (cfg TLSClientConfig) PrepareTLSClientConfig(n *Netceptor) (tlscfg *tls.Config, pinnedFingerprints [][]byte, err error) { tlscfg = baseTLS(cfg.MinTLS13) if cfg.Cert != "" || cfg.Key != "" { if cfg.Cert == "" || cfg.Key == "" { return nil, nil, fmt.Errorf("cert and key must both be supplied or neither") } certBytes, err := os.ReadFile(cfg.Cert) if err != nil { return nil, nil, err } keyBytes, err := os.ReadFile(cfg.Key) if err != nil { return nil, nil, err } cert, err := tls.X509KeyPair(certBytes, keyBytes) if err != nil { return nil, nil, err } // check client crt to ensure that the receptor NodeID is in the client certificate as an OID if !cfg.SkipReceptorNamesCheck { if err := checkCertificatesMatchNodeID(certBytes, n, cfg.Name, cfg.Cert); err != nil { return nil, nil, err } } tlscfg.Certificates = []tls.Certificate{cert} } if cfg.RootCAs != "" { caBytes, err := os.ReadFile(cfg.RootCAs) if err != nil { return nil, nil, fmt.Errorf("error reading root CAs file: %s", err) } rootCAs := x509.NewCertPool() rootCAs.AppendCertsFromPEM(caBytes) tlscfg.RootCAs = rootCAs } pinnedFingerprints, err = decodeFingerprints(cfg.PinnedServerCert) if err != nil { return nil, nil, fmt.Errorf("error decoding fingerprints: %s", err) } tlscfg.InsecureSkipVerify = cfg.InsecureSkipVerify return tlscfg, pinnedFingerprints, nil } // Prepare creates the tls.config and stores it in the global map. func (cfg TLSClientConfig) Prepare() error { tlscfg, pinnedFingerprints, err := cfg.PrepareTLSClientConfig(MainInstance) if err != nil { return fmt.Errorf("error preparing tls client config: %s", err) } return MainInstance.SetClientTLSConfig(cfg.Name, tlscfg, pinnedFingerprints) } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-tls", "tls-server", "Define a TLS server configuration", TLSServerConfig{}, cmdline.Section(configSection)) cmdline.RegisterConfigTypeForApp("receptor-tls", "tls-client", "Define a TLS client configuration", TLSClientConfig{}, cmdline.Section(configSection)) } receptor-1.5.3/pkg/netceptor/tlsconfig_test.go000066400000000000000000000303601475465740700215320ustar00rootroot00000000000000package netceptor import ( "context" "os" "testing" "github.com/ansible/receptor/tests/utils" ) // setup handle using hardcoded PEMs. 
func setupSuite(t *testing.T) (*os.File, *os.File, *os.File, func(t *testing.T)) { tempCertFile, err := os.CreateTemp("", "") if err != nil { t.Error(err.Error()) } tempCertFile.Write([]byte(`-----BEGIN CERTIFICATE----- MIIFRDCCAyygAwIBAgIEYiZSQDANBgkqhkiG9w0BAQsFADA7MTkwNwYDVQQDEzBB bnNpYmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0Ew HhcNMjIwMzA3MTg0MzEyWhcNMzExMjI4MDUwMzUxWjARMQ8wDQYDVQQDEwZmb29i YXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnXsRTTIoV2Oqh5zvN JzQBYOZPpxmnKzwLvgeop44Csk++zARvg5XIpmPbSEU2PY3pNGvLTH6nD54/ZfOI RzSN0ipvfcrpJtkrJ7OYo1gX7ROXM30x3bj2KcJ/cMgMiZMQLqPegKhtMHLGz0TX +0MfJ5jqTlowVjSAyUhK6pMtf2ISpHqOA6uvmLhUhkruUrUkHMnbwWMTzrO8QDMa dLvV+hiWZNZFaf6Xt3lNBRY+yrXuSG7ZOc/6UsWDb4NVALL1mJ0IjfSeiK58Sf8V HUY4MEjy8VW2lfARU/mcNkxrUY1DBNp5zcHMhwoLkLId90PyFyzXMDCvZxHrGEwt Z23UAYY/wAvw1XWm5XJBiLzaL12dStuHeZgtAUOucQHvEOglvPilU6vKf5PFdxqo KEOwXtgLUTlw4otm2bWx5p2LPlxkPApbAv7UaxiTbcpuIMh8WTTSk/EUgpyEUjph iN0uqnp2fH9Mmyn8hgSB/Kf6FhIZFl3VMNN6x8VTkqLkzVG8Ud48gFHfraVQXvaL cDDCLxTeda6Th6uTw2zCifBzXbWxZKjlinx8MEM/kIA1we/wlwsYwpQNhkbOorR3 eJ6Exdl1Ar8l3jHp293hCvxUNuzG5Z9oPDMQ6MSm8xxrBN2pYZNL3DCWaJ0njuNj YeNR7l7s+9ibX5RD+uASiC6hOwIDAQABo3oweDAOBgNVHQ8BAf8EBAMCB4AwHQYD VR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMB8GA1UdIwQYMBaAFFAlC81rH211 fPJoWglERKb/7/NfMCYGA1UdEQQfMB2HBH8AAAGgFQYJKwYBBAGSCBMBoAgMBmZv b2JhcjANBgkqhkiG9w0BAQsFAAOCAgEAbzKRqx2i8S0Kuu0bIX094EoGiGSTWW4l YNHwn9mC/5KgzjSvxTkD0pInt31d5O27rK7/wMVezeqBIG92uwwZr7ndS6Fe0FT1 7tMZ1VH5VetIiicbu3AYssqMs/JYEocqOngLh/pGHmlwcnmPpCltipcE50bv9YWn O8Yc5O7v16SxHzGsDUDO5eQAe2qvBaE5F5SBCVkjSoajmh3fdx/4eSzoF2wrug3/ O+WAb70UXX6r8dmRpr4RezQ6XPWAG57BgU3g0NUkczFo5gFndBUJngLhR6wr14xB st21haZ65XIA46PB8jY04l/H2INwCzo++PlKJ3ROKwLXYDSZlgQ3X9XxsSzCX3Hs viK9Ybzp2W8sl1Pvtb/jodcNTpD2IB8IrWnvuOgnwVmewqAqlxM7Ers9kC83lBpt EhAXh0QyJ5BpHOkpm4jpVhOx1swHTBDoibysvpdr5KuuOm1JTr7cYRYhIe65rVz3 aL0PryzHdvQB97LhYAaUPtFnxNxUIeXKZO3Ndg/KSrSe4IqGz51uKjxJy+MnH9// nnG0JqlerSVvSPSiZ2kdn4OwzV2eA3Gj3uyTSGsjjoj82bhhRwKaSWmUh+AJByQ9 kE6r/6za1Hvm+i/mz8f1cTUxFjF5pKzrprNRz5NMzs6NkQ0pg+mq5CNzav1ATSyv Bdt96MbGrC0= -----END CERTIFICATE----- `)) tempCertKey, err := os.CreateTemp("", "") if err != nil { t.Error(err.Error()) } tempCertKey.Write([]byte(`-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAp17EU0yKFdjqoec7zSc0AWDmT6cZpys8C74HqKeOArJPvswE b4OVyKZj20hFNj2N6TRry0x+pw+eP2XziEc0jdIqb33K6SbZKyezmKNYF+0TlzN9 Md249inCf3DIDImTEC6j3oCobTByxs9E1/tDHyeY6k5aMFY0gMlISuqTLX9iEqR6 jgOrr5i4VIZK7lK1JBzJ28FjE86zvEAzGnS71foYlmTWRWn+l7d5TQUWPsq17khu 2TnP+lLFg2+DVQCy9ZidCI30noiufEn/FR1GODBI8vFVtpXwEVP5nDZMa1GNQwTa ec3BzIcKC5CyHfdD8hcs1zAwr2cR6xhMLWdt1AGGP8AL8NV1puVyQYi82i9dnUrb h3mYLQFDrnEB7xDoJbz4pVOryn+TxXcaqChDsF7YC1E5cOKLZtm1seadiz5cZDwK WwL+1GsYk23KbiDIfFk00pPxFIKchFI6YYjdLqp6dnx/TJsp/IYEgfyn+hYSGRZd 1TDTesfFU5Ki5M1RvFHePIBR362lUF72i3Awwi8U3nWuk4erk8Nswonwc121sWSo 5Yp8fDBDP5CANcHv8JcLGMKUDYZGzqK0d3iehMXZdQK/Jd4x6dvd4Qr8VDbsxuWf aDwzEOjEpvMcawTdqWGTS9wwlmidJ47jY2HjUe5e7PvYm1+UQ/rgEoguoTsCAwEA AQKCAgApCj3Nxyjc7pGqHY82YPSJmf8fbPQHX7ybjH9IRb22v456VICJ75Qc3WAC 9xexkypnEqmT8i/kOxospY0vz3X9iJlLOWc2AIaj5FpPhU4mn8V7/+4k+h9OjTLa GQeUu29KOoWIG7gw/f5G7bAN3di5nPYMDiZjT+AT7EdDx31LXL7pn1dF13ST3Djm 0P8yrSkpr713m1X2F2tPL9bYF+OvNmItDpDT+IerIBwoXKT1xLMTuMMllN2Anic8 cW2cvE0ll8R5woVHEnDmnSZlQQZk5MIegDrqSJ3TQeok+dOHRToEQv5ne6KXyk0W RObIHkeU50XhhjmJ6RYltZGIWKI/QohWBECINhjmBxqGKBz5ultIOmeLPd5IlC+Y ow+zQk8WuYaUIX2PAzhFnhRfxUsv2Zoljt2J4YC3oKsB9cynrhonozvwEJy9MJJF a48+meJ6Wkm6LtcREPgbjFtfhrPKQlD+/kfHR6mxhjR977lgZAvrGhlBTZPKx/MF r0ZOP34+Cw2ZDrHO1L7GQVEjY0JM2B6lCEYtI8Mxy04gqa+kRIjL+04WhjT1w2Lk 
71tOBNNB2AqxK+aptqxLG2By4mlW7WliGZI0j/6caXkg02olL/WqeBWTKSoUXLd6 LD523A02VHQgBDhTdIjezKI1FpAVKCXdHuwgqSWPQiQx6FkdAQKCAQEA1YinOp0U 1/9nq5f9Oet5uOLLGNG5lpzvCY9tPk9gWjTlAes5aQ8Pftg+P6dGgAsVqGxT2NvS uNSqYIBdm7Uy7jUG9m6PjQeQ7+oQ1vJqbryqr4QDwnAtHdWFfXak17YZs9YuhesP l5h4Oxi43Q2tZalMUY/rAmn+URqI5jlSWYiH6D9p2j9mEzvFrPQLvsbDb6zbxlAv 8oaqOiOrQa+q3T+loeRX0ErN9qf84Vw7tc7Qp5a4siWyWIHKGHHVveB+ITcHJ2+7 KJf7saRAjcRyHxX3tsPyRVSfg37nIMoPHilnN8bbhgBs0eMq1zcQgEYVceWx4pcZ GonabS85TBsqwQKCAQEAyKfZoot+oOOfWXMVBD761o4msd3fxRJlyS9RsPzRx7VO rQNTw9fCmurcFnF444fCfnEVJ/bCh/rWETyt1wVQhuy+th16hq4NEwGOD87WBXCn b3K8ZNbFDB9WL30q7bLe9UBw4j1ciHGKqpkjEACBrrdBF3HxVjBCQiHUKci3KK7E j6rtmR97UJj3XtTU0XiFm2FNKRa+aw0OQ3rr5Bw9ZURd9aXoDCXUMoXgfFnUxLWd y8Mdh5/PWmf8/o7/WqWpwejRJqfcGR1576QJXZjbduXG5zviDjwe5VKjgH5XRe8x ytCa5Z6APGWA4hhuZYfERcCsirEPO4ruew+iE0c2+wKCAQAA7o28Rb83ihfLuegS /qITWnoEa7XhoGGyqvuREAudmSl+rqYbfUNWDF+JK5O1L1cy2vYqthrfT55GuYiv C0VjoLudC7J4rRXG1kCoj3pDbXNZPLw/dvnbbXkdqQzjHBpUnJSrZPE2eiXcLCly XYLqNKjumjAuXIQNmo4KYymm1l+xdcVifHBXmSUtsgrzFC76J8j1vpfW+Rt5EXrH 2JpoSMTSRgrUD9+COg1ydlKUYoiqko/PxzZWCIr3PFfwcjBauMDBPU2VycQBbHQT qk3NMO1Z0NUX1Fy12DHuBLO4L/oRVj7TAOF4sQMY2VarGKMzUgtKr9oeMYfQfipD 2MKBAoIBAQCyCFuNYP+FePDVyMoI7mhZHd8vSZFVpbEyBA4TXv4yl6eq0pzr0vAT y/Zi42NDXh0vWt5Oix6mz+RHfvMvKMP+MugzZYxlGuD20BZf6ED0qrOkqsSFJBnJ W7R4hjIknOQ97mM6GP+VAEjsfNsjQ4/MmUPjrXFX65GeY61/NVtteUNlxV7y0X/0 TwSM24HIKYtCBd8Uad2h1f+l19acmoHO7A4B+qYcwSO5gBdhvcKOliXfuMrmnuC3 cjSDGBVxNDOenReVmLIshn6+JWk55noy0ETevb8gqi8vgVcYlwCQSF6BeP02Zp+Y 9uaXtN2esAtxaDavB9JgHjDid0hymmkpAoIBABmtcLim8rEIo82NERAUvVHR7MxR hXKx9g3bm1O0w7kJ16jyf5uyJ85JNi1XF2/AomSNWH6ikHuX5Xj6vOdL4Ki9jPDq TOlmvys2LtCAMOM3e3NvzIfTnrQEurGusCQKxCbnlRk2W13j3uc2gVFgB3T1+w2H lSEhzuFpDrxKrsE9QcCf7/Cju+2ir9h3FsPDRKoxfRJ2/onsgQ/Q7NODRRQGjwxw P/Hli/j17jC7TdgC26JhtVHH7K5xC6iNL03Pf3GTSvwN1vK1BY2reoz1FtQrGZvM rydzkVNNVeMVX2TER9yc8AdFqkRlaBWHmO61rYmV+N1quLM0uMVsu55ZNCY= -----END RSA PRIVATE KEY----- `)) tempCA, err := os.CreateTemp("", "") if err != nil { t.Error(err.Error()) } tempCA.Write([]byte(`-----BEGIN CERTIFICATE----- MIIFVTCCAz2gAwIBAgIEYdeDaTANBgkqhkiG9w0BAQsFADA7MTkwNwYDVQQDEzBB bnNpYmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0Ew HhcNMjIwMTA3MDAwMzUxWhcNMzIwMTA3MDAwMzUxWjA7MTkwNwYDVQQDEzBBbnNp YmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0EwggIi MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCxAErOWvVDU8mfZgtE6BSygTWU MkPxxIEQSYs/UesRAHaB+QXa7/0Foa0VUJKcWwUE+2yYkNRrg8MmE8VWMSewcaNI As407stFXP+A2anPEglwemTskpO72sigiYDKShC5n5ciyPsHckwVlOCTtac5TwFe eTmGnHWRcd4uBGvaEXx98fw/wLgYtr9vmKTdnOQjriX9EaAWrjlrlzm54Bs3uVUj GSL7zY381EuUVV4AjbqQyThbY9cVfsK0nmzLUqpiHG2IhGZDZA9+jxtz2wJWFkNQ nWA3afCUjcWV+4FpP3p1U1myCeh2yR2uCHs9pkUK3ts9uD/Wd5j9M1oBMlymbN/C 5Fahd+cTXrPAjsoRqCso9TBP4mIlNl1Jq8MRUWTL5HOuwn+KnufBtuQ1hIb71Eso kj90eWeo/P+temYAEquUVWiej7lnHyZVW647lE+o+xJEOmW+tY5H4jgA/twP4s7U BgR545usWF9/utvnhsGSkg1EYcdzaM01pkrWrw1GvHT++HshsrG6Tse8gY7JrTds LDtU8LPhPUEnSfVBcgcMg2Dg8lEbaODtdp2xtCJwZHy9CiAx3CKcogVEfecrKSSr 2iSocft9/l8J+mhVG2CI6ekG6Cy9hDct/3SV01Dfd4FG7xXJIE3mTDLILpi1AVIW MHxYD3skuQMyLhJDAwIDAQABo2EwXzAOBgNVHQ8BAf8EBAMCAoQwHQYDVR0lBBYw FAYIKwYBBQUHAwIGCCsGAQUFBwMBMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE FFAlC81rH211fPJoWglERKb/7/NfMA0GCSqGSIb3DQEBCwUAA4ICAQCWCP/O6YQ9 jhae2/BeUeeKsnoxf90prg3o6/QHbelF6yL+MvGg5ZbPSlt+ywLDNR2CYvXk/4SD 5To7CPKhSPBwwUJpafvQfAOZijU30fvXvp5yZEFoOzOyvBP58NfzL5qH6Pf5A6i3 rHvtR1v7DgS7u2qWWcSimIM0UPoV3JubLTEORjOR6FIyNkIxdjhrP3SxyZ54xxde G3bchKaRcGVNoFYSDN4bAA22JAjlD8kXNYKzIS/0cOR/9SnHd1wMIQ2trx0+TfyG FAA1mW1mjzQd+h5SGBVeCz2W2XttNSIfQDndJCsyACxmIaOK99AQxdhZsWfHtGO1 
3TjnyoiHjf8rozJbAVYqrIdB6GDf6fUlxwhUXT0qkgOvvAzjNnLoOBUkE4TWqXHl 38a+ITDNVzaUlrTd63eexS69V6kHe7mrqjywNQ9EXF9kaVeoNTzRf/ztT/DEVAl+ rKshMt4IOKQf1ScE+EJe1njpREHV+fa+kYvQB6cRuxW9a8sOSeQNaSL73Zv54elZ xffYhMv6yXvVxVnJHEsG3kM/CsvsU364BBd9kDcZbHpjNcDHMu+XxECJjD2atVtu FdaOLykGKfMCYVBP+xs97IJO8En/5N9QQwc+N4cfCg9/BWoZKHPbRx/V+57VEj0m 69EpJXbL15ZQLCPsaIcqJqpK23VyJKc8fA== -----END CERTIFICATE----- `)) return tempCertFile, tempCertKey, tempCA, func(t *testing.T) { defer os.Remove(tempCertFile.Name()) defer os.Remove(tempCertKey.Name()) defer os.Remove(tempCA.Name()) } } // another setup handle using Receptor-like cert-generation. func useUtilsSetupSuite(t *testing.T, name string) (string, string, string, func(t *testing.T)) { _, ca, err := utils.GenerateCA(name, name) if err != nil { t.Error(err.Error()) } certKey, cert, err := utils.GenerateCert(name, name, []string{name}, []string{name}) if err != nil { t.Error(err.Error()) } return ca, certKey, cert, func(t *testing.T) { defer os.Remove(ca) defer os.Remove(certKey) defer os.Remove(cert) } } func useUtilsSetupSuiteWithGenerateWithCA(t *testing.T, name string) (string, string, string, func(t *testing.T)) { caKey, caCert, err := utils.GenerateCA(name, name) if err != nil { t.Error(err.Error()) } certKey, cert, err := utils.GenerateCertWithCA(name, caKey, caCert, name, nil, []string{"foobar"}) if err != nil { t.Error(err.Error()) } return caCert, cert, certKey, func(t *testing.T) { defer os.Remove(caCert) defer os.Remove(caKey) defer os.Remove(certKey) defer os.Remove(cert) } } func TestPrepareGoodTLSServerConfig(t *testing.T) { tempCertFile, tempCertKey, tempCA, teardownSuite := setupSuite(t) defer teardownSuite(t) cfg := &TLSServerConfig{ Name: "foobar", Cert: tempCertFile.Name(), Key: tempCertKey.Name(), RequireClientCert: false, ClientCAs: tempCA.Name(), } MainInstance = New(context.Background(), "foobar") if err := cfg.Prepare(); err != nil { t.Errorf("nodeId=%s not found in certificate", MainInstance.nodeID) } } func TestNodeIDMismatch(t *testing.T) { tempCertFile, tempCertKey, tempCA, teardownSuite := setupSuite(t) defer teardownSuite(t) cfg := &TLSServerConfig{ Name: "foobar", Cert: tempCertFile.Name(), Key: tempCertKey.Name(), RequireClientCert: false, ClientCAs: tempCA.Name(), } MainInstance = New(context.Background(), "barfoo") if err := cfg.Prepare(); err == nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this should have failed out", MainInstance.nodeID) } } func TestNodeIDWithUtilsGenerateCert(t *testing.T) { tempCa, tempCertKey, tempCert, tearDownSuite := useUtilsSetupSuite(t, "foobar") defer tearDownSuite(t) cfg := TLSServerConfig{ Name: "foobar", Cert: tempCert, Key: tempCertKey, RequireClientCert: false, ClientCAs: tempCa, } MainInstance = New(context.Background(), "foobar") if err := cfg.Prepare(); err != nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this shouldn't have failed", MainInstance.nodeID) } } func TestBadNodeIDWithUtilsGenerateCert(t *testing.T) { tempCa, tempCertKey, tempCert, tearDownSuite := useUtilsSetupSuite(t, "foobar") defer tearDownSuite(t) cfg := TLSServerConfig{ Name: "foobar", Cert: tempCert, Key: tempCertKey, RequireClientCert: false, ClientCAs: tempCa, } MainInstance = New(context.Background(), "barfoo") if err := cfg.Prepare(); err == nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this should have failed", MainInstance.nodeID) } } func TestNodeIDWithUtilsGenerateCertWithCA(t *testing.T) { caCert, tempCert, tempCertKey, tearDownSuite := useUtilsSetupSuiteWithGenerateWithCA(t, "foobar") defer tearDownSuite(t) 
cfg := &TLSClientConfig{ Name: "foobar", Cert: tempCert, Key: tempCertKey, RootCAs: caCert, InsecureSkipVerify: false, } MainInstance = New(context.Background(), "foobar") if err := cfg.Prepare(); err != nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this should have not failed", MainInstance.nodeID) } } func TestNodeIDWIthSkipReceptorNamesCheckTrue(t *testing.T) { caCert, tempCert, tempCertKey, tearDownSuite := useUtilsSetupSuiteWithGenerateWithCA(t, "foobaz") defer tearDownSuite(t) clientCfg := &TLSClientConfig{ Name: "foobaz-client", Cert: tempCert, Key: tempCertKey, RootCAs: caCert, InsecureSkipVerify: false, SkipReceptorNamesCheck: true, } serverCfg := &TLSServerConfig{ Name: "foobaz-server", Cert: tempCert, Key: tempCertKey, RequireClientCert: false, ClientCAs: caCert, SkipReceptorNamesCheck: true, } MainInstance = New(context.Background(), "foobar") if err := clientCfg.Prepare(); err != nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this should have not failed", MainInstance.nodeID) } if err := serverCfg.Prepare(); err != nil { t.Errorf("nodeId=%s; ReceptorName=foobar; this should have not failed", MainInstance.nodeID) } } receptor-1.5.3/pkg/randstr/000077500000000000000000000000001475465740700156245ustar00rootroot00000000000000receptor-1.5.3/pkg/randstr/randstr.go000066400000000000000000000007451475465740700176360ustar00rootroot00000000000000package randstr import ( "crypto/rand" "math/big" ) // RandomString returns a random string of a given length. func RandomString(length int) string { if length < 0 { return "" } charset := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" randbytes := make([]byte, 0, length) for i := 0; i < length; i++ { idx, _ := rand.Int(rand.Reader, big.NewInt(int64(len(charset)))) randbytes = append(randbytes, charset[idx.Int64()]) } return string(randbytes) } receptor-1.5.3/pkg/randstr/randstr_test.go000066400000000000000000000032671475465740700206770ustar00rootroot00000000000000package randstr_test import ( "strings" "testing" "github.com/ansible/receptor/pkg/randstr" ) func TestRandStrLength(t *testing.T) { randStringTestCases := []struct { name string inputLength int expectedLength int }{ { name: "length of 100", inputLength: 100, expectedLength: 100, }, { name: "length of 0", inputLength: 0, expectedLength: 0, }, { name: "length of -1", inputLength: -1, expectedLength: 0, }, } for _, testCase := range randStringTestCases { t.Run(testCase.name, func(t *testing.T) { randomStr := randstr.RandomString(testCase.inputLength) if len(randomStr) != testCase.expectedLength { t.Errorf("%s - expected: %+v, received: %+v", testCase.name, testCase.expectedLength, len(randomStr)) } }) } } func TestRandStrHasDifferentOutputThanCharset(t *testing.T) { charset := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" randomStr := randstr.RandomString(len(charset)) if randomStr == charset { t.Errorf("output should be different than charset. 
charset: %+v, received: %+v", charset, randomStr) } } func TestRandStrHasNoContinuousSubStringOfCharset(t *testing.T) { randomStr := randstr.RandomString(10) charset := "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" charsetIndex := strings.Index(charset, string(randomStr[0])) for index, char := range randomStr { if index == 0 { continue } currentCharsetIndex := strings.Index(charset, string(char)) if charsetIndex+1 != currentCharsetIndex { break } if index+1 == len(randomStr) { t.Error("rand str is continuous") } charsetIndex = currentCharsetIndex } } receptor-1.5.3/pkg/services/000077500000000000000000000000001475465740700157725ustar00rootroot00000000000000receptor-1.5.3/pkg/services/cmdline.go000066400000000000000000000003141475465740700177320ustar00rootroot00000000000000package services import "github.com/ghjm/cmdline" var servicesSection = &cmdline.ConfigSection{ Description: "Commands to configure services that run on top of the Receptor mesh:", Order: 20, } receptor-1.5.3/pkg/services/command.go000066400000000000000000000043701475465740700177430ustar00rootroot00000000000000//go:build !windows // +build !windows package services import ( "crypto/tls" "net" "os/exec" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/creack/pty" "github.com/ghjm/cmdline" "github.com/google/shlex" "github.com/spf13/viper" ) func runCommand(qc net.Conn, command string, logger *logger.ReceptorLogger) error { args, err := shlex.Split(command) if err != nil { return err } cmd := exec.Command(args[0], args[1:]...) tty, err := pty.Start(cmd) if err != nil { return err } utils.BridgeConns(tty, "external command", qc, "command service", logger) return nil } // CommandService listens on the Receptor network and runs a local command. func CommandService(s *netceptor.Netceptor, service string, tlscfg *tls.Config, command string) { qli, err := s.ListenAndAdvertise(service, tlscfg, map[string]string{ "type": "Command Service", }) if err != nil { s.Logger.Error("Error listening on Receptor network: %s\n", err) return } for { qc, err := qli.Accept() if err != nil { s.Logger.Error("Error accepting connection on Receptor network: %s\n", err) return } go func() { err := runCommand(qc, command, s.Logger) if err != nil { s.Logger.Error("Error running command: %s\n", err) } _ = qc.Close() }() } } // commandSvcCfg is the cmdline configuration object for a command service. type CommandSvcCfg struct { Service string `required:"true" description:"Receptor service name to bind to"` Command string `required:"true" description:"Command to execute on a connection"` TLS string `description:"Name of TLS server config"` } // Run runs the action. 
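// A usage sketch (illustrative only; it assumes netceptor.MainInstance has
// already been initialized and that a TLS server config named "server-tls"
// was registered; the service name and command are placeholders):
//
//    cfg := CommandSvcCfg{
//        Service: "shell",
//        Command: "/bin/bash -i",
//        TLS:     "server-tls",
//    }
//    if err := cfg.Run(); err != nil {
//        // the TLS config lookup failed; the service was not started
//    }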
func (cfg CommandSvcCfg) Run() error { netceptor.MainInstance.Logger.Info("Running command service %s\n", cfg) tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS) if err != nil { return err } go CommandService(netceptor.MainInstance, cfg.Service, tlscfg, cfg.Command) return nil } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-command-service", "command-service", "Run an interactive command via a Receptor service", CommandSvcCfg{}, cmdline.Section(servicesSection)) } receptor-1.5.3/pkg/services/interfaces/000077500000000000000000000000001475465740700201155ustar00rootroot00000000000000receptor-1.5.3/pkg/services/interfaces/mock_interfaces/000077500000000000000000000000001475465740700232515ustar00rootroot00000000000000receptor-1.5.3/pkg/services/interfaces/mock_interfaces/net_interfaces.go000066400000000000000000000372401475465740700265770ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/services/interfaces/net_interfaces.go // // Generated by this command: // // mockgen -source=pkg/services/interfaces/net_interfaces.go -destination=pkg/services/interfaces/mock_interfaces/net_interfaces.go // // Package mock_net_interface is a generated GoMock package. package mock_net_interface import ( net "net" netip "net/netip" os "os" reflect "reflect" syscall "syscall" time "time" net_interface "github.com/ansible/receptor/pkg/services/interfaces" gomock "go.uber.org/mock/gomock" ) // MockNetterUDP is a mock of NetterUDP interface. type MockNetterUDP struct { ctrl *gomock.Controller recorder *MockNetterUDPMockRecorder isgomock struct{} } // MockNetterUDPMockRecorder is the mock recorder for MockNetterUDP. type MockNetterUDPMockRecorder struct { mock *MockNetterUDP } // NewMockNetterUDP creates a new mock instance. func NewMockNetterUDP(ctrl *gomock.Controller) *MockNetterUDP { mock := &MockNetterUDP{ctrl: ctrl} mock.recorder = &MockNetterUDPMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetterUDP) EXPECT() *MockNetterUDPMockRecorder { return m.recorder } // DialUDP mocks base method. func (m *MockNetterUDP) DialUDP(network string, laddr, raddr *net.UDPAddr) (net_interface.UDPConnInterface, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DialUDP", network, laddr, raddr) ret0, _ := ret[0].(net_interface.UDPConnInterface) ret1, _ := ret[1].(error) return ret0, ret1 } // DialUDP indicates an expected call of DialUDP. func (mr *MockNetterUDPMockRecorder) DialUDP(network, laddr, raddr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialUDP", reflect.TypeOf((*MockNetterUDP)(nil).DialUDP), network, laddr, raddr) } // ListenUDP mocks base method. func (m *MockNetterUDP) ListenUDP(network string, laddr *net.UDPAddr) (net_interface.UDPConnInterface, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListenUDP", network, laddr) ret0, _ := ret[0].(net_interface.UDPConnInterface) ret1, _ := ret[1].(error) return ret0, ret1 } // ListenUDP indicates an expected call of ListenUDP. func (mr *MockNetterUDPMockRecorder) ListenUDP(network, laddr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenUDP", reflect.TypeOf((*MockNetterUDP)(nil).ListenUDP), network, laddr) } // ResolveUDPAddr mocks base method. 
func (m *MockNetterUDP) ResolveUDPAddr(network, address string) (*net.UDPAddr, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ResolveUDPAddr", network, address) ret0, _ := ret[0].(*net.UDPAddr) ret1, _ := ret[1].(error) return ret0, ret1 } // ResolveUDPAddr indicates an expected call of ResolveUDPAddr. func (mr *MockNetterUDPMockRecorder) ResolveUDPAddr(network, address any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResolveUDPAddr", reflect.TypeOf((*MockNetterUDP)(nil).ResolveUDPAddr), network, address) } // MockUDPConnInterface is a mock of UDPConnInterface interface. type MockUDPConnInterface struct { ctrl *gomock.Controller recorder *MockUDPConnInterfaceMockRecorder isgomock struct{} } // MockUDPConnInterfaceMockRecorder is the mock recorder for MockUDPConnInterface. type MockUDPConnInterfaceMockRecorder struct { mock *MockUDPConnInterface } // NewMockUDPConnInterface creates a new mock instance. func NewMockUDPConnInterface(ctrl *gomock.Controller) *MockUDPConnInterface { mock := &MockUDPConnInterface{ctrl: ctrl} mock.recorder = &MockUDPConnInterfaceMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockUDPConnInterface) EXPECT() *MockUDPConnInterfaceMockRecorder { return m.recorder } // Close mocks base method. func (m *MockUDPConnInterface) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockUDPConnInterfaceMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockUDPConnInterface)(nil).Close)) } // File mocks base method. func (m *MockUDPConnInterface) File() (*os.File, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "File") ret0, _ := ret[0].(*os.File) ret1, _ := ret[1].(error) return ret0, ret1 } // File indicates an expected call of File. func (mr *MockUDPConnInterfaceMockRecorder) File() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "File", reflect.TypeOf((*MockUDPConnInterface)(nil).File)) } // LocalAddr mocks base method. func (m *MockUDPConnInterface) LocalAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LocalAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // LocalAddr indicates an expected call of LocalAddr. func (mr *MockUDPConnInterfaceMockRecorder) LocalAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LocalAddr", reflect.TypeOf((*MockUDPConnInterface)(nil).LocalAddr)) } // Read mocks base method. func (m *MockUDPConnInterface) Read(b []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Read", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Read indicates an expected call of Read. func (mr *MockUDPConnInterfaceMockRecorder) Read(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockUDPConnInterface)(nil).Read), b) } // ReadFrom mocks base method. func (m *MockUDPConnInterface) ReadFrom(b []byte) (int, net.Addr, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFrom", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(net.Addr) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ReadFrom indicates an expected call of ReadFrom. 
func (mr *MockUDPConnInterfaceMockRecorder) ReadFrom(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFrom", reflect.TypeOf((*MockUDPConnInterface)(nil).ReadFrom), b) } // ReadFromUDP mocks base method. func (m *MockUDPConnInterface) ReadFromUDP(b []byte) (int, *net.UDPAddr, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFromUDP", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(*net.UDPAddr) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ReadFromUDP indicates an expected call of ReadFromUDP. func (mr *MockUDPConnInterfaceMockRecorder) ReadFromUDP(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFromUDP", reflect.TypeOf((*MockUDPConnInterface)(nil).ReadFromUDP), b) } // ReadFromUDPAddrPort mocks base method. func (m *MockUDPConnInterface) ReadFromUDPAddrPort(b []byte) (int, netip.AddrPort, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadFromUDPAddrPort", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(netip.AddrPort) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // ReadFromUDPAddrPort indicates an expected call of ReadFromUDPAddrPort. func (mr *MockUDPConnInterfaceMockRecorder) ReadFromUDPAddrPort(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadFromUDPAddrPort", reflect.TypeOf((*MockUDPConnInterface)(nil).ReadFromUDPAddrPort), b) } // ReadMsgUDP mocks base method. func (m *MockUDPConnInterface) ReadMsgUDP(b, oob []byte) (int, int, int, *net.UDPAddr, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadMsgUDP", b, oob) ret0, _ := ret[0].(int) ret1, _ := ret[1].(int) ret2, _ := ret[2].(int) ret3, _ := ret[3].(*net.UDPAddr) ret4, _ := ret[4].(error) return ret0, ret1, ret2, ret3, ret4 } // ReadMsgUDP indicates an expected call of ReadMsgUDP. func (mr *MockUDPConnInterfaceMockRecorder) ReadMsgUDP(b, oob any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMsgUDP", reflect.TypeOf((*MockUDPConnInterface)(nil).ReadMsgUDP), b, oob) } // ReadMsgUDPAddrPort mocks base method. func (m *MockUDPConnInterface) ReadMsgUDPAddrPort(b, oob []byte) (int, int, int, netip.AddrPort, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ReadMsgUDPAddrPort", b, oob) ret0, _ := ret[0].(int) ret1, _ := ret[1].(int) ret2, _ := ret[2].(int) ret3, _ := ret[3].(netip.AddrPort) ret4, _ := ret[4].(error) return ret0, ret1, ret2, ret3, ret4 } // ReadMsgUDPAddrPort indicates an expected call of ReadMsgUDPAddrPort. func (mr *MockUDPConnInterfaceMockRecorder) ReadMsgUDPAddrPort(b, oob any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadMsgUDPAddrPort", reflect.TypeOf((*MockUDPConnInterface)(nil).ReadMsgUDPAddrPort), b, oob) } // RemoteAddr mocks base method. func (m *MockUDPConnInterface) RemoteAddr() net.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RemoteAddr") ret0, _ := ret[0].(net.Addr) return ret0 } // RemoteAddr indicates an expected call of RemoteAddr. func (mr *MockUDPConnInterfaceMockRecorder) RemoteAddr() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoteAddr", reflect.TypeOf((*MockUDPConnInterface)(nil).RemoteAddr)) } // SetDeadline mocks base method. func (m *MockUDPConnInterface) SetDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetDeadline indicates an expected call of SetDeadline. 
func (mr *MockUDPConnInterfaceMockRecorder) SetDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDeadline", reflect.TypeOf((*MockUDPConnInterface)(nil).SetDeadline), t) } // SetReadBuffer mocks base method. func (m *MockUDPConnInterface) SetReadBuffer(bytes int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetReadBuffer", bytes) ret0, _ := ret[0].(error) return ret0 } // SetReadBuffer indicates an expected call of SetReadBuffer. func (mr *MockUDPConnInterfaceMockRecorder) SetReadBuffer(bytes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadBuffer", reflect.TypeOf((*MockUDPConnInterface)(nil).SetReadBuffer), bytes) } // SetReadDeadline mocks base method. func (m *MockUDPConnInterface) SetReadDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetReadDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetReadDeadline indicates an expected call of SetReadDeadline. func (mr *MockUDPConnInterfaceMockRecorder) SetReadDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetReadDeadline", reflect.TypeOf((*MockUDPConnInterface)(nil).SetReadDeadline), t) } // SetWriteBuffer mocks base method. func (m *MockUDPConnInterface) SetWriteBuffer(bytes int) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetWriteBuffer", bytes) ret0, _ := ret[0].(error) return ret0 } // SetWriteBuffer indicates an expected call of SetWriteBuffer. func (mr *MockUDPConnInterfaceMockRecorder) SetWriteBuffer(bytes any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteBuffer", reflect.TypeOf((*MockUDPConnInterface)(nil).SetWriteBuffer), bytes) } // SetWriteDeadline mocks base method. func (m *MockUDPConnInterface) SetWriteDeadline(t time.Time) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetWriteDeadline", t) ret0, _ := ret[0].(error) return ret0 } // SetWriteDeadline indicates an expected call of SetWriteDeadline. func (mr *MockUDPConnInterfaceMockRecorder) SetWriteDeadline(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWriteDeadline", reflect.TypeOf((*MockUDPConnInterface)(nil).SetWriteDeadline), t) } // SyscallConn mocks base method. func (m *MockUDPConnInterface) SyscallConn() (syscall.RawConn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyscallConn") ret0, _ := ret[0].(syscall.RawConn) ret1, _ := ret[1].(error) return ret0, ret1 } // SyscallConn indicates an expected call of SyscallConn. func (mr *MockUDPConnInterfaceMockRecorder) SyscallConn() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyscallConn", reflect.TypeOf((*MockUDPConnInterface)(nil).SyscallConn)) } // Write mocks base method. func (m *MockUDPConnInterface) Write(b []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", b) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Write indicates an expected call of Write. func (mr *MockUDPConnInterfaceMockRecorder) Write(b any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockUDPConnInterface)(nil).Write), b) } // WriteMsgUDP mocks base method. 
func (m *MockUDPConnInterface) WriteMsgUDP(b, oob []byte, addr *net.UDPAddr) (int, int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteMsgUDP", b, oob, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(int) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // WriteMsgUDP indicates an expected call of WriteMsgUDP. func (mr *MockUDPConnInterfaceMockRecorder) WriteMsgUDP(b, oob, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteMsgUDP", reflect.TypeOf((*MockUDPConnInterface)(nil).WriteMsgUDP), b, oob, addr) } // WriteMsgUDPAddrPort mocks base method. func (m *MockUDPConnInterface) WriteMsgUDPAddrPort(b, oob []byte, addr netip.AddrPort) (int, int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteMsgUDPAddrPort", b, oob, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(int) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // WriteMsgUDPAddrPort indicates an expected call of WriteMsgUDPAddrPort. func (mr *MockUDPConnInterfaceMockRecorder) WriteMsgUDPAddrPort(b, oob, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteMsgUDPAddrPort", reflect.TypeOf((*MockUDPConnInterface)(nil).WriteMsgUDPAddrPort), b, oob, addr) } // WriteTo mocks base method. func (m *MockUDPConnInterface) WriteTo(b []byte, addr net.Addr) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteTo", b, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // WriteTo indicates an expected call of WriteTo. func (mr *MockUDPConnInterfaceMockRecorder) WriteTo(b, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteTo", reflect.TypeOf((*MockUDPConnInterface)(nil).WriteTo), b, addr) } // WriteToUDP mocks base method. func (m *MockUDPConnInterface) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteToUDP", b, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // WriteToUDP indicates an expected call of WriteToUDP. func (mr *MockUDPConnInterfaceMockRecorder) WriteToUDP(b, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteToUDP", reflect.TypeOf((*MockUDPConnInterface)(nil).WriteToUDP), b, addr) } // WriteToUDPAddrPort mocks base method. func (m *MockUDPConnInterface) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "WriteToUDPAddrPort", b, addr) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // WriteToUDPAddrPort indicates an expected call of WriteToUDPAddrPort. func (mr *MockUDPConnInterfaceMockRecorder) WriteToUDPAddrPort(b, addr any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteToUDPAddrPort", reflect.TypeOf((*MockUDPConnInterface)(nil).WriteToUDPAddrPort), b, addr) } receptor-1.5.3/pkg/services/interfaces/net_interfaces.go000066400000000000000000000027701475465740700234430ustar00rootroot00000000000000package netinterface import ( "net" "net/netip" "os" "syscall" "time" ) type NetterUDP interface { ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) ListenUDP(network string, laddr *net.UDPAddr) (UDPConnInterface, error) DialUDP(network string, laddr *net.UDPAddr, raddr *net.UDPAddr) (UDPConnInterface, error) } // UDPConnInterface abstracts the methods of net.UDPConn used in the proxy. 
type UDPConnInterface interface { Close() error File() (f *os.File, err error) LocalAddr() net.Addr Read(b []byte) (int, error) ReadFrom(b []byte) (int, net.Addr, error) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) ReadFromUDPAddrPort(b []byte) (n int, addr netip.AddrPort, err error) ReadMsgUDP(b []byte, oob []byte) (n int, oobn int, flags int, addr *net.UDPAddr, err error) ReadMsgUDPAddrPort(b []byte, oob []byte) (n int, oobn int, flags int, addr netip.AddrPort, err error) RemoteAddr() net.Addr SetDeadline(t time.Time) error SetReadBuffer(bytes int) error SetReadDeadline(t time.Time) error SetWriteBuffer(bytes int) error SetWriteDeadline(t time.Time) error SyscallConn() (syscall.RawConn, error) Write(b []byte) (int, error) WriteMsgUDP(b []byte, oob []byte, addr *net.UDPAddr) (n int, oobn int, err error) WriteMsgUDPAddrPort(b []byte, oob []byte, addr netip.AddrPort) (n int, oobn int, err error) WriteTo(b []byte, addr net.Addr) (int, error) WriteToUDP(b []byte, addr *net.UDPAddr) (int, error) WriteToUDPAddrPort(b []byte, addr netip.AddrPort) (int, error) } receptor-1.5.3/pkg/services/ip_router.go000066400000000000000000000230341475465740700203330ustar00rootroot00000000000000//go:build linux // +build linux package services import ( "bytes" "fmt" "math" "net" "strings" "sync" "time" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/songgao/water" "github.com/spf13/viper" "github.com/vishvananda/netlink" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" ) const adTypeIPRouter = "IP Router" type ipRoute struct { dest *net.IPNet via string } // IPRouterService is an IP router service. type IPRouterService struct { nc *netceptor.Netceptor networkName string tunIfName string localNet *net.IPNet advertiseRoutes []*net.IPNet linkIP net.IP destIP net.IP tunIf *water.Interface link netlink.Link nConn netceptor.PacketConner knownRoutes []ipRoute knownRoutesLock *sync.RWMutex } // NewIPRouter creates a new IP router service. 
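// A usage sketch (illustrative only; the network name, tun interface name, and
// CIDR values are placeholders, and the process must be allowed to create and
// configure a tun device):
//
//    _, err := NewIPRouter(nc, "ipnet", "", "10.10.10.0/30", "192.168.100.0/24")
//    if err != nil {
//        // tun setup, address assignment, or service registration failed
//    }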
func NewIPRouter(nc *netceptor.Netceptor, networkName string, tunInterface string, localNet string, routes string, ) (*IPRouterService, error) { ipr := &IPRouterService{ nc: nc, networkName: networkName, tunIfName: tunInterface, knownRoutes: make([]ipRoute, 0), knownRoutesLock: &sync.RWMutex{}, } var err error _, ipr.localNet, err = net.ParseCIDR(localNet) if err != nil { return nil, fmt.Errorf("could not parse %s as a CIDR address", localNet) } ones, bits := ipr.localNet.Mask.Size() if ones != 30 { return nil, fmt.Errorf("local network %s must be a /30 CIDR", localNet) } if bits != 32 { return nil, fmt.Errorf("local network %s must be IPv4", localNet) } ipr.advertiseRoutes = make([]*net.IPNet, 0) if routes != "" { routeList := strings.Split(routes, ",") for i := range routeList { _, ipNet, err := net.ParseCIDR(routeList[i]) if err != nil { return nil, fmt.Errorf("could not parse %s as a CIDR address", routeList[i]) } ipr.advertiseRoutes = append(ipr.advertiseRoutes, ipNet) } } err = ipr.run() if err != nil { return nil, err } return ipr, nil } func (ipr *IPRouterService) updateKnownRoutes() { newRoutes := make([]ipRoute, 0) status := ipr.nc.Status() for i := range status.Advertisements { ad := status.Advertisements[i] adType, ok := ad.Tags["type"] if !ok || adType != adTypeIPRouter { continue } network, ok := ad.Tags["network"] if !ok || network != ipr.networkName { continue } _, ok = status.RoutingTable[ad.NodeID] if !ok { continue } for key, value := range ad.Tags { if strings.HasPrefix(key, "route") { _, ipNet, err := net.ParseCIDR(value) if err == nil { newRoute := ipRoute{ dest: ipNet, via: ad.NodeID, } newRoutes = append(newRoutes, newRoute) } } } } ipr.knownRoutesLock.Lock() ipr.knownRoutes = newRoutes ipr.knownRoutesLock.Unlock() } func (ipr *IPRouterService) reconcileRoutingTable() { ipr.knownRoutesLock.RLock() defer ipr.knownRoutesLock.RUnlock() routes, err := netlink.RouteList(ipr.link, netlink.FAMILY_ALL) if err != nil { ipr.nc.Logger.Error("error retrieving kernel routes list: %s", err) return } fmt.Printf("=========\n") fmt.Printf("Receptor Routes:\n") for i := range ipr.knownRoutes { fmt.Printf(" dest %s via %s\n", ipr.knownRoutes[i].dest.String(), ipr.knownRoutes[i].via) } fmt.Printf("Kernel Routes:\n") for i := range routes { fmt.Printf(" dest %s\n", routes[i].Dst.String()) } for i := range ipr.knownRoutes { kr := ipr.knownRoutes[i] found := false for j := range routes { route := routes[j] if kr.dest.IP.Equal(route.Dst.IP) && bytes.Equal(kr.dest.Mask, route.Dst.Mask) { found = true break } } if !found { ipr.nc.Logger.Debug("Adding route to %s", kr.dest.String()) err := ipr.addRoute(kr.dest) if err != nil { ipr.nc.Logger.Error("error adding kernel route to %s: %s", kr.dest.String(), err) } } } _, ipv6LinkLocal, _ := net.ParseCIDR("fe80::/10") for i := range routes { route := routes[i] if ipr.localNet.Contains(route.Dst.IP) { continue } if ipv6LinkLocal.Contains(route.Dst.IP) { continue } found := false for j := range ipr.knownRoutes { kr := ipr.knownRoutes[j] if kr.dest.IP.Equal(route.Dst.IP) && bytes.Equal(kr.dest.Mask, route.Dst.Mask) { found = true break } } if !found { ipr.nc.Logger.Debug("Removing route to %s", route.Dst.String()) err := netlink.RouteDel(&route) if err != nil { ipr.nc.Logger.Error("error deleting kernel route to %s: %s", route.Dst.String(), err) } } } } func (ipr *IPRouterService) runAdvertisingWatcher() { for { ipr.updateKnownRoutes() ipr.reconcileRoutingTable() select { case <-ipr.nc.Context().Done(): return case <-time.After(10 * time.Second): } } 
} func (ipr *IPRouterService) runTunToNetceptor() { ipr.nc.Logger.Debug("Running tunnel-to-Receptor forwarder\n") buf := make([]byte, utils.NormalBufferSize) for { if ipr.nc.Context().Err() != nil { return } n, err := ipr.tunIf.Read(buf) if err != nil { ipr.nc.Logger.Error("Error reading from tun device: %s\n", err) continue } packet := buf[:n] // Get the destination address from the received packet ipVersion := int(packet[0] >> 4) var destIP net.IP switch ipVersion { case 4: header, err := ipv4.ParseHeader(packet) if err != nil { ipr.nc.Logger.Debug("Malformed ipv4 packet received: %s", err) } destIP = header.Dst case 6: header, err := ipv6.ParseHeader(packet) if err != nil { ipr.nc.Logger.Debug("Malformed ipv6 packet received: %s", err) } destIP = header.Dst default: ipr.nc.Logger.Debug("Packet received with unknown version %d", ipVersion) continue } // Find the lowest cost receptor node that can accept this packet remoteNode := "" remoteCost := math.MaxFloat64 ipr.knownRoutesLock.RLock() for i := range ipr.knownRoutes { route := ipr.knownRoutes[i] cost, err := ipr.nc.PathCost(route.via) if err != nil { continue } if cost < remoteCost && route.dest.Contains(destIP) { remoteCost = cost remoteNode = route.via } } ipr.knownRoutesLock.RUnlock() if remoteNode == "" { continue } // Send the packet via Receptor remoteAddr := ipr.nc.NewAddr(remoteNode, ipr.networkName) ipr.nc.Logger.Trace(" Forwarding data length %d to %s via %s\n", n, destIP, remoteAddr.String()) wn, err := ipr.nConn.WriteTo(packet, remoteAddr) if err != nil || wn != n { ipr.nc.Logger.Error("Error writing to Receptor network: %s\n", err) } } } func (ipr *IPRouterService) runNetceptorToTun() { ipr.nc.Logger.Debug("Running netceptor to tunnel forwarder\n") buf := make([]byte, utils.NormalBufferSize) for { if ipr.nc.Context().Err() != nil { return } n, addr, err := ipr.nConn.ReadFrom(buf) if err != nil { ipr.nc.Logger.Error("Error reading from Receptor: %s\n", err) continue } ipr.nc.Logger.Trace(" Forwarding data length %d from %s to %s\n", n, addr.String(), ipr.tunIf.Name()) wn, err := ipr.tunIf.Write(buf[:n]) if err != nil || wn != n { ipr.nc.Logger.Error("Error writing to tun device: %s\n", err) } } } func (ipr *IPRouterService) addRoute(route *net.IPNet) error { err := netlink.RouteAdd(&netlink.Route{ LinkIndex: ipr.link.Attrs().Index, Scope: netlink.SCOPE_UNIVERSE, Dst: route, Gw: ipr.destIP, }) if err != nil { return fmt.Errorf("error adding route to interface: %s", err) } return nil } // Run runs the IP router. 
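// It opens the tun device, assigns the local and peer addresses derived from the
// configured /30 network, advertises the local network and any extra routes on the
// Receptor network, and then starts the route watcher and the two packet forwarders.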
func (ipr *IPRouterService) run() error { cfg := water.Config{ DeviceType: water.TUN, } cfg.Name = ipr.tunIfName var err error ipr.tunIf, err = water.New(water.Config{DeviceType: water.TUN}) if err != nil { return fmt.Errorf("error opening tun device: %s", err) } ipr.link, err = netlink.LinkByName(ipr.tunIf.Name()) if err != nil { return fmt.Errorf("error accessing link for tun device: %s", err) } baseIP := ipr.localNet.IP.To4() ipr.linkIP = make([]byte, 4) copy(ipr.linkIP, baseIP) ipr.linkIP[3]++ ipr.destIP = make([]byte, 4) copy(ipr.destIP, ipr.linkIP) ipr.destIP[3]++ if !ipr.localNet.Contains(ipr.linkIP) || !ipr.localNet.Contains(ipr.destIP) { return fmt.Errorf("error calculating link and remote addresses") } addr := &netlink.Addr{ IPNet: netlink.NewIPNet(ipr.linkIP), Peer: netlink.NewIPNet(ipr.destIP), } err = netlink.AddrAdd(ipr.link, addr) if err != nil { return fmt.Errorf("error adding IP address to link: %s", err) } err = netlink.LinkSetUp(ipr.link) if err != nil { return fmt.Errorf("error setting link up: %s", err) } advertisement := map[string]string{ "type": adTypeIPRouter, "network": ipr.networkName, "route_local": ipr.localNet.String(), } for i := range ipr.advertiseRoutes { advertisement[fmt.Sprintf("route_%d", i)] = ipr.advertiseRoutes[i].String() } ipr.nConn, err = ipr.nc.ListenPacketAndAdvertise(ipr.networkName, advertisement) if err != nil { return fmt.Errorf("error listening for service %s: %s", ipr.networkName, err) } go ipr.runAdvertisingWatcher() go ipr.runTunToNetceptor() go ipr.runNetceptorToTun() return nil } // Run runs the action. func (cfg IPRouterCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running tun router service %s\n", cfg) _, err := NewIPRouter(netceptor.MainInstance, cfg.NetworkName, cfg.Interface, cfg.LocalNet, cfg.Routes) if err != nil { return err } return nil } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-ip-router", "ip-router", "Run an IP router using a tun interface", IPRouterCfg{}, cmdline.Section(servicesSection)) } receptor-1.5.3/pkg/services/ip_router_cfg.go000066400000000000000000000006561475465740700211570ustar00rootroot00000000000000package services // ipRouterCfg is the cmdline configuration object for an IP router. type IPRouterCfg struct { NetworkName string `required:"true" description:"Name of this network and service."` Interface string `description:"Name of the local tun interface"` LocalNet string `required:"true" description:"Local /30 CIDR address"` Routes string `description:"Comma separated list of CIDR subnets to advertise"` } receptor-1.5.3/pkg/services/mock_services/000077500000000000000000000000001475465740700206265ustar00rootroot00000000000000receptor-1.5.3/pkg/services/mock_services/udp_proxy.go000066400000000000000000000067731475465740700232230ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/services/udp_proxy.go // // Generated by this command: // // mockgen -source=pkg/services/udp_proxy.go -destination=pkg/services/mock_services/udp_proxy.go // // Package mock_services is a generated GoMock package. package mock_services import ( reflect "reflect" logger "github.com/ansible/receptor/pkg/logger" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockNetcForUDPProxy is a mock of NetcForUDPProxy interface. 
type MockNetcForUDPProxy struct { ctrl *gomock.Controller recorder *MockNetcForUDPProxyMockRecorder isgomock struct{} } // MockNetcForUDPProxyMockRecorder is the mock recorder for MockNetcForUDPProxy. type MockNetcForUDPProxyMockRecorder struct { mock *MockNetcForUDPProxy } // NewMockNetcForUDPProxy creates a new mock instance. func NewMockNetcForUDPProxy(ctrl *gomock.Controller) *MockNetcForUDPProxy { mock := &MockNetcForUDPProxy{ctrl: ctrl} mock.recorder = &MockNetcForUDPProxyMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetcForUDPProxy) EXPECT() *MockNetcForUDPProxyMockRecorder { return m.recorder } // GetLogger mocks base method. func (m *MockNetcForUDPProxy) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockNetcForUDPProxyMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockNetcForUDPProxy)(nil).GetLogger)) } // ListenPacket mocks base method. func (m *MockNetcForUDPProxy) ListenPacket(service string) (netceptor.PacketConner, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListenPacket", service) ret0, _ := ret[0].(netceptor.PacketConner) ret1, _ := ret[1].(error) return ret0, ret1 } // ListenPacket indicates an expected call of ListenPacket. func (mr *MockNetcForUDPProxyMockRecorder) ListenPacket(service any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenPacket", reflect.TypeOf((*MockNetcForUDPProxy)(nil).ListenPacket), service) } // ListenPacketAndAdvertise mocks base method. func (m *MockNetcForUDPProxy) ListenPacketAndAdvertise(service string, tags map[string]string) (netceptor.PacketConner, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ListenPacketAndAdvertise", service, tags) ret0, _ := ret[0].(netceptor.PacketConner) ret1, _ := ret[1].(error) return ret0, ret1 } // ListenPacketAndAdvertise indicates an expected call of ListenPacketAndAdvertise. func (mr *MockNetcForUDPProxyMockRecorder) ListenPacketAndAdvertise(service, tags any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListenPacketAndAdvertise", reflect.TypeOf((*MockNetcForUDPProxy)(nil).ListenPacketAndAdvertise), service, tags) } // NewAddr mocks base method. func (m *MockNetcForUDPProxy) NewAddr(node, service string) netceptor.Addr { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewAddr", node, service) ret0, _ := ret[0].(netceptor.Addr) return ret0 } // NewAddr indicates an expected call of NewAddr. func (mr *MockNetcForUDPProxyMockRecorder) NewAddr(node, service any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewAddr", reflect.TypeOf((*MockNetcForUDPProxy)(nil).NewAddr), node, service) } receptor-1.5.3/pkg/services/tcp_proxy.go000066400000000000000000000112731475465740700203540ustar00rootroot00000000000000package services import ( "crypto/tls" "fmt" "net" "strconv" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // TCPProxyServiceInbound listens on a TCP port and forwards the connection over the Receptor network. 
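// Each accepted TCP connection is dialed through to the given node and Receptor
// service and bridged with utils.BridgeConns; tlsServer optionally wraps the local
// TCP listener and tlsClient optionally secures the Receptor-side dial (either may
// be nil).
//
// A minimal usage sketch (host, port, node and service names are illustrative only):
//
//	err := TCPProxyServiceInbound(netceptor.MainInstance, "127.0.0.1", 8080, nil, "remote-node", "my-service", nil)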
func TCPProxyServiceInbound(s *netceptor.Netceptor, host string, port int, tlsServer *tls.Config, node string, rservice string, tlsClient *tls.Config, ) error { tli, err := net.Listen("tcp", net.JoinHostPort(host, strconv.Itoa(port))) if tlsServer != nil { tli = tls.NewListener(tli, tlsServer) } if err != nil { return fmt.Errorf("error listening on TCP: %s", err) } go func() { for { tc, err := tli.Accept() if err != nil { s.Logger.Error("Error accepting TCP connection: %s\n", err) return } qc, err := s.Dial(node, rservice, tlsClient) if err != nil { s.Logger.Error("Error connecting on Receptor network: %s\n", err) continue } go utils.BridgeConns(tc, "tcp service", qc, "receptor connection", s.Logger) } }() return nil } // TCPProxyServiceOutbound listens on the Receptor network and forwards the connection via TCP. func TCPProxyServiceOutbound(s *netceptor.Netceptor, service string, tlsServer *tls.Config, address string, tlsClient *tls.Config, ) error { qli, err := s.ListenAndAdvertise(service, tlsServer, map[string]string{ "type": "TCP Proxy", "address": address, }) if err != nil { return fmt.Errorf("error listening on Receptor network: %s", err) } go func() { for { qc, err := qli.Accept() if err != nil { s.Logger.Error("Error accepting connection on Receptor network: %s\n", err) return } var tc net.Conn if tlsClient == nil { tc, err = net.Dial("tcp", address) } else { tc, err = tls.Dial("tcp", address, tlsClient) } if err != nil { s.Logger.Error("Error connecting via TCP: %s\n", err) continue } go utils.BridgeConns(qc, "receptor service", tc, "tcp connection", s.Logger) } }() return nil } // tcpProxyInboundCfg is the cmdline configuration object for a TCP inbound proxy. type TCPProxyInboundCfg struct { Port int `required:"true" description:"Local TCP port to bind to"` BindAddr string `description:"Address to bind TCP listener to" default:"0.0.0.0"` RemoteNode string `required:"true" description:"Receptor node to connect to"` RemoteService string `required:"true" description:"Receptor service name to connect to"` TLSServer string `description:"Name of TLS server config for the TCP listener"` TLSClient string `description:"Name of TLS client config for the Receptor connection"` } // Run runs the action. func (cfg TCPProxyInboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running TCP inbound proxy service %v\n", cfg) tlsClientCfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLSClient, cfg.RemoteNode, netceptor.ExpectedHostnameTypeReceptor) if err != nil { return err } TLSServerConfig, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLSServer) if err != nil { return err } return TCPProxyServiceInbound(netceptor.MainInstance, cfg.BindAddr, cfg.Port, TLSServerConfig, cfg.RemoteNode, cfg.RemoteService, tlsClientCfg) } // tcpProxyOutboundCfg is the cmdline configuration object for a TCP outbound proxy. type TCPProxyOutboundCfg struct { Service string `required:"true" description:"Receptor service name to bind to"` Address string `required:"true" description:"Address for outbound TCP connection"` TLSServer string `description:"Name of TLS server config for the Receptor service"` TLSClient string `description:"Name of TLS client config for the TCP connection"` } // Run runs the action. 
func (cfg TCPProxyOutboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running TCP inbound proxy service %s\n", cfg) TLSServerConfig, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLSServer) if err != nil { return err } host, _, err := net.SplitHostPort(cfg.Address) if err != nil { return err } tlsClientCfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLSClient, host, netceptor.ExpectedHostnameTypeDNS) if err != nil { return err } return TCPProxyServiceOutbound(netceptor.MainInstance, cfg.Service, TLSServerConfig, cfg.Address, tlsClientCfg) } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-proxies", "tcp-server", "Listen for TCP and forward via Receptor", TCPProxyInboundCfg{}, cmdline.Section(servicesSection)) cmdline.RegisterConfigTypeForApp("receptor-proxies", "tcp-client", "Listen on a Receptor service and forward via TCP", TCPProxyOutboundCfg{}, cmdline.Section(servicesSection)) } receptor-1.5.3/pkg/services/udp_proxy.go000066400000000000000000000156211475465740700203570ustar00rootroot00000000000000package services import ( "fmt" "net" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" net_interface "github.com/ansible/receptor/pkg/services/interfaces" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) type NetcForUDPProxy interface { NewAddr(node string, service string) netceptor.Addr ListenPacket(service string) (netceptor.PacketConner, error) GetLogger() *logger.ReceptorLogger ListenPacketAndAdvertise(service string, tags map[string]string) (netceptor.PacketConner, error) } type Net struct{} func (n *Net) ResolveUDPAddr(network string, address string) (*net.UDPAddr, error) { return net.ResolveUDPAddr(network, address) } func (n *Net) ListenUDP(network string, laddr *net.UDPAddr) (net_interface.UDPConnInterface, error) { return net.ListenUDP(network, laddr) } func (n *Net) DialUDP(network string, laddr *net.UDPAddr, raddr *net.UDPAddr) (net_interface.UDPConnInterface, error) { return net.DialUDP(network, laddr, raddr) } // UDPProxyServiceInbound listens on a UDP port and forwards packets to a remote Receptor service. 
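// A separate Receptor packet connection is opened for each remote UDP peer, and
// replies arriving from the Receptor side are copied back to that peer by
// runNetceptorToUDPInbound.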
func UDPProxyServiceInbound(s NetcForUDPProxy, host string, port int, node string, service string, nett net_interface.NetterUDP) error { connMap := make(map[string]netceptor.PacketConner) buffer := make([]byte, utils.NormalBufferSize) addrStr := fmt.Sprintf("%s:%d", host, port) udpAddr, err := nett.ResolveUDPAddr("udp", addrStr) if err != nil { return fmt.Errorf("could not resolve address %s", addrStr) } uc, err := nett.ListenUDP("udp", udpAddr) if err != nil { return fmt.Errorf("error listening on UDP: %s", err) } ncAddr := s.NewAddr(node, service) go func() { for { n, addr, err := uc.ReadFrom(buffer) if err != nil { s.GetLogger().Error("Error reading from UDP: %s\n", err) return } raddrStr := addr.String() pc, ok := connMap[raddrStr] if !ok { pc, err = s.ListenPacket("") if err != nil { s.GetLogger().Error("Error listening on Receptor network: %s\n", err) return } s.GetLogger().Debug("Received new UDP connection from %s\n", raddrStr) connMap[raddrStr] = pc go runNetceptorToUDPInbound(pc, uc, addr, s.NewAddr(node, service), s.GetLogger()) } wn, err := pc.WriteTo(buffer[:n], ncAddr) if err != nil { s.GetLogger().Error("Error sending packet on Receptor network: %s\n", err) continue } if wn != n { s.GetLogger().Debug("Not all bytes written on Receptor network\n") continue } } }() return nil } func runNetceptorToUDPInbound(pc netceptor.PacketConner, uc net_interface.UDPConnInterface, udpAddr net.Addr, expectedAddr netceptor.Addr, logger *logger.ReceptorLogger) { buf := make([]byte, utils.NormalBufferSize) for { n, addr, err := pc.ReadFrom(buf) if err != nil { pc.GetLogger().Error("Error reading from Receptor network: %s\n", err) continue } if addr != expectedAddr { pc.GetLogger().Debug("Received packet from unexpected source %s\n", addr) continue } wn, err := uc.WriteTo(buf[:n], udpAddr) if err != nil { pc.GetLogger().Error("Error sending packet via UDP: %s\n", err) continue } if wn != n { pc.GetLogger().Debug("Not all bytes written via UDP\n") continue } } } // UDPProxyServiceOutbound listens on the Receptor network and forwards packets via UDP. 
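// One outbound UDP connection is dialed per Receptor-side peer address, and traffic
// coming back over UDP is returned to that peer by runUDPToNetceptorOutbound.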
func UDPProxyServiceOutbound(s NetcForUDPProxy, service string, address string, nett net_interface.NetterUDP) error { connMap := make(map[string]net_interface.UDPConnInterface) buffer := make([]byte, utils.NormalBufferSize) udpAddr, err := nett.ResolveUDPAddr("udp", address) if err != nil { return fmt.Errorf("could not resolve UDP address %s", address) } pc, err := s.ListenPacketAndAdvertise(service, map[string]string{ "type": "UDP Proxy", "address": address, }) if err != nil { return fmt.Errorf("error listening on service %s: %s", service, err) } go func() { for { n, addr, err := pc.ReadFrom(buffer) if err != nil { s.GetLogger().Error("Error reading from Receptor network: %s\n", err) return } raddrStr := addr.String() uc, ok := connMap[raddrStr] if !ok { uc, err = nett.DialUDP("udp", nil, udpAddr) if err != nil { s.GetLogger().Error("Error connecting via UDP: %s\n", err) return } s.GetLogger().Debug("Opened new UDP connection to %s\n", raddrStr) connMap[raddrStr] = uc go runUDPToNetceptorOutbound(uc, pc, addr, s.GetLogger()) } wn, err := uc.Write(buffer[:n]) if err != nil { s.GetLogger().Error("Error writing to UDP: %s\n", err) continue } if wn != n { s.GetLogger().Debug("Not all bytes written to UDP\n") continue } } }() return nil } func runUDPToNetceptorOutbound(uc net_interface.UDPConnInterface, pc netceptor.PacketConner, addr net.Addr, logger *logger.ReceptorLogger) { buf := make([]byte, utils.NormalBufferSize) for { n, err := uc.Read(buf) if err != nil { pc.GetLogger().Error("Error reading from UDP: %s\n", err) return } wn, err := pc.WriteTo(buf[:n], addr) if err != nil { pc.GetLogger().Error("Error writing to the Receptor network: %s\n", err) continue } if wn != n { pc.GetLogger().Debug("Not all bytes written to the Netceptor network\n") continue } } } // udpProxyInboundCfg is the cmdline configuration object for a UDP inbound proxy. type UDPProxyInboundCfg struct { Port int `required:"true" description:"Local UDP port to bind to"` BindAddr string `description:"Address to bind UDP listener to" default:"0.0.0.0"` RemoteNode string `required:"true" description:"Receptor node to connect to"` RemoteService string `required:"true" description:"Receptor service name to connect to"` } // Run runs the action. func (cfg UDPProxyInboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running UDP inbound proxy service %v\n", cfg) return UDPProxyServiceInbound(netceptor.MainInstance, cfg.BindAddr, cfg.Port, cfg.RemoteNode, cfg.RemoteService, &Net{}) } // udpProxyOutboundCfg is the cmdline configuration object for a UDP outbound proxy. type UDPProxyOutboundCfg struct { Service string `required:"true" description:"Receptor service name to bind to"` Address string `required:"true" description:"Address for outbound UDP connection"` } // Run runs the action. 
func (cfg UDPProxyOutboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running UDP outbound proxy service %s\n", cfg) return UDPProxyServiceOutbound(netceptor.MainInstance, cfg.Service, cfg.Address, &Net{}) } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-proxies", "udp-server", "Listen for UDP and forward via Receptor", UDPProxyInboundCfg{}, cmdline.Section(servicesSection)) cmdline.RegisterConfigTypeForApp("receptor-proxies", "udp-client", "Listen on a Receptor service and forward via UDP", UDPProxyOutboundCfg{}, cmdline.Section(servicesSection)) } receptor-1.5.3/pkg/services/udp_proxy_test.go000066400000000000000000000117761475465740700214250ustar00rootroot00000000000000package services import ( "errors" "testing" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/netceptor/mock_netceptor" mock_net_interface "github.com/ansible/receptor/pkg/services/interfaces/mock_interfaces" "github.com/ansible/receptor/pkg/services/mock_services" "go.uber.org/mock/gomock" ) func setUpMocks(ctrl *gomock.Controller) (*mock_services.MockNetcForUDPProxy, *mock_net_interface.MockNetterUDP, *mock_net_interface.MockUDPConnInterface, *mock_netceptor.MockPacketConner) { mockNetceptor := mock_services.NewMockNetcForUDPProxy(ctrl) mockNetter := mock_net_interface.NewMockNetterUDP(ctrl) mockUDPConn := mock_net_interface.NewMockUDPConnInterface(ctrl) mockPacketCon := mock_netceptor.NewMockPacketConner(ctrl) logger := logger.NewReceptorLogger("") mockNetceptor.EXPECT().GetLogger().AnyTimes().Return(logger) return mockNetceptor, mockNetter, mockUDPConn, mockPacketCon } func TestUDPProxyServiceInbound(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() var mockNetceptor *mock_services.MockNetcForUDPProxy var mockNetter *mock_net_interface.MockNetterUDP var mockUDPConn *mock_net_interface.MockUDPConnInterface var mockPacketCon *mock_netceptor.MockPacketConner type testCase struct { name string host string port int node string service string expectErr bool calls func() } tests := []testCase{ { name: "Fail ResolveUDPAddr", expectErr: true, calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, errors.New("RecolveUDPAddr error")) }, }, { name: "Fail ListenUDP", expectErr: true, calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, nil) mockNetter.EXPECT().ListenUDP(gomock.Any(), gomock.Any()).Return(nil, errors.New("Listen Udp Error")) }, }, { name: "Fail UDP Con Read From", calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, nil) mockNetter.EXPECT().ListenUDP(gomock.Any(), gomock.Any()).Return(mockUDPConn, nil) mockNetceptor.EXPECT().NewAddr(gomock.Any(), gomock.Any()).Return(netceptor.Addr{}) mockUDPConn.EXPECT().ReadFrom(gomock.Any()).Return(0, nil, errors.New("Read From error")).AnyTimes() }, }, { name: "Fail Netceptor listen packet", calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, nil) mockNetter.EXPECT().ListenUDP(gomock.Any(), gomock.Any()).Return(mockUDPConn, nil) mockNetceptor.EXPECT().NewAddr(gomock.Any(), gomock.Any()).Return(netceptor.Addr{}) mockUDPConn.EXPECT().ReadFrom(gomock.Any()).Return(0, netceptor.Addr{}, nil).AnyTimes() mockNetceptor.EXPECT().ListenPacket(gomock.Any()).Return(mockPacketCon, errors.New("Clean Up error")).AnyTimes() }, }, } for _, tc := range tests { t.Run(tc.name, 
func(t *testing.T) { mockNetceptor, mockNetter, mockUDPConn, mockPacketCon = setUpMocks(ctrl) tc.calls() err := UDPProxyServiceInbound(mockNetceptor, tc.host, tc.port, tc.node, tc.service, mockNetter) if tc.expectErr { if err == nil { t.Errorf("net UDPProxyServiceInbound fail case error") } return } else if err != nil { t.Errorf("net UDPProxyServiceInbound error") } }) } } func TestUDPProxyServiceOutbound(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() var mockNetceptor *mock_services.MockNetcForUDPProxy var mockNetter *mock_net_interface.MockNetterUDP var mockPacketCon *mock_netceptor.MockPacketConner type testCase struct { name string service string address string expectErr bool calls func() } tests := []testCase{ { name: "Fail ResolveUDPAddr", expectErr: true, calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, errors.New("RecolveUDPAddr error")) }, }, { name: "Fail Listen And Advertive", expectErr: true, calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, nil) mockNetceptor.EXPECT().ListenPacketAndAdvertise(gomock.Any(), gomock.Any()).Return(nil, errors.New("Netceptor Listen Error")) }, }, { name: "Fail Read From", calls: func() { mockNetter.EXPECT().ResolveUDPAddr(gomock.Any(), gomock.Any()).Return(nil, nil) mockNetceptor.EXPECT().ListenPacketAndAdvertise(gomock.Any(), gomock.Any()).Return(mockPacketCon, nil) mockPacketCon.EXPECT().ReadFrom(gomock.Any()).Return(0, nil, errors.New("Read From error")).AnyTimes() }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockNetceptor, mockNetter, _, mockPacketCon = setUpMocks(ctrl) tc.calls() err := UDPProxyServiceOutbound(mockNetceptor, tc.service, tc.address, mockNetter) if tc.expectErr { if err == nil { t.Errorf("net UDPProxyServiceOutbound fail case error") } return } else if err != nil { t.Errorf("net UDPProxyServiceOutbound error") } }) } } receptor-1.5.3/pkg/services/unix_proxy.go000066400000000000000000000102111475465740700205400ustar00rootroot00000000000000package services import ( "crypto/tls" "fmt" "net" "os" "runtime" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // UnixProxyServiceInbound listens on a Unix socket and forwards connections over the Receptor network. func UnixProxyServiceInbound(s *netceptor.Netceptor, filename string, permissions os.FileMode, node string, rservice string, tlscfg *tls.Config, ) error { uli, lock, err := utils.UnixSocketListen(filename, permissions) if err != nil { return fmt.Errorf("error opening Unix socket: %s", err) } go func() { defer lock.Unlock() for { uc, err := uli.Accept() if err != nil { s.Logger.Error("Error accepting Unix socket connection: %s", err) return } go func() { qc, err := s.Dial(node, rservice, tlscfg) if err != nil { s.Logger.Error("Error connecting on Receptor network: %s", err) return } utils.BridgeConns(uc, "unix socket service", qc, "receptor connection", s.Logger) }() } }() return nil } // UnixProxyServiceOutbound listens on the Receptor network and forwards the connection via a Unix socket. 
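// Each connection accepted on the advertised Receptor service is bridged to a new
// connection dialed against the given Unix socket filename.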
func UnixProxyServiceOutbound(s *netceptor.Netceptor, service string, tlscfg *tls.Config, filename string) error { qli, err := s.ListenAndAdvertise(service, tlscfg, map[string]string{ "type": "Unix Proxy", "filename": filename, }) if err != nil { return fmt.Errorf("error listening on Receptor network: %s", err) } go func() { for { qc, err := qli.Accept() if err != nil { s.Logger.Error("Error accepting connection on Receptor network: %s\n", err) return } uc, err := net.Dial("unix", filename) if err != nil { s.Logger.Error("Error connecting via Unix socket: %s\n", err) continue } go utils.BridgeConns(qc, "receptor service", uc, "unix socket connection", s.Logger) } }() return nil } // unixProxyInboundCfg is the cmdline configuration object for a Unix socket inbound proxy. type UnixProxyInboundCfg struct { Filename string `required:"true" description:"Socket filename, which will be overwritten"` Permissions int `description:"Socket file permissions" default:"0600"` RemoteNode string `required:"true" description:"Receptor node to connect to"` RemoteService string `required:"true" description:"Receptor service name to connect to"` TLS string `description:"Name of TLS client config for the Receptor connection"` } // Run runs the action. func (cfg UnixProxyInboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running Unix socket inbound proxy service %v\n", cfg) tlscfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLS, cfg.RemoteNode, netceptor.ExpectedHostnameTypeReceptor) if err != nil { return err } return UnixProxyServiceInbound(netceptor.MainInstance, cfg.Filename, os.FileMode(cfg.Permissions), //nolint:gosec cfg.RemoteNode, cfg.RemoteService, tlscfg) } // unixProxyOutboundCfg is the cmdline configuration object for a Unix socket outbound proxy. type UnixProxyOutboundCfg struct { Service string `required:"true" description:"Receptor service name to bind to"` Filename string `required:"true" description:"Socket filename, which must already exist"` TLS string `description:"Name of TLS server config for the Receptor connection"` } // Run runs the action. 
func (cfg UnixProxyOutboundCfg) Run() error { netceptor.MainInstance.Logger.Debug("Running Unix socket inbound proxy service %s\n", cfg) tlscfg, err := netceptor.MainInstance.GetServerTLSConfig(cfg.TLS) if err != nil { return err } return UnixProxyServiceOutbound(netceptor.MainInstance, cfg.Service, tlscfg, cfg.Filename) } func init() { version := viper.GetInt("version") if version > 1 { return } if runtime.GOOS != "windows" { cmdline.RegisterConfigTypeForApp("receptor-proxies", "unix-socket-server", "Listen on a Unix socket and forward via Receptor", UnixProxyInboundCfg{}, cmdline.Section(servicesSection)) cmdline.RegisterConfigTypeForApp("receptor-proxies", "unix-socket-client", "Listen via Receptor and forward to a Unix socket", UnixProxyOutboundCfg{}, cmdline.Section(servicesSection)) } } receptor-1.5.3/pkg/services/unix_proxy_test.go000066400000000000000000000030411475465740700216020ustar00rootroot00000000000000package services import ( "context" "crypto/tls" "os" "testing" "github.com/ansible/receptor/pkg/netceptor" ) func TestUnixProxyServiceInbound(t *testing.T) { type testCase struct { name string filename string permissions os.FileMode node string rservice string tlscfg *tls.Config expecterr bool } tests := []testCase{ { name: "Fail UnixSocketListen", expecterr: true, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() s := netceptor.New(ctx, "Unix Test Node") err := UnixProxyServiceInbound(s, tc.filename, tc.permissions, tc.node, tc.rservice, tc.tlscfg) if tc.expecterr { if err == nil { t.Errorf("net UnixProxyServiceInbound fail case error") } return } else if err != nil { t.Errorf("net UnixProxyServiceInbound error") } }) } } func TestUnixProxyServiceOutbound(t *testing.T) { type testCase struct { name string expecterr bool service string tlscfg *tls.Config filename string } tests := []testCase{ { name: "Fail UnixSocketListen", }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := context.Background() s := netceptor.New(ctx, "Unix Test Node") err := UnixProxyServiceOutbound(s, tc.service, tc.tlscfg, tc.filename) if tc.expecterr { if err == nil { t.Errorf("net UnixProxyServiceInbound fail case error") } return } else if err != nil { t.Errorf("net UnixProxyServiceInbound error") } }) } } receptor-1.5.3/pkg/tickrunner/000077500000000000000000000000001475465740700163335ustar00rootroot00000000000000receptor-1.5.3/pkg/tickrunner/tickrunner.go000066400000000000000000000020361475465740700210470ustar00rootroot00000000000000package tickrunner import ( "context" "time" ) // Run runs a task at a given periodic interval, or as requested over a channel. // If many requests come in close to the same time, only run the task once. // Callers can ask for the task to be run within a given amount of time, which // overrides defaultReqDelay. Sending a zero to the channel runs it immediately. 
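//
// A minimal usage sketch (ctx and doWork are assumed to exist in the caller):
//
//	runChan := Run(ctx, doWork, time.Minute, 5*time.Second)
//	runChan <- 0 // request a run soon, using the default request delay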
func Run(ctx context.Context, f func(), periodicInterval time.Duration, defaultReqDelay time.Duration) chan time.Duration { runChan := make(chan time.Duration) go func() { nextRunTime := time.Now().Add(periodicInterval) for { select { case <-time.After(time.Until(nextRunTime)): nextRunTime = time.Now().Add(periodicInterval) f() case req := <-runChan: proposedTime := time.Now() if req == 0 { proposedTime = proposedTime.Add(defaultReqDelay) } else { proposedTime = proposedTime.Add(req) } if proposedTime.Before(nextRunTime) { nextRunTime = proposedTime } case <-ctx.Done(): return } } }() return runChan } receptor-1.5.3/pkg/types/000077500000000000000000000000001475465740700153135ustar00rootroot00000000000000receptor-1.5.3/pkg/types/main.go000066400000000000000000000142761475465740700166000ustar00rootroot00000000000000package types import ( "context" "fmt" "os" "regexp" "runtime" "strings" "time" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/workceptor" "github.com/grafana/pyroscope-go" "github.com/sirupsen/logrus" "gopkg.in/yaml.v2" ) type NodeCfg struct { ID string `description:"Node ID. Defaults to the local hostname." barevalue:"yes"` DataDir string `description:"Directory in which to store node data." default:"/tmp/receptor"` FirewallRules []netceptor.FirewallRuleData `description:"Firewall rules, see documentation for syntax."` MaxIdleConnectionTimeout string `description:"Maximum duration with no traffic before a backend connection is timed out and refreshed."` ReceptorKubeSupportReconnect string ReceptorKubeClientsetQPS string ReceptorKubeClientsetBurst string ReceptorKubeClientsetRateLimiter string } var receptorDataDir string func (cfg NodeCfg) Init() error { var err error if cfg.ID == "" { host, err := os.Hostname() if err != nil { return err } lchost := strings.ToLower(host) if lchost == "localhost" || strings.HasPrefix(lchost, "localhost.") { return fmt.Errorf("no node ID specified and local host name is localhost") } cfg.ID = host } else { submitIDRegex := regexp.MustCompile(`^[.\-_@:a-zA-Z0-9]*$`) match := submitIDRegex.FindSubmatch([]byte(cfg.ID)) if match == nil { return fmt.Errorf("node id can only contain a-z, A-Z, 0-9 or special characters . - _ @ : but received: %s", cfg.ID) } } if strings.ToLower(cfg.ID) == "localhost" { return fmt.Errorf("node ID \"localhost\" is reserved") } receptorDataDir = cfg.DataDir netceptor.MainInstance = netceptor.New(context.Background(), cfg.ID) if len(cfg.FirewallRules) > 0 { rules, err := netceptor.ParseFirewallRules(cfg.FirewallRules) if err != nil { return err } err = netceptor.MainInstance.AddFirewallRules(rules, true) if err != nil { return err } } // update netceptor.MainInstance with the MaxIdleConnectionTimeout from the nodeCfg struct // this is a fall-forward mechanism. 
If the user didn't provide a value for MaxIdleConnectionTimeout in their configuration file, // we will apply the default timeout of 30s to netceptor.maxConnectionIdleTime if cfg.MaxIdleConnectionTimeout != "" { err = netceptor.MainInstance.SetMaxConnectionIdleTime(cfg.MaxIdleConnectionTimeout) if err != nil { return err } } workceptor.MainInstance, err = workceptor.New(context.Background(), netceptor.MainInstance, receptorDataDir) if err != nil { return err } controlsvc.MainInstance = controlsvc.New(true, netceptor.MainInstance) err = workceptor.MainInstance.RegisterWithControlService(controlsvc.MainInstance) if err != nil { return err } return nil } func (cfg NodeCfg) Run() error { workceptor.MainInstance.ListKnownUnitIDs() // Triggers a scan of unit dirs and restarts any that need it return nil } type ReceptorPyroscopeCfg struct { ApplicationName string Tags map[string]string ServerAddress string // e.g http://pyroscope.services.internal:4040 BasicAuthUser string // http basic auth user BasicAuthPassword string // http basic auth password TenantID string // specify TenantId when using phlare multi-tenancy UploadRate string ProfileTypes []string DisableGCRuns bool // this will disable automatic runtime.GC runs between getting the heap profiles HTTPHeaders map[string]string } type UploadRate struct { UploadRate time.Duration `yaml:"uploadRate"` } func (pyroscopeCfg ReceptorPyroscopeCfg) Init() error { if pyroscopeCfg.ApplicationName == "" { return nil } runtime.SetMutexProfileFraction(5) runtime.SetBlockProfileRate(5) pyroscopeLogger := logrus.New() pyroscopeLogger.SetLevel(logrus.DebugLevel) if _, err := os.Stat(receptorDataDir); os.IsNotExist(err) { err := os.MkdirAll(receptorDataDir, 0o700) if err != nil { fmt.Printf("error creating directory: %v", err) } } logFile, err := os.OpenFile(fmt.Sprintf("%s/pyroscope.log", receptorDataDir), os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { pyroscopeLogger.Fatalf("Error opening log file: %v", err) } pyroscopeLogger.SetOutput(logFile) pyroscopeLogger.SetFormatter(&logrus.JSONFormatter{}) _, err = pyroscope.Start(pyroscope.Config{ ApplicationName: pyroscopeCfg.ApplicationName, Tags: pyroscopeCfg.Tags, ServerAddress: pyroscopeCfg.ServerAddress, BasicAuthUser: pyroscopeCfg.BasicAuthUser, BasicAuthPassword: pyroscopeCfg.BasicAuthPassword, TenantID: pyroscopeCfg.TenantID, UploadRate: getUploadRate(pyroscopeCfg), Logger: pyroscopeLogger, ProfileTypes: getProfileTypes(pyroscopeCfg), DisableGCRuns: pyroscopeCfg.DisableGCRuns, HTTPHeaders: pyroscopeCfg.HTTPHeaders, }) if err != nil { return err } else { return nil } } func getUploadRate(cfg ReceptorPyroscopeCfg) time.Duration { if cfg.UploadRate == "" { return 15 * time.Second } var uploadRate UploadRate err := yaml.Unmarshal([]byte(cfg.UploadRate), &uploadRate) if err != nil { fmt.Println("failed to parse uploadRate from config file") } return uploadRate.UploadRate } func getProfileTypes(cfg ReceptorPyroscopeCfg) []pyroscope.ProfileType { profileType := []pyroscope.ProfileType{ pyroscope.ProfileCPU, pyroscope.ProfileAllocObjects, pyroscope.ProfileAllocSpace, pyroscope.ProfileInuseObjects, pyroscope.ProfileInuseSpace, } if len(cfg.ProfileTypes) == 0 { return profileType } for _, pt := range cfg.ProfileTypes { switch pt { case "ProfileGoroutines": profileType = append(profileType, pyroscope.ProfileGoroutines) case "ProfileMutexCount": profileType = append(profileType, pyroscope.ProfileMutexCount) case "ProfileMutexDuration": profileType = append(profileType, pyroscope.ProfileMutexDuration) case 
"ProfileBlockCount": profileType = append(profileType, pyroscope.ProfileBlockCount) case "ProfileBlockDuration": profileType = append(profileType, pyroscope.ProfileBlockDuration) } } return profileType } receptor-1.5.3/pkg/types/main_test.go000066400000000000000000000017101475465740700176240ustar00rootroot00000000000000package types import "testing" func TestMainInitNodeID(t *testing.T) { mainInitNodeIDTestCases := []struct { name string nodeID string expectedErr string }{ { name: "successful, no error", nodeID: "t.e-s_t@1:234", expectedErr: "", }, { name: "failed, charactered not allowed", nodeID: "test!#&123", expectedErr: "node id can only contain a-z, A-Z, 0-9 or special characters . - _ @ : but received: test!#&123", }, } for _, testCase := range mainInitNodeIDTestCases { t.Run(testCase.name, func(t *testing.T) { cfg := NodeCfg{ ID: testCase.nodeID, } err := cfg.Init() if err == nil && testCase.expectedErr != "" { t.Errorf("exected error but got no error") } else if err != nil && err.Error() != testCase.expectedErr { t.Errorf("expected error to be %s, but got: %s", testCase.expectedErr, err.Error()) } t.Cleanup(func() { cfg = NodeCfg{} }) }) } } receptor-1.5.3/pkg/utils/000077500000000000000000000000001475465740700153075ustar00rootroot00000000000000receptor-1.5.3/pkg/utils/bridge.go000066400000000000000000000031021475465740700170660ustar00rootroot00000000000000package utils import ( "io" "strings" "github.com/ansible/receptor/pkg/logger" ) // NormalBufferSize is the size of buffers used by various processes when copying data between sockets. const NormalBufferSize = 65536 // BridgeConns bridges two connections, like netcat. func BridgeConns(c1 io.ReadWriteCloser, c1Name string, c2 io.ReadWriteCloser, c2Name string, logger *logger.ReceptorLogger) { doneChan := make(chan bool) go bridgeHalf(c1, c1Name, c2, c2Name, doneChan, logger) go bridgeHalf(c2, c2Name, c1, c1Name, doneChan, logger) <-doneChan <-doneChan } // BridgeHalf bridges the read side of c1 to the write side of c2. func bridgeHalf(c1 io.ReadWriteCloser, c1Name string, c2 io.ReadWriteCloser, c2Name string, done chan bool, logger *logger.ReceptorLogger) { logger.Trace(" Bridging %s to %s\n", c1Name, c2Name) defer func() { done <- true }() buf := make([]byte, NormalBufferSize) shouldClose := false for { n, err := c1.Read(buf) if err != nil { if err.Error() != "EOF" && !strings.Contains(err.Error(), "use of closed network connection") { logger.Error("Connection read error: %s\n", err) } shouldClose = true } if n > 0 { logger.Trace(" Copied %d bytes from %s to %s\n", n, c1Name, c2Name) wn, err := c2.Write(buf[:n]) if err != nil { logger.Error("Connection write error: %s\n", err) shouldClose = true } if wn != n { logger.Error("Not all bytes written\n") shouldClose = true } } if shouldClose { logger.Trace(" Stopping bridge %s to %s\n", c1Name, c2Name) _ = c2.Close() return } } } receptor-1.5.3/pkg/utils/broker.go000066400000000000000000000040241475465740700171220ustar00rootroot00000000000000package utils import ( "context" "fmt" "reflect" "sync" ) // Broker code adapted from https://stackoverflow.com/questions/36417199/how-to-broadcast-message-using-channel // which is licensed under Creative Commons CC BY-SA 4.0. // Broker implements a simple pub-sub broadcast system. type Broker struct { ctx context.Context msgType reflect.Type publishCh chan interface{} subCh chan chan interface{} unsubCh chan chan interface{} } // NewBroker allocates a new Broker object. 
func NewBroker(ctx context.Context, msgType reflect.Type) *Broker { b := &Broker{ ctx: ctx, msgType: msgType, publishCh: make(chan interface{}), subCh: make(chan chan interface{}), unsubCh: make(chan chan interface{}), } go b.start() return b } // start starts the broker goroutine. func (b *Broker) start() { subs := map[chan interface{}]struct{}{} for { select { case <-b.ctx.Done(): for ch := range subs { close(ch) } return case msgCh := <-b.subCh: subs[msgCh] = struct{}{} case msgCh := <-b.unsubCh: delete(subs, msgCh) close(msgCh) case msg := <-b.publishCh: wg := sync.WaitGroup{} for msgCh := range subs { wg.Add(1) go func(msgCh chan interface{}) { defer wg.Done() select { case msgCh <- msg: case <-b.ctx.Done(): } }(msgCh) } wg.Wait() } } } // Subscribe registers to receive messages from the broker. func (b *Broker) Subscribe() chan interface{} { msgCh := make(chan interface{}) select { case <-b.ctx.Done(): return nil case b.subCh <- msgCh: return msgCh } } // Unsubscribe de-registers a message receiver. func (b *Broker) Unsubscribe(msgCh chan interface{}) { select { case <-b.ctx.Done(): case b.unsubCh <- msgCh: } } // Publish sends a message to all subscribers. func (b *Broker) Publish(msg interface{}) error { if reflect.TypeOf(msg) != b.msgType { return fmt.Errorf("messages to broker must be of type %s", b.msgType.String()) } select { case <-b.ctx.Done(): case b.publishCh <- msg: } return nil } receptor-1.5.3/pkg/utils/common.go000066400000000000000000000011061475465740700171240ustar00rootroot00000000000000package utils import ( "crypto/x509" "github.com/ansible/receptor/pkg/logger" ) func ParseReceptorNamesFromCert(cert *x509.Certificate, expectedHostname string, logger *logger.ReceptorLogger) (bool, []string, error) { var receptorNames []string receptorNames, err := ReceptorNames(cert.Extensions) if err != nil { logger.Error("RVF failed to get ReceptorNames: %s", err) return false, nil, err } found := false for _, receptorName := range receptorNames { if receptorName == expectedHostname { found = true break } } return found, receptorNames, nil } receptor-1.5.3/pkg/utils/common_test.go000066400000000000000000000103131475465740700201630ustar00rootroot00000000000000package utils_test import ( "crypto/x509" "crypto/x509/pkix" "encoding/asn1" "encoding/pem" "math/big" "reflect" "testing" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/utils" ) func TestParseReceptorNamesFromCert(t *testing.T) { type args struct { cert *x509.Certificate expectedHostname string logger *logger.ReceptorLogger } const testGoodCertPEMData = ` -----BEGIN CERTIFICATE----- MIIFRDCCAyygAwIBAgIEYiZSQDANBgkqhkiG9w0BAQsFADA7MTkwNwYDVQQDEzBB bnNpYmxlIEF1dG9tYXRpb24gQ29udHJvbGxlciBOb2RlcyBNZXNoIFJPT1QgQ0Ew HhcNMjIwMzA3MTg0MzEyWhcNMzExMjI4MDUwMzUxWjARMQ8wDQYDVQQDEwZmb29i YXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnXsRTTIoV2Oqh5zvN JzQBYOZPpxmnKzwLvgeop44Csk++zARvg5XIpmPbSEU2PY3pNGvLTH6nD54/ZfOI RzSN0ipvfcrpJtkrJ7OYo1gX7ROXM30x3bj2KcJ/cMgMiZMQLqPegKhtMHLGz0TX +0MfJ5jqTlowVjSAyUhK6pMtf2ISpHqOA6uvmLhUhkruUrUkHMnbwWMTzrO8QDMa dLvV+hiWZNZFaf6Xt3lNBRY+yrXuSG7ZOc/6UsWDb4NVALL1mJ0IjfSeiK58Sf8V HUY4MEjy8VW2lfARU/mcNkxrUY1DBNp5zcHMhwoLkLId90PyFyzXMDCvZxHrGEwt Z23UAYY/wAvw1XWm5XJBiLzaL12dStuHeZgtAUOucQHvEOglvPilU6vKf5PFdxqo KEOwXtgLUTlw4otm2bWx5p2LPlxkPApbAv7UaxiTbcpuIMh8WTTSk/EUgpyEUjph iN0uqnp2fH9Mmyn8hgSB/Kf6FhIZFl3VMNN6x8VTkqLkzVG8Ud48gFHfraVQXvaL cDDCLxTeda6Th6uTw2zCifBzXbWxZKjlinx8MEM/kIA1we/wlwsYwpQNhkbOorR3 eJ6Exdl1Ar8l3jHp293hCvxUNuzG5Z9oPDMQ6MSm8xxrBN2pYZNL3DCWaJ0njuNj 
YeNR7l7s+9ibX5RD+uASiC6hOwIDAQABo3oweDAOBgNVHQ8BAf8EBAMCB4AwHQYD VR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMB8GA1UdIwQYMBaAFFAlC81rH211 fPJoWglERKb/7/NfMCYGA1UdEQQfMB2HBH8AAAGgFQYJKwYBBAGSCBMBoAgMBmZv b2JhcjANBgkqhkiG9w0BAQsFAAOCAgEAbzKRqx2i8S0Kuu0bIX094EoGiGSTWW4l YNHwn9mC/5KgzjSvxTkD0pInt31d5O27rK7/wMVezeqBIG92uwwZr7ndS6Fe0FT1 7tMZ1VH5VetIiicbu3AYssqMs/JYEocqOngLh/pGHmlwcnmPpCltipcE50bv9YWn O8Yc5O7v16SxHzGsDUDO5eQAe2qvBaE5F5SBCVkjSoajmh3fdx/4eSzoF2wrug3/ O+WAb70UXX6r8dmRpr4RezQ6XPWAG57BgU3g0NUkczFo5gFndBUJngLhR6wr14xB st21haZ65XIA46PB8jY04l/H2INwCzo++PlKJ3ROKwLXYDSZlgQ3X9XxsSzCX3Hs viK9Ybzp2W8sl1Pvtb/jodcNTpD2IB8IrWnvuOgnwVmewqAqlxM7Ers9kC83lBpt EhAXh0QyJ5BpHOkpm4jpVhOx1swHTBDoibysvpdr5KuuOm1JTr7cYRYhIe65rVz3 aL0PryzHdvQB97LhYAaUPtFnxNxUIeXKZO3Ndg/KSrSe4IqGz51uKjxJy+MnH9// nnG0JqlerSVvSPSiZ2kdn4OwzV2eA3Gj3uyTSGsjjoj82bhhRwKaSWmUh+AJByQ9 kE6r/6za1Hvm+i/mz8f1cTUxFjF5pKzrprNRz5NMzs6NkQ0pg+mq5CNzav1ATSyv Bdt96MbGrC0= -----END CERTIFICATE-----` goodBlock, _ := pem.Decode([]byte(testGoodCertPEMData)) testGoodCert, _ := x509.ParseCertificate(goodBlock.Bytes) extSubjectAltName := pkix.Extension{} extSubjectAltName.Id = asn1.ObjectIdentifier{2, 5, 29, 17} extSubjectAltName.Critical = false testUglyCert, _ := x509.ParseCertificate(goodBlock.Bytes) pi, _ := new(big.Int).SetString("3.14159", 10) extSubjectAltName.Value, _ = asn1.Marshal(pi) testUglyCert.Extensions = append(testUglyCert.Extensions, extSubjectAltName) var uglyWant1 []string tests := []struct { name string args args want bool want1 []string wantErr bool }{ { name: "Positive test", args: args{ cert: testGoodCert, expectedHostname: "foobar", logger: logger.NewReceptorLogger("test"), }, want: true, // found want1: []string{"foobar"}, // ReceptorNames wantErr: false, }, { name: "Non-matching hostname test", args: args{ cert: testGoodCert, expectedHostname: "foobaz-server", logger: logger.NewReceptorLogger("test"), }, want: false, want1: []string{"foobar"}, wantErr: false, }, { name: "Error test", args: args{ cert: testUglyCert, expectedHostname: "foobaz-server", logger: logger.NewReceptorLogger("test"), }, want: false, want1: uglyWant1, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, got1, err := utils.ParseReceptorNamesFromCert(tt.args.cert, tt.args.expectedHostname, tt.args.logger) if (err != nil) != tt.wantErr { t.Errorf("ParseReceptorNamesFromCert() error = %v, wantErr %v", err, tt.wantErr) return } if got != tt.want { t.Errorf("ParseReceptorNamesFromCert() got = %v, want %v", got, tt.want) } if !reflect.DeepEqual(got1, tt.want1) { t.Errorf("ParseReceptorNamesFromCert() got1 = %v, want %v", got1, tt.want1) } }) } } receptor-1.5.3/pkg/utils/error_kind.go000066400000000000000000000014001475465740700177670ustar00rootroot00000000000000package utils import "fmt" // ErrorWithKind represents an error wrapped with a designation of what kind of error it is. type ErrorWithKind struct { Err error Kind string } // Error returns the error text as a string. func (ek ErrorWithKind) Error() string { return fmt.Sprintf("%s error: %v", ek.Kind, ek.Err) } // WrapErrorWithKind creates an ErrorWithKind that wraps an underlying error. func WrapErrorWithKind(err error, kind string) ErrorWithKind { return ErrorWithKind{ Err: err, Kind: kind, } } // ErrorIsKind returns true if err is an ErrorWithKind of the specified kind, or false otherwise (including if nil). 
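// For example, an error produced by WrapErrorWithKind(err, "connection") reports true
// for the kind "connection", while a plain error or a nil error reports false.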
func ErrorIsKind(err error, kind string) bool { ek, ok := err.(ErrorWithKind) if !ok { return false } return ek.Kind == kind } receptor-1.5.3/pkg/utils/error_kind_test.go000066400000000000000000000046561475465740700210460ustar00rootroot00000000000000package utils_test import ( "fmt" "reflect" "testing" "github.com/ansible/receptor/pkg/utils" ) const ( goodKind string = "connection" goodErrorString string = "unit was already started" ) var errUnitWasAlreadyStarted error = fmt.Errorf(goodErrorString) //nolint:staticcheck func TestErrorWithKind_Error(t *testing.T) { type fields struct { err error kind string } tests := []struct { name string fields fields want string }{ { name: "Positive", fields: fields{ err: errUnitWasAlreadyStarted, kind: goodKind, }, want: fmt.Sprintf("%s error: %s", goodKind, goodErrorString), }, { name: "Negative", fields: fields{ err: nil, kind: goodKind, }, want: fmt.Sprintf("%s error: ", goodKind), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { ek := utils.ErrorWithKind{ Err: tt.fields.err, Kind: tt.fields.kind, } if got := ek.Error(); got != tt.want { t.Errorf("ErrorWithKind.Error() = %v, want %v", got, tt.want) } }) } } func TestWrapErrorWithKind(t *testing.T) { type args struct { err error kind string } tests := []struct { name string args args want utils.ErrorWithKind }{ { name: "Positive", args: args{ err: errUnitWasAlreadyStarted, kind: goodKind, }, want: utils.ErrorWithKind{ Err: errUnitWasAlreadyStarted, Kind: goodKind, }, }, { name: "Negative", args: args{ err: nil, kind: goodKind, }, want: utils.ErrorWithKind{ Err: nil, Kind: goodKind, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := utils.WrapErrorWithKind(tt.args.err, tt.args.kind); !reflect.DeepEqual(got, tt.want) { t.Errorf("WrapErrorWithKind() = %v, want %v", got, tt.want) } }) } } func TestErrorIsKind(t *testing.T) { type args struct { err error kind string } tests := []struct { name string args args want bool }{ { name: "Positive", args: args{ err: utils.WrapErrorWithKind( errUnitWasAlreadyStarted, goodKind, ), kind: goodKind, }, want: true, }, { name: "Negative", args: args{ err: nil, kind: goodKind, }, want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := utils.ErrorIsKind(tt.args.err, tt.args.kind); got != tt.want { t.Errorf("ErrorIsKind() = %v, want %v", got, tt.want) } }) } } receptor-1.5.3/pkg/utils/flock.go000066400000000000000000000015151475465740700167360ustar00rootroot00000000000000//go:build !windows // +build !windows package utils import ( "fmt" "syscall" ) // ErrLocked is returned when the flock is already held. var ErrLocked = fmt.Errorf("fslock is already locked") // FLock represents a file lock. type FLock struct { Fd int } // TryFLock non-blockingly attempts to acquire a lock on the file. func TryFLock(filename string) (*FLock, error) { fd, err := syscall.Open(filename, syscall.O_CREAT|syscall.O_RDONLY|syscall.O_CLOEXEC, syscall.S_IRUSR|syscall.S_IWUSR) if err != nil { return nil, err } err = syscall.Flock(fd, syscall.LOCK_EX|syscall.LOCK_NB) if err == syscall.EWOULDBLOCK { err = ErrLocked } if err != nil { _ = syscall.Close(fd) return nil, err } return &FLock{Fd: fd}, nil } // Unlock unlocks the file lock. 
func (lock *FLock) Unlock() error { return syscall.Close(lock.Fd) } receptor-1.5.3/pkg/utils/flock_test.go000066400000000000000000000030531475465740700177740ustar00rootroot00000000000000//go:build !windows // +build !windows package utils_test import ( "os" "path/filepath" "testing" "github.com/ansible/receptor/pkg/utils" ) func TestTryFLock(t *testing.T) { type args struct { filename string } tests := []struct { name string args args want *utils.FLock wantErr bool }{ { name: "Positive", args: args{ filename: filepath.Join(os.TempDir(), "good_flock_listener"), }, want: &utils.FLock{0}, wantErr: false, }, { name: "Negative", args: args{ filename: "", }, want: &utils.FLock{}, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := utils.TryFLock(tt.args.filename) if (err != nil) != tt.wantErr { t.Errorf("%s: TryFLock(): error = %v, wantErr %v", tt.name, err, tt.wantErr) return } if err == nil { if got.Fd < 0 { t.Errorf("%s: UnixSocketListen(): Invalid got Fd = %+v", tt.name, got) } } }) } } func TestFLock_Unlock(t *testing.T) { type fields struct { Fd int } tests := []struct { name string fields fields wantErr bool }{ { name: "Positive", fields: fields{ Fd: 1, }, wantErr: false, }, { name: "Negative", fields: fields{ Fd: -1, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { lock := &utils.FLock{ Fd: tt.fields.Fd, } if err := lock.Unlock(); (err != nil) != tt.wantErr { t.Errorf("%s: FLock.Unlock() error = %v, wantErr %v", tt.name, err, tt.wantErr) } }) } } receptor-1.5.3/pkg/utils/flock_windows.go000066400000000000000000000010711475465740700205050ustar00rootroot00000000000000//go:build windows // +build windows package utils import ( "fmt" ) // ErrLocked is returned when the flock is already held var ErrLocked = fmt.Errorf("fslock is already locked") // FLock represents a Unix file lock, but is not usable on Windows type FLock struct{} // TryFLock is not implemented on Windows func TryFLock(filename string) (*FLock, error) { return nil, fmt.Errorf("file locks not implemented on Windows") } // Unlock is not implemented on Windows func (lock *FLock) Unlock() error { return fmt.Errorf("file locks not implemented on Windows") } receptor-1.5.3/pkg/utils/incremental_duration.go000066400000000000000000000021151475465740700220430ustar00rootroot00000000000000package utils import ( "math" "time" ) // IncrementalDuration handles a time.Duration with max limits. type IncrementalDuration struct { Duration time.Duration InitialDuration time.Duration MaxDuration time.Duration multiplier float64 } // NewIncrementalDuration returns an IncrementalDuration object with initialized values. func NewIncrementalDuration(duration, maxDuration time.Duration, multiplier float64) *IncrementalDuration { return &IncrementalDuration{ Duration: duration, InitialDuration: duration, MaxDuration: maxDuration, multiplier: multiplier, } } // Reset sets current duration to initial duration. func (id *IncrementalDuration) Reset() { id.Duration = id.InitialDuration } func (id *IncrementalDuration) IncreaseDuration() { id.Duration = time.Duration(math.Min(id.multiplier*float64(id.Duration), float64(id.MaxDuration))) } // NextTimeout returns a timeout channel based on current duration. 
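// After the channel is created, the current duration is increased by the configured
// multiplier (capped at MaxDuration), so successive calls back off until Reset is called.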
func (id *IncrementalDuration) NextTimeout() <-chan time.Time { ch := time.After(id.Duration) id.IncreaseDuration() return ch } receptor-1.5.3/pkg/utils/incremental_duration_test.go000066400000000000000000000040621475465740700231050ustar00rootroot00000000000000package utils_test import ( "testing" "time" "github.com/ansible/receptor/pkg/utils" ) const newIncrementalDurationMessage string = "NewIncrementalDuration() = %v, want %v" func TestNewIncrementalDuration(t *testing.T) { type args struct { Duration time.Duration maxDuration time.Duration multiplier float64 } tests := []struct { name string args args want time.Duration }{ { name: "NewIncrementalDuration1", args: args{ Duration: 1 * time.Second, maxDuration: 10 * time.Second, multiplier: 2.0, }, want: 1 * time.Second, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := utils.NewIncrementalDuration(tt.args.Duration, tt.args.maxDuration, tt.args.multiplier); got.Duration != tt.want { t.Errorf(newIncrementalDurationMessage, got, tt.want) } }) } } func TestIncrementalDurationReset(t *testing.T) { delay := utils.NewIncrementalDuration(1*time.Second, 10*time.Second, 2.0) want1 := 1 * time.Second if delay.Duration != want1 { t.Errorf(newIncrementalDurationMessage, delay.Duration, want1) } <-delay.NextTimeout() want2 := 2 * time.Second if delay.Duration != want2 { t.Errorf(newIncrementalDurationMessage, delay.Duration, want2) } delay.Reset() if delay.Duration != want1 { t.Errorf("Reset() = %v, want %v", delay.Duration, want1) } } func TestIncrementalDurationincreaseDuration(t *testing.T) { delay := utils.NewIncrementalDuration(1*time.Second, 10*time.Second, 2.0) for i := 0; i <= 10; i++ { delay.IncreaseDuration() } want10 := 10 * time.Second if delay.Duration != want10 { t.Errorf("increaseDuration() = %v, want %v", delay.Duration, want10) } } func TestIncrementalDurationNextTimeout(t *testing.T) { delay := utils.NewIncrementalDuration(1*time.Second, 10*time.Second, 2.0) want1 := 1 * time.Second if delay.Duration != want1 { t.Errorf(newIncrementalDurationMessage, delay.Duration, want1) } <-delay.NextTimeout() want2 := 2 * time.Second if delay.Duration != want2 { t.Errorf("NextTimeout() = %v, want %v", delay.Duration, want2) } } receptor-1.5.3/pkg/utils/job_context.go000066400000000000000000000056011475465740700201560ustar00rootroot00000000000000package utils import ( "context" "sync" "time" ) // JobContext is a synchronization object that combines the functions of a Context and a WaitGroup. // The expected lifecycle is: // - Caller calls JobContext.NewJob() with a parent context and a count of workers expected. // - Caller launches the given number of workers, passing the JobContext to them. // - Workers can check for termination by using the JobContext as a context.Context. // - Workers can cancel the overall job by calling JobContext.Cancel(). // - Workers must call JobContext.WorkerDone() when they complete, like sync.WaitGroup.Done(). // - The caller, or other goroutines. can call JobContext.Wait() to wait for job completion. // // A single JobContext can only run one job at a time. If JobContext.NewJob() is called while a job // is already running, that job will be cancelled and waited on prior to starting the new job. type JobContext struct { Ctx context.Context JcCancel context.CancelFunc Wg *sync.WaitGroup JcRunning bool RunningLock *sync.Mutex } // NewJob starts a new job with a defined number of workers. If a prior job is running, it is cancelled. 
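// When returnIfRunning is true and a job is already in progress, NewJob returns false
// without starting a new one; otherwise it cancels and waits out the prior job and
// returns true once the new job has started.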
func (mw *JobContext) NewJob(ctx context.Context, workers int, returnIfRunning bool) bool { if mw.RunningLock == nil { mw.RunningLock = &sync.Mutex{} } mw.RunningLock.Lock() for mw.JcRunning { if returnIfRunning { mw.RunningLock.Unlock() return false } mw.JcCancel() mw.RunningLock.Unlock() mw.Wait() mw.RunningLock.Lock() } mw.JcRunning = true mw.Ctx, mw.JcCancel = context.WithCancel(ctx) mw.Wg = &sync.WaitGroup{} mw.Wg.Add(workers) mw.RunningLock.Unlock() go func() { mw.Wg.Wait() mw.RunningLock.Lock() mw.JcRunning = false mw.JcCancel() mw.RunningLock.Unlock() }() return true } // WorkerDone signals that a worker is finished, like sync.WaitGroup.Done(). func (mw *JobContext) WorkerDone() { mw.Wg.Done() } // Wait waits for the current job to complete, like sync.WaitGroup.Wait(). // If no job has been started, always just returns. func (mw *JobContext) Wait() { if mw.Wg != nil { mw.Wg.Wait() } } // Done implements Context.Done(). func (mw *JobContext) Done() <-chan struct{} { return mw.Ctx.Done() } // Err implements Context.Err(). func (mw *JobContext) Err() error { return mw.Ctx.Err() } // Deadline implements Context.Deadline(). func (mw *JobContext) Deadline() (time time.Time, ok bool) { return mw.Ctx.Deadline() } // Value implements Context.Value(). func (mw *JobContext) Value(key interface{}) interface{} { return mw.Ctx.Value(key) } // Cancel cancels the JobContext's context. If no job has been started, this does nothing. func (mw *JobContext) Cancel() { if mw.JcCancel != nil { mw.JcCancel() } } // Running returns true if a job is currently running. func (mw *JobContext) Running() bool { mw.RunningLock.Lock() defer mw.RunningLock.Unlock() return mw.JcRunning } receptor-1.5.3/pkg/utils/job_context_test.go000066400000000000000000000101671475465740700212200ustar00rootroot00000000000000package utils_test import ( "context" "reflect" "sync" "testing" "time" "github.com/ansible/receptor/pkg/utils" ) type fields struct { Ctx context.Context JcCancel context.CancelFunc Wg *sync.WaitGroup JcRunning bool RunningLock *sync.Mutex } func setupGoodFields() fields { goodCtx, goodCancel := context.WithCancel(context.Background()) goodFields := &fields{ Ctx: goodCtx, JcCancel: goodCancel, Wg: &sync.WaitGroup{}, JcRunning: true, RunningLock: &sync.Mutex{}, } return *goodFields } func TestJobContextRunning(t *testing.T) { tests := []struct { name string fields fields want bool }{ { name: "Positive", fields: setupGoodFields(), want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } if got := mw.Running(); got != tt.want { t.Errorf("JobContext.Running() = %v, want %v", got, tt.want) } }) } } func TestJobContextCancel(t *testing.T) { tests := []struct { name string fields fields }{ { name: "Positive", fields: setupGoodFields(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } mw.Cancel() }) } } func TestJobContextValue(t *testing.T) { type args struct { key interface{} } tests := []struct { name string fields fields args args want interface{} }{ { name: "Positive", fields: setupGoodFields(), want: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: 
tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } if got := mw.Value(tt.args.key); !reflect.DeepEqual(got, tt.want) { t.Errorf("JobContext.Value() = %v, want %v", got, tt.want) } }) } } func TestJobContextDeadline(t *testing.T) { tests := []struct { name string fields fields wantTime time.Time wantOk bool }{ { name: "Positive", fields: setupGoodFields(), wantTime: time.Time{}, wantOk: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } gotTime, gotOk := mw.Deadline() if !reflect.DeepEqual(gotTime, tt.wantTime) { t.Errorf("JobContext.Deadline() gotTime = %v, want %v", gotTime, tt.wantTime) } if gotOk != tt.wantOk { t.Errorf("JobContext.Deadline() gotOk = %v, want %v", gotOk, tt.wantOk) } }) } } func TestJobContextErr(t *testing.T) { tests := []struct { name string fields fields wantErr bool }{ { name: "Positive", fields: setupGoodFields(), wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } if err := mw.Err(); (err != nil) != tt.wantErr { t.Errorf("JobContext.Err() error = %v, wantErr %v", err, tt.wantErr) } }) } } func TestJobContextWait(t *testing.T) { tests := []struct { name string fields fields }{ { name: "Positive", fields: setupGoodFields(), }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { mw := &utils.JobContext{ Ctx: tt.fields.Ctx, JcCancel: tt.fields.JcCancel, Wg: tt.fields.Wg, JcRunning: tt.fields.JcRunning, RunningLock: tt.fields.RunningLock, } mw.Wait() }) } } receptor-1.5.3/pkg/utils/other_name.go000066400000000000000000000061621475465740700177640ustar00rootroot00000000000000package utils import ( "crypto/x509/pkix" "encoding/asn1" "net" ) var ( // OIDSubjectAltName is the OID for subjectAltName. OIDSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} // OIDReceptorName is the OID for a Receptor node ID. OIDReceptorName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 2312, 19, 1} ) // OtherNameDecode is used for decoding the OtherName field type of an x.509 subjectAltName. type OtherNameDecode struct { ID asn1.ObjectIdentifier Value asn1.RawValue } // ReceptorNames returns a list of Receptor node IDs found in the subjectAltName field of an x.509 certificate. func ReceptorNames(extensions []pkix.Extension) ([]string, error) { names := make([]string, 0) for _, extension := range extensions { if extension.Id.Equal(OIDSubjectAltName) { values := make([]asn1.RawValue, 0) _, err := asn1.Unmarshal(extension.Value, &values) if err != nil { return nil, err } for _, value := range values { if value.Tag == 0 { on := OtherNameDecode{} _, err = asn1.UnmarshalWithParams(value.FullBytes, &on, "tag:0") if err != nil { return nil, err } if on.ID.Equal(OIDReceptorName) { var name string _, err = asn1.Unmarshal(on.Value.Bytes, &name) if err != nil { return nil, err } names = append(names, name) } } } } } return names, nil } // UTFString is used for encoding a UTF-8 string. type UTFString struct { A string `asn1:"utf8"` } // DNSNameEncode is used for encoding the OtherName field of an x.509 subjectAltName. type DNSNameEncode struct { Value string `asn1:"tag:2"` } // IPAddressEncode is used for encoding the OtherName field of an x.509 subjectAltName. 
type IPAddressEncode struct { Value []byte `asn1:"tag:7"` } // OtherNameEncode is used for encoding the OtherName field of an x.509 subjectAltName. type OtherNameEncode struct { OID asn1.ObjectIdentifier Value UTFString `asn1:"tag:0"` } // GeneralNameEncode is used for encoding a GeneralName in an x.509 certificate. type GeneralNameEncode struct { Names []interface{} `asn1:"tag:0"` } // MakeReceptorSAN generates a subjectAltName extension, optionally containing Receptor names. func MakeReceptorSAN(dnsNames []string, ipAddresses []net.IP, nodeIDs []string) (*pkix.Extension, error) { rawValues := []asn1.RawValue{} for _, name := range dnsNames { rawValues = append(rawValues, asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)}) } for _, rawIP := range ipAddresses { ip := rawIP.To4() if ip == nil { ip = rawIP } rawValues = append(rawValues, asn1.RawValue{Tag: 7, Class: 2, Bytes: ip}) } for _, nodeID := range nodeIDs { var err error var asnOtherName []byte asnOtherName, err = asn1.Marshal(OtherNameEncode{ OID: OIDReceptorName, Value: UTFString{A: nodeID}, }) if err != nil { return nil, err } rawValues = append(rawValues, asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: asnOtherName[2:]}) } sanBytes, err := asn1.Marshal(rawValues) if err != nil { return nil, err } sanExt := pkix.Extension{ Id: OIDSubjectAltName, Critical: false, Value: sanBytes, } return &sanExt, nil } receptor-1.5.3/pkg/utils/other_name_test.go000066400000000000000000000051331475465740700210200ustar00rootroot00000000000000package utils_test import ( "crypto/x509/pkix" "encoding/asn1" "net" "reflect" "testing" "github.com/ansible/receptor/pkg/utils" "github.com/google/go-cmp/cmp" ) func TestReceptorNames(t *testing.T) { type args struct { extensions []pkix.Extension } rawValues := []asn1.RawValue{ { Tag: 2, Class: 2, Bytes: []byte(`hybrid_node1`), }, } goodValue, err := asn1.Marshal(rawValues) if err != nil { t.Errorf("asn1.Marshal(): %s", err) } tests := []struct { name string args args want []string wantErr bool }{ { name: "Positive", args: args{ extensions: []pkix.Extension{ { Id: utils.OIDSubjectAltName, Critical: false, Value: goodValue, }, }, }, want: []string{}, wantErr: false, }, { name: "Negative", args: args{ extensions: []pkix.Extension{ { Id: utils.OIDSubjectAltName, Critical: false, Value: nil, }, }, }, want: nil, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := utils.ReceptorNames(tt.args.extensions) if (err != nil) != tt.wantErr { t.Errorf("ReceptorNames() error = %v, wantErr %v", err, tt.wantErr) return } if diff := cmp.Diff(got, tt.want); diff != "" { t.Errorf("ReceptorNames() mismatch(-want +got)\n%s", diff) } }) } } func TestMakeReceptorSAN(t *testing.T) { type args struct { dnsNames []string ipAddresses []net.IP nodeIDs []string } tests := []struct { name string args args want *pkix.Extension wantErr bool }{ { name: "Positive", args: args{ dnsNames: []string{ "hybrid_node1", }, ipAddresses: []net.IP{ net.IPv4(192, 168, 4, 1), }, nodeIDs: []string{ "hybrid_node1", }, }, want: &pkix.Extension{ Id: utils.OIDSubjectAltName, Critical: false, Value: []byte{ 48, 49, 130, 12, 104, 121, 98, 114, 105, 100, 95, 110, 111, 100, 101, 49, 135, 4, 192, 168, 4, 1, 160, 27, 6, 9, 43, 6, 1, 4, 1, 146, 8, 19, 1, 160, 14, 12, 12, 104, 121, 98, 114, 105, 100, 95, 110, 111, 100, 101, 49, }, }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := utils.MakeReceptorSAN(tt.args.dnsNames, tt.args.ipAddresses, tt.args.nodeIDs) if (err != 
nil) != tt.wantErr { t.Errorf("MakeReceptorSAN() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("MakeReceptorSAN() = %+v, want %+v", got, tt.want) } }) } } receptor-1.5.3/pkg/utils/readstring_context.go000066400000000000000000000014571475465740700215530ustar00rootroot00000000000000package utils import ( "bufio" "context" ) type readStringResult = struct { str string err error } // ReadStringContext calls bufio.Reader.ReadString() but uses a context. Note that if the // ctx.Done() fires, the ReadString() call is still active, and bufio is not re-entrant, so it is // important for callers to error out of further use of the bufio. Also, the goroutine will not // exit until the bufio's underlying connection is closed. func ReadStringContext(ctx context.Context, reader *bufio.Reader, delim byte) (string, error) { result := make(chan *readStringResult, 1) go func() { str, err := reader.ReadString(delim) result <- &readStringResult{ str: str, err: err, } }() select { case res := <-result: return res.str, res.err case <-ctx.Done(): return "", ctx.Err() } } receptor-1.5.3/pkg/utils/sysinfo.go000066400000000000000000000006001475465740700173240ustar00rootroot00000000000000package utils import ( "runtime" "github.com/pbnjay/memory" ) // GetSysCPUCount returns number of logical CPU cores on the system. func GetSysCPUCount() int { return runtime.NumCPU() } // GetSysMemoryMiB returns the capacity (in mebibytes) of the physical memory installed on the system. func GetSysMemoryMiB() uint64 { return memory.TotalMemory() / 1048576 // bytes to MiB } receptor-1.5.3/pkg/utils/sysinfo_test.go000066400000000000000000000021221475465740700203640ustar00rootroot00000000000000package utils_test import ( "os/exec" "runtime" "strconv" "strings" "testing" "github.com/ansible/receptor/pkg/utils" ) func TestGetSysCPUCount(t *testing.T) { got := utils.GetSysCPUCount() if got <= 0 { t.Errorf("Non-positive CPU count: %d\n", got) } if runtime.GOOS == "linux" { commandOutput, _ := exec.Command("nproc").CombinedOutput() commandOutputWithout := strings.TrimSpace(string(commandOutput)) want, _ := strconv.Atoi(commandOutputWithout) if got != want { t.Errorf("Expected CPU count: %d, got %d\n", want, got) } } } func TestGetSysMemoryMiB(t *testing.T) { got := utils.GetSysMemoryMiB() if got <= 0 { t.Errorf("Non-positive Memory: %d\n", got) } if runtime.GOOS == "linux" { commandOutput, _ := exec.Command("sed", "-n", "s/^MemTotal:[[:space:]]*\\([[:digit:]]*\\).*/\\1/p", "/proc/meminfo").CombinedOutput() commandOutputWithout := strings.TrimSpace(string(commandOutput)) wantKb, _ := strconv.ParseUint(commandOutputWithout, 10, 64) want := wantKb / 1024 if got != want { t.Errorf("Expected Memory: %d, got %d\n", want, got) } } } receptor-1.5.3/pkg/utils/unixsock.go000066400000000000000000000016251475465740700175050ustar00rootroot00000000000000//go:build !windows // +build !windows package utils import ( "fmt" "net" "os" ) // UnixSocketListen listens on a Unix socket, handling file locking and permissions. 
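// Editorial sketch (not part of the receptor source): typical use of
// UnixSocketListen. The socket path and handleConn() are illustrative.
//
//	li, lock, err := UnixSocketListen("/tmp/receptor.sock", 0o600)
//	if err != nil {
//		return err
//	}
//	defer lock.Unlock() // releases the ".lock" file guarding the socket
//	for {
//		conn, err := li.Accept()
//		if err != nil {
//			return err
//		}
//		go handleConn(conn)
//	}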
func UnixSocketListen(filename string, permissions os.FileMode) (net.Listener, *FLock, error) { lock, err := TryFLock(filename + ".lock") if err != nil { return nil, nil, fmt.Errorf("could not acquire lock on socket file: %s", err) } err = os.RemoveAll(filename) if err != nil { _ = lock.Unlock() return nil, nil, fmt.Errorf("could not overwrite socket file: %s", err) } uli, err := net.Listen("unix", filename) if err != nil { _ = lock.Unlock() return nil, nil, fmt.Errorf("could not listen on socket file: %s", err) } err = os.Chmod(filename, permissions) if err != nil { _ = uli.Close() _ = lock.Unlock() return nil, nil, fmt.Errorf("error setting socket file permissions: %s", err) } return uli, lock, nil } receptor-1.5.3/pkg/utils/unixsock_test.go000066400000000000000000000035011475465740700205370ustar00rootroot00000000000000//go:build !windows // +build !windows package utils_test import ( "io/fs" "net" "os" "path/filepath" "testing" "github.com/ansible/receptor/pkg/utils" "golang.org/x/sys/unix" ) func TestUnixSocketListen(t *testing.T) { type args struct { filename string permissions os.FileMode } badFilename := "" tests := []struct { name string args args want net.Listener want1 *utils.FLock wantErr bool }{ { name: "Positive", args: args{ filename: filepath.Join(os.TempDir(), "good_unixsock_listener"), permissions: 0x0400, }, wantErr: false, }, { name: "Negative", args: args{ filename: badFilename, permissions: 0x0000, }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { lockFilename := tt.args.filename + ".lock" defer os.Remove(lockFilename) _, got1, err := utils.UnixSocketListen(tt.args.filename, tt.args.permissions) if (err != nil) != tt.wantErr { t.Errorf("%s: UnixSocketListen(): error = %+v, wantErr = %+v", tt.name, err, tt.wantErr) return } if err == nil { if got1.Fd < 0 { t.Errorf("%s: UnixSocketListen(): Invalid got1 Fd = %+v", tt.name, got1) } defer got1.Unlock() err = unix.Flock(got1.Fd, unix.LOCK_EX) if err != nil { t.Errorf("%s: UnixSocketListen(): Test lock error = %+v", tt.name, err) } gotFileInfo, err := os.Stat(tt.args.filename) if err != nil { t.Errorf("%s: UnixSocketListen(): Stat error = %+v", tt.name, err) } gotPermissions := gotFileInfo.Mode() & fs.ModePerm wantPermissions := tt.args.permissions & fs.ModePerm if gotPermissions != wantPermissions { t.Errorf("%s: UnixSocketListen(): Got permission = %d, want permissions = %d", tt.name, gotPermissions, wantPermissions) } } }) } } receptor-1.5.3/pkg/utils/unixsock_windows.go000066400000000000000000000004551475465740700212570ustar00rootroot00000000000000//go:build windows // +build windows package utils import ( "fmt" "net" "os" ) // UnixSocketListen is not available on Windows func UnixSocketListen(filename string, permissions os.FileMode) (net.Listener, *FLock, error) { return nil, nil, fmt.Errorf("Unix sockets not available on Windows") } receptor-1.5.3/pkg/workceptor/000077500000000000000000000000001475465740700163465ustar00rootroot00000000000000receptor-1.5.3/pkg/workceptor/cmdline.go000066400000000000000000000003661475465740700203150ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import "github.com/ghjm/cmdline" var workersSection = &cmdline.ConfigSection{ Description: "Commands to configure workers that process units of work:", Order: 30, } receptor-1.5.3/pkg/workceptor/command.go000066400000000000000000000363471475465740700203300ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import 
( "bufio" "context" "flag" "fmt" "io" "os" "os/exec" "os/signal" "path" "strconv" "strings" "sync" "syscall" "time" "github.com/ghjm/cmdline" "github.com/google/shlex" "github.com/spf13/viper" ) type BaseWorkUnitForWorkUnit interface { CancelContext() ID() string Init(w *Workceptor, unitID string, workType string, fs FileSystemer, watcher WatcherWrapper) LastUpdateError() error Load() error MonitorLocalStatus() Release(force bool) error Save() error SetFromParams(_ map[string]string) error Status() *StatusFileData StatusFileName() string StdoutFileName() string UnitDir() string UnredactedStatus() *StatusFileData UpdateBasicStatus(state int, detail string, stdoutSize int64) UpdateFullStatus(statusFunc func(*StatusFileData)) GetStatusCopy() StatusFileData GetStatusWithoutExtraData() *StatusFileData SetStatusExtraData(interface{}) GetStatusLock() *sync.RWMutex GetWorkceptor() *Workceptor SetWorkceptor(*Workceptor) GetContext() context.Context GetCancel() context.CancelFunc } // commandUnit implements the WorkUnit interface for the Receptor command worker plugin. type commandUnit struct { BaseWorkUnitForWorkUnit command string baseParams string allowRuntimeParams bool done bool } // CommandExtraData is the content of the ExtraData JSON field for a command worker. type CommandExtraData struct { Pid int Params string } func termThenKill(cmd *exec.Cmd, doneChan chan bool) { if cmd.Process == nil { return } pserr := cmd.Process.Signal(os.Interrupt) if pserr != nil { MainInstance.nc.GetLogger().Warning("Error processing Interrupt Signal: %+v", pserr) } select { case <-doneChan: return case <-time.After(10 * time.Second): MainInstance.nc.GetLogger().Warning("timed out waiting for pid %d to terminate with SIGINT", cmd.Process.Pid) } if cmd.Process != nil { MainInstance.nc.GetLogger().Info("sending SIGKILL to pid %d", cmd.Process.Pid) pkerr := cmd.Process.Kill() if pkerr != nil { MainInstance.nc.GetLogger().Warning("Error killing pid %d: %+v", cmd.Process.Pid, pkerr) } } } // cmdWaiter hangs around and waits for the command to be done because apparently you // can't safely call exec.Cmd.Exited() unless you already know the command has exited. func cmdWaiter(cmd *exec.Cmd, doneChan chan bool) { _ = cmd.Wait() doneChan <- true } // commandRunner is run in a separate process, to monitor the subprocess and report back metadata. func commandRunner(command string, params string, unitdir string) error { status := StatusFileData{} status.ExtraData = &CommandExtraData{} statusFilename := path.Join(unitdir, "status") err := status.UpdateBasicStatus(statusFilename, WorkStatePending, "Not started yet", 0) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) } var cmd *exec.Cmd if params == "" { cmd = exec.Command(command) } else { paramList, err := shlex.Split(params) if err != nil { return err } cmd = exec.Command(command, paramList...) 
} termChan := make(chan os.Signal, 1) signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM) stdin, err := os.Open(path.Join(unitdir, "stdin")) if err != nil { return err } payloadDebug, _ := strconv.Atoi(os.Getenv("RECEPTOR_PAYLOAD_TRACE_LEVEL")) if payloadDebug != 0 { splitUnitDir := strings.Split(unitdir, "/") workUnitID := splitUnitDir[len(splitUnitDir)-1] stdinStream, err := cmd.StdinPipe() if err != nil { return err } var payload string reader := bufio.NewReader(stdin) for { response, err := reader.ReadString('\n') if err != nil { if err.Error() != "EOF" { MainInstance.nc.GetLogger().Error("Error reading work unit %v stdin: %v\n", workUnitID, err) } break } payload += response } MainInstance.nc.GetLogger().DebugPayload(payloadDebug, payload, workUnitID, "stdin") io.WriteString(stdinStream, payload) stdinStream.Close() } else { cmd.Stdin = stdin } stdout, err := os.OpenFile(path.Join(unitdir, "stdout"), os.O_CREATE+os.O_WRONLY+os.O_SYNC, 0o600) if err != nil { return err } cmd.Stdout = stdout cmd.Stderr = stdout err = cmd.Start() if err != nil { return err } doneChan := make(chan bool, 1) go cmdWaiter(cmd, doneChan) writeStatusFailures := 0 loop: for { select { case <-doneChan: break loop case <-termChan: termThenKill(cmd, doneChan) err = status.UpdateBasicStatus(statusFilename, WorkStateFailed, "Killed", stdoutSize(unitdir)) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) } os.Exit(-1) case <-time.After(250 * time.Millisecond): err = status.UpdateBasicStatus(statusFilename, WorkStateRunning, fmt.Sprintf("Running: PID %d", cmd.Process.Pid), stdoutSize(unitdir)) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) writeStatusFailures++ if writeStatusFailures > 3 { MainInstance.nc.GetLogger().Error("Exceeded retries for updating status file %s: %s", statusFilename, err) os.Exit(-1) } } else { writeStatusFailures = 0 } } } if err != nil { err = status.UpdateBasicStatus(statusFilename, WorkStateFailed, fmt.Sprintf("Error: %s", err), stdoutSize(unitdir)) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) } return err } if cmd.ProcessState.Success() { err = status.UpdateBasicStatus(statusFilename, WorkStateSucceeded, cmd.ProcessState.String(), stdoutSize(unitdir)) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) } } else { err = status.UpdateBasicStatus(statusFilename, WorkStateFailed, cmd.ProcessState.String(), stdoutSize(unitdir)) if err != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err) } } err = stdin.Close() if err != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", path.Join(unitdir, "stdin"), err) } err = stdout.Close() if err != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", path.Join(unitdir, "stdout"), err) } os.Exit(cmd.ProcessState.ExitCode()) return nil } func combineParams(baseParams string, userParams string) string { var allParams string switch { case userParams == "": allParams = baseParams case baseParams == "": allParams = userParams default: allParams = strings.Join([]string{baseParams, userParams}, " ") } return allParams } // SetFromParams sets the in-memory state from parameters. 
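// Editorial note (not part of the receptor source): combineParams simply joins
// the worker's configured parameters with any user-supplied ones:
//
//	combineParams("--verbose", "")              // "--verbose"
//	combineParams("", "--limit host1")          // "--limit host1"
//	combineParams("--verbose", "--limit host1") // "--verbose --limit host1"
//
// SetFromParams below applies this to the "params" field of a work submission,
// and rejects user-supplied params unless allowRuntimeParams is set.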
func (cw *commandUnit) SetFromParams(params map[string]string) error { cmdParams, ok := params["params"] if !ok { cmdParams = "" } if cmdParams != "" && !cw.allowRuntimeParams { return fmt.Errorf("extra params provided but not allowed") } cw.GetStatusCopy().ExtraData.(*CommandExtraData).Params = combineParams(cw.baseParams, cmdParams) return nil } // Status returns a copy of the status currently loaded in memory. func (cw *commandUnit) Status() *StatusFileData { return cw.UnredactedStatus() } // UnredactedStatus returns a copy of the status currently loaded in memory, including secrets. func (cw *commandUnit) UnredactedStatus() *StatusFileData { cw.GetStatusLock().RLock() status := cw.GetStatusWithoutExtraData() ed, ok := cw.GetStatusCopy().ExtraData.(*CommandExtraData) if ok { edCopy := *ed status.ExtraData = &edCopy } cw.GetStatusLock().RUnlock() return status } // runCommand actually runs the exec.Cmd. This is in a separate function so the Python worker can call it. func (cw *commandUnit) runCommand(cmd *exec.Cmd) error { cmdSetDetach(cmd) cw.done = false cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Start(); err != nil { cw.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Failed to start command runner: %s", err), 0) return err } cw.UpdateFullStatus(func(status *StatusFileData) { if status.ExtraData == nil { status.ExtraData = &CommandExtraData{} } status.ExtraData.(*CommandExtraData).Pid = cmd.Process.Pid }) doneChan := make(chan bool) go func() { <-doneChan cw.done = true cw.UpdateFullStatus(func(status *StatusFileData) { status.ExtraData = nil }) }() go cmdWaiter(cmd, doneChan) go cw.MonitorLocalStatus() return nil } // Start launches a job with given parameters. func (cw *commandUnit) Start() error { level := cw.GetWorkceptor().nc.GetLogger().GetLogLevel() levelName, _ := cw.GetWorkceptor().nc.GetLogger().LogLevelToName(level) cw.UpdateBasicStatus(WorkStatePending, "Launching command runner", 0) // TODO: This is another place where we rely on a pre-built binary for testing. // Consider invoking the commandRunner directly? var receptorBin string if flag.Lookup("test.v") == nil { receptorBin = os.Args[0] } else { receptorBin = "receptor" } cmd := exec.Command(receptorBin, "--node", "id=worker", "--log-level", levelName, "--command-runner", fmt.Sprintf("command=%s", cw.command), fmt.Sprintf("params=%s", cw.Status().ExtraData.(*CommandExtraData).Params), fmt.Sprintf("unitdir=%s", cw.UnitDir())) return cw.runCommand(cmd) } // Restart resumes monitoring a job after a Receptor restart. func (cw *commandUnit) Restart() error { if err := cw.Load(); err != nil { return err } state := cw.Status().State if IsComplete(state) { // Job already complete - no need to restart monitoring return nil } if state == WorkStatePending { // Job never started - mark it failed cw.UpdateBasicStatus(WorkStateFailed, "Pending at restart", stdoutSize(cw.UnitDir())) } go cw.MonitorLocalStatus() return nil } // Cancel stops a running job. func (cw *commandUnit) Cancel() error { cw.CancelContext() status := cw.Status() ced, ok := status.ExtraData.(*CommandExtraData) if !ok || ced.Pid <= 0 { return nil } proc, err := os.FindProcess(ced.Pid) if err != nil { return err } defer proc.Release() err = proc.Signal(os.Interrupt) if err != nil { if strings.Contains(err.Error(), "already finished") { return nil } return err } proc.Wait() cw.UpdateBasicStatus(WorkStateCanceled, "Canceled", -1) return nil } // Release releases resources associated with a job. Implies Cancel. 
func (cw *commandUnit) Release(force bool) error { err := cw.Cancel() if err != nil && !force { return err } return cw.BaseWorkUnitForWorkUnit.Release(force) } // ************************************************************************** // Command line // ************************************************************************** // CommandWorkerCfg is the cmdline configuration object for a worker that runs a command. type CommandWorkerCfg struct { WorkType string `required:"true" description:"Name for this worker type"` Command string `required:"true" description:"Command to run to process units of work"` Params string `description:"Command-line parameters"` AllowRuntimeParams bool `description:"Allow users to add more parameters" default:"false"` VerifySignature bool `description:"Verify a signed work submission" default:"false"` } func (cfg CommandWorkerCfg) NewWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit { if bwu == nil { bwu = &BaseWorkUnit{ status: StatusFileData{ ExtraData: &CommandExtraData{}, }, } } cw := &commandUnit{ BaseWorkUnitForWorkUnit: bwu, command: cfg.Command, baseParams: cfg.Params, allowRuntimeParams: cfg.AllowRuntimeParams, } cw.BaseWorkUnitForWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) return cw } func (cfg CommandWorkerCfg) GetWorkType() string { return cfg.WorkType } func (cfg CommandWorkerCfg) GetVerifySignature() bool { return cfg.VerifySignature } // Run runs the action. func (cfg CommandWorkerCfg) Run() error { if cfg.VerifySignature && MainInstance.VerifyingKey == "" { return fmt.Errorf("VerifySignature for work command '%s' is true, but the work verification public key is not specified", cfg.WorkType) } err := MainInstance.RegisterWorker(cfg.WorkType, cfg.NewWorker, cfg.VerifySignature) return err } // commandRunnerCfg is a hidden command line option for a command runner process. type commandRunnerCfg struct { Command string `required:"true"` Params string `required:"true"` UnitDir string `required:"true"` } // Run runs the action. func (cfg commandRunnerCfg) Run() error { err := commandRunner(cfg.Command, cfg.Params, cfg.UnitDir) if err != nil { statusFilename := path.Join(cfg.UnitDir, "status") err2 := (&StatusFileData{}).UpdateBasicStatus(statusFilename, WorkStateFailed, err.Error(), stdoutSize(cfg.UnitDir)) if err2 != nil { MainInstance.nc.GetLogger().Error("Error updating status file %s: %s", statusFilename, err2) } MainInstance.nc.GetLogger().Error("Command runner exited with error: %s\n", err) os.Exit(-1) } os.Exit(0) return nil } type SigningKeyPrivateCfg struct { PrivateKey string `description:"Private key to sign work submissions" barevalue:"yes" default:""` TokenExpiration string `description:"Expiration of the signed json web token, e.g. 
3h or 3h30m" default:""` } type VerifyingKeyPublicCfg struct { PublicKey string `description:"Public key to verify signed work submissions" barevalue:"yes" default:""` } func filenameExists(filename string) error { if _, err := os.Stat(filename); err != nil { if os.IsNotExist(err) { return fmt.Errorf("%s does not exist", filename) } return err } return nil } func (cfg SigningKeyPrivateCfg) Prepare() error { duration, err := cfg.PrepareSigningKeyPrivateCfg() if err != nil { return fmt.Errorf(err.Error()) //nolint:govet,staticcheck } MainInstance.SigningExpiration = *duration MainInstance.SigningKey = cfg.PrivateKey return nil } func (cfg SigningKeyPrivateCfg) PrepareSigningKeyPrivateCfg() (*time.Duration, error) { err := filenameExists(cfg.PrivateKey) if err != nil { return nil, err } if cfg.TokenExpiration != "" { duration, err := time.ParseDuration(cfg.TokenExpiration) if err != nil { return nil, fmt.Errorf("failed to parse TokenExpiration -- valid examples include '1.5h', '30m', '30m10s'") } return &duration, nil } return nil, nil } func (cfg VerifyingKeyPublicCfg) Prepare() error { err := filenameExists(cfg.PublicKey) if err != nil { return err } MainInstance.VerifyingKey = cfg.PublicKey return nil } func (cfg VerifyingKeyPublicCfg) PrepareVerifyingKeyPublicCfg() error { err := filenameExists(cfg.PublicKey) if err != nil { return err } return nil } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-workers", "work-signing", "Private key to sign work submissions", SigningKeyPrivateCfg{}, cmdline.Singleton, cmdline.Section(workersSection)) cmdline.RegisterConfigTypeForApp("receptor-workers", "work-verification", "Public key to verify work submissions", VerifyingKeyPublicCfg{}, cmdline.Singleton, cmdline.Section(workersSection)) cmdline.RegisterConfigTypeForApp("receptor-workers", "work-command", "Run a worker using an external command", CommandWorkerCfg{}, cmdline.Section(workersSection)) cmdline.RegisterConfigTypeForApp("receptor-workers", "command-runner", "Wrapper around a process invocation", commandRunnerCfg{}, cmdline.Hidden) } receptor-1.5.3/pkg/workceptor/command_detach_unixlike.go000066400000000000000000000003401475465740700235300ustar00rootroot00000000000000//go:build !windows && !no_workceptor // +build !windows,!no_workceptor package workceptor import ( "os/exec" "syscall" ) func cmdSetDetach(cmd *exec.Cmd) { cmd.SysProcAttr = &syscall.SysProcAttr{ Setsid: true, } } receptor-1.5.3/pkg/workceptor/command_detach_windows.go000066400000000000000000000002461475465740700233770ustar00rootroot00000000000000//go:build windows && !no_workceptor // +build windows,!no_workceptor package workceptor import ( "os/exec" ) func cmdSetDetach(cmd *exec.Cmd) { // Do nothing } receptor-1.5.3/pkg/workceptor/command_test.go000066400000000000000000000302261475465740700213550ustar00rootroot00000000000000package workceptor_test import ( "context" "errors" "fmt" "os/exec" "sync" "testing" "time" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "go.uber.org/mock/gomock" ) func statusExpectCalls(mockBaseWorkUnit *mock_workceptor.MockBaseWorkUnitForWorkUnit) { statusLock := &sync.RWMutex{} mockBaseWorkUnit.EXPECT().GetStatusLock().Return(statusLock).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{}, }) } func 
createCommandTestSetup(t *testing.T) (workceptor.WorkUnit, *mock_workceptor.MockBaseWorkUnitForWorkUnit, *mock_workceptor.MockNetceptorForWorkceptor, *workceptor.Workceptor) { ctrl := gomock.NewController(t) ctx := context.Background() mockBaseWorkUnit := mock_workceptor.NewMockBaseWorkUnitForWorkUnit(ctrl) mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) mockNetceptor.EXPECT().NodeID().Return("NodeID") w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } cwc := &workceptor.CommandWorkerCfg{} mockBaseWorkUnit.EXPECT().Init(w, "", "", workceptor.FileSystem{}, nil) workUnit := cwc.NewWorker(mockBaseWorkUnit, w, "", "") return workUnit, mockBaseWorkUnit, mockNetceptor, w } func TestCommandSetFromParams(t *testing.T) { wu, mockBaseWorkUnit, _, _ := createCommandTestSetup(t) paramsTestCases := []struct { name string params map[string]string expectedCalls func() errorCatch func(error, *testing.T) }{ { name: "no params with no error", params: map[string]string{"": ""}, expectedCalls: func() { mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{}, }) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, { name: "params with error", params: map[string]string{"params": "param"}, expectedCalls: func() { }, errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, }, } for _, testCase := range paramsTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := wu.SetFromParams(testCase.params) testCase.errorCatch(err, t) }) } } func TestUnredactedStatus(t *testing.T) { wu, mockBaseWorkUnit, _, _ := createCommandTestSetup(t) restartTestCases := []struct { name string }{ {name: "test1"}, {name: "test2"}, } statusLock := &sync.RWMutex{} for _, testCase := range restartTestCases { t.Run(testCase.name, func(t *testing.T) { mockBaseWorkUnit.EXPECT().GetStatusLock().Return(statusLock).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{}, }) wu.UnredactedStatus() }) } } func TestStart(t *testing.T) { wu, mockBaseWorkUnit, mockNetceptor, w := createCommandTestSetup(t) mockBaseWorkUnit.EXPECT().GetWorkceptor().Return(w).Times(2) mockNetceptor.EXPECT().GetLogger().Times(2) mockBaseWorkUnit.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()) statusExpectCalls(mockBaseWorkUnit) mockBaseWorkUnit.EXPECT().UnitDir() mockBaseWorkUnit.EXPECT().UpdateFullStatus(gomock.Any()) mockBaseWorkUnit.EXPECT().MonitorLocalStatus().AnyTimes() mockBaseWorkUnit.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() wu.Start() } func TestRestart(t *testing.T) { wu, mockBaseWorkUnit, _, _ := createCommandTestSetup(t) restartTestCases := []struct { name string expectedCalls func() errorCatch func(error, *testing.T) }{ { name: "load error", expectedCalls: func() { mockBaseWorkUnit.EXPECT().Load().Return(errors.New("terminated")) }, errorCatch: func(err error, t *testing.T) { if err.Error() != "terminated" { t.Error(err) } }, }, { name: "job complete with no error", expectedCalls: func() { statusFile := &workceptor.StatusFileData{State: 2} mockBaseWorkUnit.EXPECT().Load().Return(nil) statusLock := &sync.RWMutex{} mockBaseWorkUnit.EXPECT().GetStatusLock().Return(statusLock).Times(2) 
mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(statusFile) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{}, }) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, { name: "restart successful", expectedCalls: func() { statusFile := &workceptor.StatusFileData{State: 0} mockBaseWorkUnit.EXPECT().Load().Return(nil) statusLock := &sync.RWMutex{} mockBaseWorkUnit.EXPECT().GetStatusLock().Return(statusLock).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(statusFile) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{}, }) mockBaseWorkUnit.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()) mockBaseWorkUnit.EXPECT().UnitDir() }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, } for _, testCase := range restartTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() mockBaseWorkUnit.EXPECT().MonitorLocalStatus().AnyTimes() err := wu.Restart() testCase.errorCatch(err, t) }) } } func TestCancel(t *testing.T) { wu, mockBaseWorkUnit, _, _ := createCommandTestSetup(t) paramsTestCases := []struct { name string expectedCalls func() errorCatch func(error, *testing.T) }{ { name: "not a valid pid no error", expectedCalls: func() { mockBaseWorkUnit.EXPECT().CancelContext() statusExpectCalls(mockBaseWorkUnit) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, { name: "process interrupt error", expectedCalls: func() { mockBaseWorkUnit.EXPECT().CancelContext() mockBaseWorkUnit.EXPECT().GetStatusLock().Return(&sync.RWMutex{}).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{ Pid: 1, }, }) }, errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, }, { name: "process already finished", expectedCalls: func() { mockBaseWorkUnit.EXPECT().CancelContext() mockBaseWorkUnit.EXPECT().GetStatusLock().Return(&sync.RWMutex{}).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) c := exec.Command("ls", "/tmp") processPid := make(chan int) go func(c *exec.Cmd, processPid chan int) { c.Run() processPid <- c.Process.Pid }(c, processPid) time.Sleep(200 * time.Millisecond) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{ Pid: <-processPid, }, }) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, { name: "cancelled process successfully", expectedCalls: func() { mockBaseWorkUnit.EXPECT().CancelContext() mockBaseWorkUnit.EXPECT().GetStatusLock().Return(&sync.RWMutex{}).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()) c := exec.Command("sleep", "30") processPid := make(chan int) go func(c *exec.Cmd, processPid chan int) { err := c.Start() if err != nil { fmt.Println(err) } processPid <- c.Process.Pid }(c, processPid) time.Sleep(200 * time.Millisecond) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{ Pid: <-processPid, }, }) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } 
}, }, } for _, testCase := range paramsTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := wu.Cancel() testCase.errorCatch(err, t) }) } } func TestRelease(t *testing.T) { wu, mockBaseWorkUnit, _, _ := createCommandTestSetup(t) releaseTestCases := []struct { name string expectedCalls func() errorCatch func(error, *testing.T) force bool }{ { name: "cancel error", expectedCalls: func() {}, errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, force: false, }, { name: "released successfully", expectedCalls: func() { mockBaseWorkUnit.EXPECT().Release(gomock.Any()) }, errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, force: true, }, } for _, testCase := range releaseTestCases { t.Run(testCase.name, func(t *testing.T) { mockBaseWorkUnit.EXPECT().CancelContext() mockBaseWorkUnit.EXPECT().GetStatusLock().Return(&sync.RWMutex{}).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.CommandExtraData{ Pid: 1, }, }) testCase.expectedCalls() err := wu.Release(testCase.force) testCase.errorCatch(err, t) }) } } func TestSigningKeyPrepare(t *testing.T) { privateKey := workceptor.SigningKeyPrivateCfg{} err := privateKey.Prepare() if err == nil { t.Error(err) } } func TestPrepareSigningKeyPrivateCfg(t *testing.T) { signingKeyTestCases := []struct { name string errorCatch func(error, *testing.T) privateKey string tokenExpiration string }{ { name: "file does not exist error", privateKey: "does_not_exist.txt", tokenExpiration: "", errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, }, { name: "failed to parse token expiration", privateKey: "/etc/hosts", tokenExpiration: "random_input", errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, }, { name: "duration no error", privateKey: "/etc/hosts", tokenExpiration: "3h", errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, { name: "no duration no error", privateKey: "/etc/hosts", tokenExpiration: "", errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, } for _, testCase := range signingKeyTestCases { t.Run(testCase.name, func(t *testing.T) { privateKey := workceptor.SigningKeyPrivateCfg{ PrivateKey: testCase.privateKey, TokenExpiration: testCase.tokenExpiration, } _, err := privateKey.PrepareSigningKeyPrivateCfg() testCase.errorCatch(err, t) }) } } func TestVerifyingKeyPrepare(t *testing.T) { publicKey := workceptor.VerifyingKeyPublicCfg{} err := publicKey.Prepare() if err == nil { t.Error(err) } } func TestPrepareVerifyingKeyPrivateCfg(t *testing.T) { verifyingKeyTestCases := []struct { name string errorCatch func(error, *testing.T) publicKey string }{ { name: "file does not exist", publicKey: "does_not_exist.txt", errorCatch: func(err error, t *testing.T) { if err == nil { t.Error(err) } }, }, { name: "prepared successfully", publicKey: "/etc/hosts", errorCatch: func(err error, t *testing.T) { if err != nil { t.Error(err) } }, }, } for _, testCase := range verifyingKeyTestCases { t.Run(testCase.name, func(t *testing.T) { publicKey := workceptor.VerifyingKeyPublicCfg{ PublicKey: testCase.publicKey, } err := publicKey.PrepareVerifyingKeyPublicCfg() testCase.errorCatch(err, t) }) } } receptor-1.5.3/pkg/workceptor/controlsvc.go000066400000000000000000000260321475465740700210740ustar00rootroot00000000000000//go:build !no_workceptor // 
+build !no_workceptor package workceptor import ( "context" "fmt" "os" "path" "strconv" "strings" "github.com/ansible/receptor/pkg/controlsvc" ) type workceptorCommandType struct { w *Workceptor } type workceptorCommand struct { w *Workceptor subcommand string params map[string]interface{} } func (t *workceptorCommandType) InitFromString(params string) (controlsvc.ControlCommand, error) { tokens := strings.Split(params, " ") if len(tokens) == 0 { return nil, fmt.Errorf("no work subcommand") } c := &workceptorCommand{ w: t.w, subcommand: strings.ToLower(tokens[0]), params: make(map[string]interface{}), } switch c.subcommand { case "submit": if len(tokens) < 3 { return nil, fmt.Errorf("work submit requires a target node and work type") } c.params["node"] = tokens[1] c.params["worktype"] = tokens[2] if len(tokens) > 3 { c.params["params"] = strings.Join(tokens[3:], " ") } case "list": if len(tokens) > 1 { c.params["unitid"] = tokens[1] } case "status", "cancel", "release", "force-release": if len(tokens) < 2 { return nil, fmt.Errorf("work %s requires a unit ID", c.subcommand) } if len(tokens) > 2 { return nil, fmt.Errorf("work %s does not take parameters after the unit ID", c.subcommand) } c.params["unitid"] = tokens[1] case "results": if len(tokens) < 2 { return nil, fmt.Errorf("work results requires a unit ID") } if len(tokens) > 3 { return nil, fmt.Errorf("work results only takes a unit ID and optional start position") } c.params["unitid"] = tokens[1] if len(tokens) > 2 { var err error c.params["startpos"], err = strconv.ParseInt(tokens[2], 10, 64) if err != nil { return nil, fmt.Errorf("error converting start position to integer: %s", err) } } else { c.params["startpos"] = int64(0) } } return c, nil } // strFromMap extracts a string from a map[string]interface{}, handling errors. func strFromMap(config map[string]interface{}, name string) (string, error) { value, ok := config[name] if !ok { return "", fmt.Errorf("field %s missing", name) } valueStr, ok := value.(string) if !ok { return "", fmt.Errorf("field %s must be a string", name) } return valueStr, nil } // intFromMap extracts an int64 from a map[string]interface{}, handling errors. 
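// Editorial sketch (not part of the receptor source): these helpers normalize
// the loosely typed map produced by decoding a JSON control command, e.g.
//
//	cfg := map[string]interface{}{"node": "remote1", "startpos": float64(0), "signwork": "true"}
//	node, _ := strFromMap(cfg, "node")      // "remote1"
//	pos, _ := intFromMap(cfg, "startpos")   // 0 (JSON numbers decode as float64)
//	sign, _ := boolFromMap(cfg, "signwork") // true (sent as the string "true")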
func intFromMap(config map[string]interface{}, name string) (int64, error) { value, ok := config[name] if !ok { return 0, fmt.Errorf("field %s missing", name) } valueInt, ok := value.(int64) if ok { return valueInt, nil } valueFloat, ok := value.(float64) if ok { return int64(valueFloat), nil } valueStr, ok := value.(string) if ok { valueInt, err := strconv.ParseInt(valueStr, 10, 64) if err != nil { return valueInt, err } } return 0, fmt.Errorf("field %s value %s is not convertible to an int", name, value) } func boolFromMap(config map[string]interface{}, name string) (bool, error) { value, ok := config[name] if !ok { return false, fmt.Errorf("field %s missing", name) } valueBoolStr, ok := value.(string) if !ok { return false, fmt.Errorf("field %s must be a string", name) } if valueBoolStr == "true" { return true, nil } if valueBoolStr == "false" { return false, nil } return false, fmt.Errorf("field %s value %s is not convertible to a bool", name, value) } func (t *workceptorCommandType) InitFromJSON(config map[string]interface{}) (controlsvc.ControlCommand, error) { subCmd, err := strFromMap(config, "subcommand") if err != nil { return nil, err } c := &workceptorCommand{ w: t.w, subcommand: strings.ToLower(subCmd), params: make(map[string]interface{}), } switch c.subcommand { case "submit": for k, v := range config { _, ok := v.(string) if !ok { return nil, fmt.Errorf("submit parameters must all be strings and %s is not", k) } c.params[k] = v } _, err := strFromMap(c.params, "node") if err != nil { return nil, err } _, err = strFromMap(c.params, "worktype") if err != nil { return nil, err } case "status", "cancel", "release", "force-release": c.params["unitid"], err = strFromMap(config, "unitid") if err != nil { return nil, err } signature, err := strFromMap(config, "signature") if err == nil { c.params["signature"] = signature } case "list": unitID, err := strFromMap(config, "unitid") if err == nil { c.params["unitid"] = unitID } case "results": c.params["unitid"], err = strFromMap(config, "unitid") if err != nil { return nil, err } c.params["startpos"], err = intFromMap(config, "startpos") if err != nil { return nil, err } signature, err := strFromMap(config, "signature") if err == nil { c.params["signature"] = signature } } return c, nil } func (c *workceptorCommand) processSignature(workType, signature string, connIsUnix, signWork bool) error { shouldVerifySignature := c.w.ShouldVerifySignature(workType, signWork) if !shouldVerifySignature && signature != "" { return fmt.Errorf("work type did not expect a signature") } if shouldVerifySignature && !connIsUnix { err := c.w.VerifySignature(signature) if err != nil { return err } } return nil } func getSignWorkFromStatus(status *StatusFileData) bool { red, ok := status.ExtraData.(*RemoteExtraData) if ok { return red.SignWork } return false } // Worker function called by the control service to process a "work" command. 
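// Editorial note (not part of the receptor source): the textual command forms
// accepted by InitFromString above, as issued through the control service
// ("work" is the command name; node, worktype and unitID are placeholders):
//
//	work submit <node> <worktype> [params ...]
//	work list [unitID]
//	work status <unitID>
//	work cancel <unitID>
//	work release <unitID>        (similarly: work force-release <unitID>)
//	work results <unitID> [startpos]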
func (c *workceptorCommand) ControlFunc(ctx context.Context, nc controlsvc.NetceptorForControlCommand, cfo controlsvc.ControlFuncOperations) (map[string]interface{}, error) { addr := cfo.RemoteAddr() connIsUnix := false if addr.Network() == "unix" { connIsUnix = true } switch c.subcommand { case "submit": workNode, err := strFromMap(c.params, "node") if err != nil { return nil, err } workType, err := strFromMap(c.params, "worktype") if err != nil { return nil, err } tlsClient, err := strFromMap(c.params, "tlsclient") if err != nil { tlsClient = "" // optional so don't return } ttl, err := strFromMap(c.params, "ttl") if err != nil { ttl = "" } signWork, err := boolFromMap(c.params, "signwork") if err != nil { signWork = false } signature, err := strFromMap(c.params, "signature") if err != nil { signature = "" } workUnitID, err := strFromMap(c.params, "workUnitID") if err != nil { workUnitID = "" } workParams := make(map[string]string) nonParams := []string{"command", "subcommand", "node", "worktype", "tlsclient", "ttl", "signwork", "signature"} inNonParams := func(p string) bool { for _, nonparam := range nonParams { if p == nonparam { return true } } return false } for k, v := range c.params { if ok := inNonParams(k); ok { continue } vStr, ok := v.(string) if !ok { return nil, fmt.Errorf("%s must be a string", k) } workParams[k] = vStr } err = c.processSignature(workType, signature, connIsUnix, signWork) if err != nil { return nil, err } isLocalHost := strings.EqualFold(workNode, "localhost") var worker WorkUnit if workNode == nc.NodeID() || isLocalHost { if ttl != "" { return nil, fmt.Errorf("ttl option is intended for remote work only") } worker, err = c.w.AllocateUnit(workType, workUnitID, workParams) } else { worker, err = c.w.AllocateRemoteUnit(workNode, workType, workUnitID, tlsClient, ttl, signWork, workParams) } if err != nil { return nil, err } cfr := make(map[string]interface{}) cfr["unitid"] = worker.ID() stdin, err := os.OpenFile(path.Join(worker.UnitDir(), "stdin"), os.O_CREATE+os.O_WRONLY, 0o600) if err != nil { return nil, err } worker.UpdateBasicStatus(WorkStatePending, "Waiting for Input Data", 0) err = cfo.ReadFromConn(fmt.Sprintf("Work unit created with ID %s. 
Send stdin data and EOF.\n", worker.ID()), stdin, &controlsvc.SocketConnIO{}) if err != nil { worker.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Error reading input data: %s", err), 0) return nil, err } err = stdin.Close() if err != nil { worker.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Error reading input data: %s", err), 0) return nil, err } worker.UpdateBasicStatus(WorkStatePending, "Starting Worker", 0) err = worker.Start() if err != nil && !IsPending(err) { worker.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Error starting worker: %s", err), 0) return cfr, err } if IsPending(err) { cfr["result"] = "Job Submitted" } else { cfr["result"] = "Job Started" } return cfr, nil case "list": var unitList []string targetUnitID, ok := c.params["unitid"].(string) if ok { unitList = append(unitList, targetUnitID) } else { unitList = c.w.ListKnownUnitIDs() } cfr := make(map[string]interface{}) for i := range unitList { unitID := unitList[i] status, err := c.w.unitStatusForCFR(unitID) if err != nil { return nil, err } cfr[unitID] = status } return cfr, nil case "status": unitid, err := strFromMap(c.params, "unitid") if err != nil { return nil, err } cfr, err := c.w.unitStatusForCFR(unitid) if err != nil { return nil, err } return cfr, nil case "cancel", "release", "force-release": unitid, err := strFromMap(c.params, "unitid") if err != nil { return nil, err } signature, err := strFromMap(c.params, "signature") if err != nil { signature = "" } cfr := make(map[string]interface{}) var pendingMsg string var completeMsg string if c.subcommand == "cancel" { pendingMsg = "cancel pending" completeMsg = "cancelled" } else { pendingMsg = "release pending" completeMsg = "released" } unit, err := c.w.findUnit(unitid) if err != nil { cfr["unit not found"] = unitid return cfr, err } status := unit.Status() signWork := getSignWorkFromStatus(status) err = c.processSignature(status.WorkType, signature, connIsUnix, signWork) if err != nil { return nil, err } if c.subcommand == "cancel" { err = unit.Cancel() } else { err = unit.Release(c.subcommand == "force-release") } if err != nil && !IsPending(err) { return nil, err } if IsPending(err) { cfr[pendingMsg] = unitid } else { cfr[completeMsg] = unitid } return cfr, nil case "results": unitid, err := strFromMap(c.params, "unitid") if err != nil { return nil, err } startPos, err := intFromMap(c.params, "startpos") if err != nil { return nil, err } signature, err := strFromMap(c.params, "signature") if err != nil { signature = "" } unit, err := c.w.findUnit(unitid) if err != nil { return nil, err } status := unit.Status() signWork := getSignWorkFromStatus(status) err = c.processSignature(status.WorkType, signature, connIsUnix, signWork) if err != nil { return nil, err } resultChan, err := c.w.GetResults(ctx, unitid, startPos) if err != nil { return nil, err } err = cfo.WriteToConn(fmt.Sprintf("Streaming results for work unit %s\n", unitid), resultChan) if err != nil { return nil, err } err = cfo.Close() if err != nil { return nil, err } return nil, nil } return nil, fmt.Errorf("bad command") } receptor-1.5.3/pkg/workceptor/controlsvc_test.go000066400000000000000000000034261475465740700221350ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "testing" "github.com/ansible/receptor/pkg/controlsvc" ) func Test_workceptorCommandTypeInitFromString(t *testing.T) { type fields struct { w *Workceptor } type args struct { params string } tests := []struct { name string fields fields args args want 
controlsvc.ControlCommand wantErr bool }{ { name: "Positive cancel", fields: fields{ w: nil, }, args: args{ params: "cancel u", }, wantErr: false, }, { name: "Positive force-release", fields: fields{ w: nil, }, args: args{ params: "force-release u", }, wantErr: false, }, { name: "Positive list", fields: fields{ w: nil, }, args: args{ params: "list", }, wantErr: false, }, { name: "Positive release", fields: fields{ w: nil, }, args: args{ params: "release u", }, wantErr: false, }, { name: "Positive results", fields: fields{ w: nil, }, args: args{ params: "results u", }, wantErr: false, }, { name: "Positive status", fields: fields{ w: nil, }, args: args{ params: "status u", }, wantErr: false, }, { name: "Positive submit", fields: fields{ w: nil, }, args: args{ params: "submit n w", }, wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tr := &workceptorCommandType{ w: tt.fields.w, } got, err := tr.InitFromString(tt.args.params) if (err != nil) != tt.wantErr { t.Errorf("workceptorCommandType.InitFromString() error = %v, wantErr %v", err, tt.wantErr) return } if got == nil { t.Errorf("workceptorCommandType.InitFromString() returned nil") } }) } } receptor-1.5.3/pkg/workceptor/interfaces.go000066400000000000000000000021751475465740700210250ustar00rootroot00000000000000package workceptor // WorkUnit represents a local unit of work. type WorkUnit interface { ID() string UnitDir() string StatusFileName() string StdoutFileName() string Save() error Load() error SetFromParams(params map[string]string) error UpdateBasicStatus(state int, detail string, stdoutSize int64) UpdateFullStatus(statusFunc func(*StatusFileData)) LastUpdateError() error Status() *StatusFileData UnredactedStatus() *StatusFileData Start() error Restart() error Cancel() error Release(force bool) error } type WorkerConfig interface { GetWorkType() string GetVerifySignature() bool NewWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit } // NewWorkerFunc represents a factory of WorkUnit instances. type NewWorkerFunc func(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit // StatusFileData is the structure of the JSON data saved to a status file. // This struct should only contain value types, except for ExtraData. 
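// Editorial sketch (not part of the receptor source): a status file for a
// command work unit might look roughly like the JSON below; the concrete
// values (state number, detail text, sizes) are made up for illustration.
//
//	{
//	  "State": 1,
//	  "Detail": "Running: PID 12345",
//	  "StdoutSize": 2048,
//	  "WorkType": "mycommand",
//	  "ExtraData": {"Pid": 12345, "Params": "--verbose"}
//	}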
type StatusFileData struct { State int Detail string StdoutSize int64 WorkType string ExtraData interface{} } receptor-1.5.3/pkg/workceptor/json_test.go000066400000000000000000000030721475465740700207070ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "context" "os" "testing" "github.com/ansible/receptor/pkg/netceptor" ) func newCommandWorker(_ BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit { cw := &commandUnit{ BaseWorkUnitForWorkUnit: &BaseWorkUnit{ status: StatusFileData{ ExtraData: &CommandExtraData{}, }, }, command: "echo", baseParams: "foo", allowRuntimeParams: true, } cw.BaseWorkUnitForWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) return cw } func TestWorkceptorJson(t *testing.T) { tmpdir, err := os.MkdirTemp(os.TempDir(), "receptor-test-*") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) nc := netceptor.New(context.TODO(), "test") w, err := New(context.Background(), nc, tmpdir) if err != nil { t.Fatal(err) } err = w.RegisterWorker("command", newCommandWorker, false) if err != nil { t.Fatal(err) } cw, err := w.AllocateUnit("command", "", make(map[string]string)) if err != nil { t.Fatal(err) } cw.UpdateFullStatus(func(status *StatusFileData) { ed, ok := status.ExtraData.(*CommandExtraData) if !ok { t.Fatal("ExtraData type assertion failed") } ed.Pid = 12345 }) err = cw.Save() if err != nil { t.Fatal(err) } cw2 := newCommandWorker(nil, w, cw.ID(), "command") err = cw2.Load() if err != nil { t.Fatal(err) } ed2, ok := cw2.Status().ExtraData.(*CommandExtraData) if !ok { t.Fatal("ExtraData type assertion failed") } if ed2.Pid != 12345 { t.Fatal("PID did not make it through") } } receptor-1.5.3/pkg/workceptor/kubernetes.go000066400000000000000000001440311475465740700210470ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "bufio" "context" "errors" "fmt" "io" "net" "net/url" "os" "strconv" "strings" "sync" "time" "github.com/ghjm/cmdline" "github.com/google/shlex" "github.com/spf13/viper" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/remotecommand" watch2 "k8s.io/client-go/tools/watch" "k8s.io/client-go/util/flowcontrol" ) // KubeUnit implements the WorkUnit interface. type KubeUnit struct { BaseWorkUnitForWorkUnit authMethod string streamMethod string baseParams string allowRuntimeAuth bool allowRuntimeCommand bool allowRuntimeParams bool allowRuntimePod bool deletePodOnRestart bool namePrefix string config *rest.Config clientset *kubernetes.Clientset pod *corev1.Pod podPendingTimeout time.Duration } // kubeExtraData is the content of the ExtraData JSON field for a Kubernetes worker. 
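// KubeConfig and KubePod can contain sensitive material; Status() returns a copy with both fields blanked, while UnredactedStatus() keeps them intact.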
type KubeExtraData struct { Image string Command string Params string KubeNamespace string KubeConfig string KubePod string PodName string } type KubeAPIer interface { NewNotFound(schema.GroupResource, string) *apierrors.StatusError OneTermEqualSelector(string, string) fields.Selector NewForConfig(*rest.Config) (*kubernetes.Clientset, error) GetLogs(*kubernetes.Clientset, string, string, *corev1.PodLogOptions) *rest.Request Get(context.Context, *kubernetes.Clientset, string, string, metav1.GetOptions) (*corev1.Pod, error) Create(context.Context, *kubernetes.Clientset, string, *corev1.Pod, metav1.CreateOptions) (*corev1.Pod, error) List(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (*corev1.PodList, error) Watch(context.Context, *kubernetes.Clientset, string, metav1.ListOptions) (watch.Interface, error) Delete(context.Context, *kubernetes.Clientset, string, string, metav1.DeleteOptions) error SubResource(*kubernetes.Clientset, string, string) *rest.Request InClusterConfig() (*rest.Config, error) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules BuildConfigFromFlags(string, string) (*rest.Config, error) NewClientConfigFromBytes([]byte) (clientcmd.ClientConfig, error) NewSPDYExecutor(*rest.Config, string, *url.URL) (remotecommand.Executor, error) StreamWithContext(context.Context, remotecommand.Executor, remotecommand.StreamOptions) error UntilWithSync(context.Context, cache.ListerWatcher, runtime.Object, watch2.PreconditionFunc, ...watch2.ConditionFunc) (*watch.Event, error) NewFakeNeverRateLimiter() flowcontrol.RateLimiter NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter } type KubeAPIWrapper struct{} func (ku KubeAPIWrapper) NewNotFound(qualifiedResource schema.GroupResource, name string) *apierrors.StatusError { return apierrors.NewNotFound(qualifiedResource, name) } func (ku KubeAPIWrapper) OneTermEqualSelector(k string, v string) fields.Selector { return fields.OneTermEqualSelector(k, v) } func (ku KubeAPIWrapper) NewForConfig(c *rest.Config) (*kubernetes.Clientset, error) { return kubernetes.NewForConfig(c) } func (ku KubeAPIWrapper) GetLogs(clientset *kubernetes.Clientset, namespace string, name string, opts *corev1.PodLogOptions) *rest.Request { return clientset.CoreV1().Pods(namespace).GetLogs(name, opts) } func (ku KubeAPIWrapper) Get(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.GetOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Get(ctx, name, opts) } func (ku KubeAPIWrapper) Create(ctx context.Context, clientset *kubernetes.Clientset, namespace string, pod *corev1.Pod, opts metav1.CreateOptions) (*corev1.Pod, error) { return clientset.CoreV1().Pods(namespace).Create(ctx, pod, opts) } func (ku KubeAPIWrapper) List(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (*corev1.PodList, error) { return clientset.CoreV1().Pods(namespace).List(ctx, opts) } func (ku KubeAPIWrapper) Watch(ctx context.Context, clientset *kubernetes.Clientset, namespace string, opts metav1.ListOptions) (watch.Interface, error) { return clientset.CoreV1().Pods(namespace).Watch(ctx, opts) } func (ku KubeAPIWrapper) Delete(ctx context.Context, clientset *kubernetes.Clientset, namespace string, name string, opts metav1.DeleteOptions) error { return clientset.CoreV1().Pods(namespace).Delete(ctx, name, opts) } func (ku KubeAPIWrapper) SubResource(clientset *kubernetes.Clientset, podName string, podNamespace string) *rest.Request { return 
clientset.CoreV1().RESTClient().Post().Resource("pods").Name(podName).Namespace(podNamespace).SubResource("attach") } func (ku KubeAPIWrapper) InClusterConfig() (*rest.Config, error) { return rest.InClusterConfig() } func (ku KubeAPIWrapper) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules { return clientcmd.NewDefaultClientConfigLoadingRules() } func (ku KubeAPIWrapper) BuildConfigFromFlags(masterURL string, kubeconfigPath string) (*rest.Config, error) { return clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath) } func (ku KubeAPIWrapper) NewClientConfigFromBytes(configBytes []byte) (clientcmd.ClientConfig, error) { return clientcmd.NewClientConfigFromBytes(configBytes) } func (ku KubeAPIWrapper) NewSPDYExecutor(config *rest.Config, method string, url *url.URL) (remotecommand.Executor, error) { return remotecommand.NewSPDYExecutor(config, method, url) } func (ku KubeAPIWrapper) StreamWithContext(ctx context.Context, exec remotecommand.Executor, options remotecommand.StreamOptions) error { return exec.StreamWithContext(ctx, options) } func (ku KubeAPIWrapper) UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition watch2.PreconditionFunc, conditions ...watch2.ConditionFunc) (*watch.Event, error) { return watch2.UntilWithSync(ctx, lw, objType, precondition, conditions...) } func (ku KubeAPIWrapper) NewFakeNeverRateLimiter() flowcontrol.RateLimiter { return flowcontrol.NewFakeNeverRateLimiter() } func (ku KubeAPIWrapper) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { return flowcontrol.NewFakeAlwaysRateLimiter() } // KubeAPIWrapperInstance is a package level var that wraps all required kubernetes API calls. // It is instantiated in the NewkubeWorker function and available throughout the package. var KubeAPIWrapperInstance KubeAPIer var KubeAPIWrapperLock sync.Mutex // ErrPodCompleted is returned when pod has already completed before we could attach. var ErrPodCompleted = fmt.Errorf("pod ran to completion") // ErrPodFailed is returned when pod has failed before we could attach. var ErrPodFailed = fmt.Errorf("pod failed to start") // ErrImagePullBackOff is returned when the image for the container in the Pod cannot be pulled. var ErrImagePullBackOff = fmt.Errorf("container failed to start") // podRunningAndReady is a completion criterion for pod ready to be attached to. 
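// The returned closure reports ready once the pod is Running with the PodReady condition true, and returns an error if the pod fails, completes before we can attach, or stays in ImagePullBackOff after a few retries.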
func podRunningAndReady() func(event watch.Event) (bool, error) { imagePullBackOffRetries := 3 inner := func(event watch.Event) (bool, error) { if event.Type == watch.Deleted { return false, KubeAPIWrapperInstance.NewNotFound(schema.GroupResource{Resource: "pods"}, "") } if t, ok := event.Object.(*corev1.Pod); ok { switch t.Status.Phase { case corev1.PodFailed: return false, ErrPodFailed case corev1.PodSucceeded: return false, ErrPodCompleted case corev1.PodRunning, corev1.PodPending: conditions := t.Status.Conditions if conditions == nil { return false, nil } for i := range conditions { if conditions[i].Type == corev1.PodReady && conditions[i].Status == corev1.ConditionTrue { return true, nil } if conditions[i].Type == corev1.ContainersReady && conditions[i].Status == corev1.ConditionFalse { statuses := t.Status.ContainerStatuses for j := range statuses { if statuses[j].State.Waiting != nil { if statuses[j].State.Waiting.Reason == "ImagePullBackOff" { if imagePullBackOffRetries == 0 { return false, ErrImagePullBackOff } imagePullBackOffRetries-- } } } } } } } return false, nil } return inner } func GetTimeoutOpenLogstream(kw *KubeUnit) int { // RECEPTOR_OPEN_LOGSTREAM_TIMEOUT // default: 1 openLogStreamTimeout := 1 envTimeout := os.Getenv("RECEPTOR_OPEN_LOGSTREAM_TIMEOUT") if envTimeout != "" { var err error openLogStreamTimeout, err = strconv.Atoi(envTimeout) if err != nil || openLogStreamTimeout < 1 { // ignore error, use default kw.GetWorkceptor().nc.GetLogger().Warning("Invalid value for RECEPTOR_OPEN_LOGSTREAM_TIMEOUT: %s. Ignoring", envTimeout) openLogStreamTimeout = 1 } } kw.GetWorkceptor().nc.GetLogger().Debug("RECEPTOR_OPEN_LOGSTREAM_TIMEOUT: %d", openLogStreamTimeout) return openLogStreamTimeout } func (kw *KubeUnit) kubeLoggingConnectionHandler(timestamps bool, sinceTime time.Time) (io.ReadCloser, error) { var logStream io.ReadCloser var err error podNamespace := kw.pod.Namespace podName := kw.pod.Name podOptions := &corev1.PodLogOptions{ Container: "worker", Follow: true, } if timestamps { podOptions.Timestamps = true podOptions.SinceTime = &metav1.Time{Time: sinceTime} } KubeAPIWrapperLock.Lock() logReq := KubeAPIWrapperInstance.GetLogs(kw.clientset, podNamespace, podName, podOptions) KubeAPIWrapperLock.Unlock() // get logstream, with retry for retries := 5; retries > 0; retries-- { logStream, err = logReq.Stream(kw.GetContext()) if err == nil { break } kw.GetWorkceptor().nc.GetLogger().Warning( "Error opening log stream for pod %s/%s. Will retry %d more times. Error: %s", podNamespace, podName, retries, err, ) time.Sleep(time.Duration(GetTimeoutOpenLogstream(kw)) * time.Second) } if err != nil { errMsg := fmt.Sprintf("Error opening log stream for pod %s/%s. Error: %s", podNamespace, podName, err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return nil, err } return logStream, nil } func (kw *KubeUnit) kubeLoggingNoReconnect(streamWait *sync.WaitGroup, stdout *STDoutWriter, stdoutErr *error) { // Legacy method, for use on k8s < v1.23.14 // uses io.Copy to stream data from pod to stdout file // known issues around this, as logstream can terminate due to log rotation // or 4 hr timeout defer streamWait.Done() podNamespace := kw.pod.Namespace podName := kw.pod.Name logStream, err := kw.kubeLoggingConnectionHandler(false, time.Time{}) if err != nil { return } _, *stdoutErr = io.Copy(stdout, logStream) if *stdoutErr != nil { kw.GetWorkceptor().nc.GetLogger().Error( "Error streaming pod logs to stdout for pod %s/%s. 
Error: %s", podNamespace, podName, *stdoutErr, ) } } func (kw *KubeUnit) KubeLoggingWithReconnect(streamWait *sync.WaitGroup, stdout *STDoutWriter, stdinErr *error, stdoutErr *error) { // preferred method for k8s >= 1.23.14 defer streamWait.Done() var sinceTime time.Time var err error podNamespace := kw.pod.Namespace podName := kw.pod.Name retries := 5 successfulWrite := false remainingRetries := retries // resets on each successful read from pod stdout for { if *stdinErr != nil { // fail to send stdin to pod, no need to continue return } // get pod, with retry for retries := 5; retries > 0; retries-- { KubeAPIWrapperLock.Lock() kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) KubeAPIWrapperLock.Unlock() if err == nil { break } kw.GetWorkceptor().nc.GetLogger().Warning( "Error getting pod %s/%s. Will retry %d more times. Error: %s", podNamespace, podName, retries, err, ) time.Sleep(time.Second) } if err != nil { errMsg := fmt.Sprintf("Error getting pod %s/%s. Error: %s", podNamespace, podName, err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) // fail to get pod, no need to continue return } logStream, err := kw.kubeLoggingConnectionHandler(true, sinceTime) if err != nil { // fail to get log stream, no need to continue return } // read from logstream streamReader := bufio.NewReader(logStream) for *stdinErr == nil { // check between every line read to see if we need to stop reading line, err := streamReader.ReadString('\n') if err != nil { if kw.GetContext().Err() == context.Canceled { kw.GetWorkceptor().nc.GetLogger().Info( "Context was canceled while reading logs for pod %s/%s. Assuming pod has finished", podNamespace, podName, ) return } kw.GetWorkceptor().nc.GetLogger().Info( "Detected Error: %s for pod %s/%s. Will retry %d more times.", err, podNamespace, podName, remainingRetries, ) successfulWrite = false remainingRetries-- if remainingRetries > 0 { time.Sleep(200 * time.Millisecond) break } kw.GetWorkceptor().nc.GetLogger().Error("Error reading from pod %s/%s: %s", podNamespace, podName, err) // At this point we exausted all retries, every retry we either failed to read OR we read but did not get newer msg // If we got a EOF on the last retry we assume that we read everything and we can stop the loop // we ASSUME this is the happy path. // If kube api returned an error there is a missing new line and that line never gets read. 
if err != io.EOF { *stdoutErr = err } else if line != "" && err == io.EOF { _, err = stdout.Write([]byte(line + "\n")) if err != nil { *stdoutErr = fmt.Errorf("writing to stdout: %s", err) kw.GetWorkceptor().nc.GetLogger().Error("Error writing to stdout: %s", err) return } } return } split := strings.SplitN(line, " ", 2) msg := line timestamp := ParseTime(split[0]) if timestamp != nil { if !timestamp.After(sinceTime) && !successfulWrite { continue } sinceTime = *timestamp msg = split[1] } else { kw.GetWorkceptor().nc.GetLogger().Debug("No timestamp received, log line: '%s'", line) } _, err = stdout.Write([]byte(msg)) if err != nil { *stdoutErr = fmt.Errorf("writing to stdout: %s", err) kw.GetWorkceptor().nc.GetLogger().Error("Error writing to stdout: %s", err) return } remainingRetries = retries // each time we read successfully, reset this counter successfulWrite = true } logStream.Close() } } func (kw *KubeUnit) CreatePod(env map[string]string) error { ked := kw.UnredactedStatus().ExtraData.(*KubeExtraData) command, err := shlex.Split(ked.Command) if err != nil { return err } params, err := shlex.Split(ked.Params) if err != nil { return err } pod := &corev1.Pod{} var spec *corev1.PodSpec var objectMeta *metav1.ObjectMeta if ked.KubePod != "" { decode := scheme.Codecs.UniversalDeserializer().Decode _, _, err := decode([]byte(ked.KubePod), nil, pod) if err != nil { return err } foundWorker := false spec = &pod.Spec for i := range spec.Containers { if spec.Containers[i].Name == "worker" { spec.Containers[i].Stdin = true spec.Containers[i].StdinOnce = true foundWorker = true break } } if !foundWorker { return fmt.Errorf("at least one container must be named worker") } spec.RestartPolicy = corev1.RestartPolicyNever userNamespace := pod.ObjectMeta.Namespace if userNamespace != "" { ked.KubeNamespace = userNamespace } userPodName := pod.ObjectMeta.Name if userPodName != "" { kw.namePrefix = userPodName + "-" } objectMeta = &pod.ObjectMeta objectMeta.Name = "" objectMeta.GenerateName = kw.namePrefix objectMeta.Namespace = ked.KubeNamespace } else { objectMeta = &metav1.ObjectMeta{ GenerateName: kw.namePrefix, Namespace: ked.KubeNamespace, } spec = &corev1.PodSpec{ Containers: []corev1.Container{{ Name: "worker", Image: ked.Image, Command: command, Args: params, Stdin: true, StdinOnce: true, TTY: false, }}, RestartPolicy: corev1.RestartPolicyNever, } } pod = &corev1.Pod{ ObjectMeta: *objectMeta, Spec: *spec, } if env != nil { evs := make([]corev1.EnvVar, 0) for k, v := range env { evs = append(evs, corev1.EnvVar{ Name: k, Value: v, }) } pod.Spec.Containers[0].Env = evs } KubeAPIWrapperLock.Lock() // get pod and store to kw.pod kw.pod, err = KubeAPIWrapperInstance.Create(kw.GetContext(), kw.clientset, ked.KubeNamespace, pod, metav1.CreateOptions{}) KubeAPIWrapperLock.Unlock() if err != nil { return err } select { case <-kw.GetContext().Done(): return fmt.Errorf("cancelled") default: } kw.UpdateFullStatus(func(status *StatusFileData) { status.State = WorkStatePending status.Detail = "Pod created" status.StdoutSize = 0 status.ExtraData.(*KubeExtraData).PodName = kw.pod.Name }) KubeAPIWrapperLock.Lock() // Wait for the pod to be running fieldSelector := KubeAPIWrapperInstance.OneTermEqualSelector("metadata.name", kw.pod.Name).String() KubeAPIWrapperLock.Unlock() lw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { options.FieldSelector = fieldSelector return KubeAPIWrapperInstance.List(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, WatchFunc: 
func(options metav1.ListOptions) (watch.Interface, error) { options.FieldSelector = fieldSelector return KubeAPIWrapperInstance.Watch(kw.GetContext(), kw.clientset, ked.KubeNamespace, options) }, } ctxPodReady := kw.GetContext() if kw.podPendingTimeout != time.Duration(0) { var ctxPodCancel context.CancelFunc ctxPodReady, ctxPodCancel = context.WithTimeout(kw.GetContext(), kw.podPendingTimeout) defer ctxPodCancel() } time.Sleep(2 * time.Second) KubeAPIWrapperLock.Lock() ev, err := KubeAPIWrapperInstance.UntilWithSync(ctxPodReady, lw, &corev1.Pod{}, nil, podRunningAndReady()) KubeAPIWrapperLock.Unlock() if ev == nil || ev.Object == nil { return fmt.Errorf("did not return an event while watching pod for work unit %s", kw.ID()) } var ok bool kw.pod, ok = ev.Object.(*corev1.Pod) if !ok { return fmt.Errorf("watch did not return a pod") } if err == ErrPodCompleted { // Hao: shouldn't we also call kw.Cancel() in these cases? for _, cstat := range kw.pod.Status.ContainerStatuses { if cstat.Name == "worker" { if cstat.State.Terminated != nil && cstat.State.Terminated.ExitCode != 0 { return fmt.Errorf("container failed with exit code %d: %s", cstat.State.Terminated.ExitCode, cstat.State.Terminated.Message) } break } } return err } else if err != nil { // any other error besides ErrPodCompleted stdout, err2 := NewStdoutWriter(FileSystem{}, kw.UnitDir()) if err2 != nil { errMsg := fmt.Sprintf("Error opening stdout file: %s", err2) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return fmt.Errorf(errMsg) //nolint:govet,staticcheck } var stdoutErr error var streamWait sync.WaitGroup streamWait.Add(1) go kw.kubeLoggingNoReconnect(&streamWait, stdout, &stdoutErr) streamWait.Wait() kw.Cancel() if len(kw.pod.Status.ContainerStatuses) == 1 { if kw.pod.Status.ContainerStatuses[0].State.Waiting != nil { return fmt.Errorf("%s, %s", err.Error(), kw.pod.Status.ContainerStatuses[0].State.Waiting.Reason) } for _, cstat := range kw.pod.Status.ContainerStatuses { if cstat.Name == "worker" { if cstat.State.Waiting != nil { return fmt.Errorf("%s, %s", err.Error(), cstat.State.Waiting.Reason) } if cstat.State.Terminated != nil && cstat.State.Terminated.ExitCode != 0 { return fmt.Errorf("%s, exit code %d: %s", err.Error(), cstat.State.Terminated.ExitCode, cstat.State.Terminated.Message) } break } } } return err } return nil } func (kw *KubeUnit) runWorkUsingLogger() { skipStdin := true status := kw.Status() ked := status.ExtraData.(*KubeExtraData) podName := ked.PodName podNamespace := ked.KubeNamespace if podName == "" { // create new pod if ked.PodName is empty // TODO: add retry logic to make this more resilient to transient errors if err := kw.CreatePod(nil); err != nil { if err != ErrPodCompleted { errMsg := fmt.Sprintf("Error creating pod: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } } else { // for newly created pod we need to stream stdin skipStdin = false } podName = kw.pod.Name podNamespace = kw.pod.Namespace } else { if podNamespace == "" { errMsg := fmt.Sprintf("Error creating pod: pod namespace is empty for pod %s", podName, ) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } // resuming from a previously created pod var err error for retries := 5; retries > 0; retries-- { // check if the kw.ctx is already cancel select { case <-kw.GetContext().Done(): errMsg := fmt.Sprintf("Context Done 
while getting pod %s/%s. Error: %s", podNamespace, podName, kw.GetContext().Err()) kw.GetWorkceptor().nc.GetLogger().Warning(errMsg) //nolint:govet return default: } KubeAPIWrapperLock.Lock() kw.pod, err = KubeAPIWrapperInstance.Get(kw.GetContext(), kw.clientset, podNamespace, podName, metav1.GetOptions{}) KubeAPIWrapperLock.Unlock() if err == nil { break } kw.GetWorkceptor().nc.GetLogger().Warning( "Error getting pod %s/%s. Will retry %d more times. Retrying: %s", podNamespace, podName, retries, err, ) time.Sleep(200 * time.Millisecond) } if err != nil { errMsg := fmt.Sprintf("Error getting pod %s/%s. Error: %s", podNamespace, podName, err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } } // Attach stdin stream to the pod var exec remotecommand.Executor if !skipStdin { KubeAPIWrapperLock.Lock() req := KubeAPIWrapperInstance.SubResource(kw.clientset, podName, podNamespace) KubeAPIWrapperLock.Unlock() req.VersionedParams( &corev1.PodExecOptions{ Container: "worker", Stdin: true, Stdout: false, Stderr: false, TTY: false, }, scheme.ParameterCodec, ) var err error KubeAPIWrapperLock.Lock() exec, err = KubeAPIWrapperInstance.NewSPDYExecutor(kw.config, "POST", req.URL()) KubeAPIWrapperLock.Unlock() if err != nil { errMsg := fmt.Sprintf("Error creating SPDY executor: %s", err) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } } var stdinErr error var stdoutErr error // finishedChan signal the stdin and stdout monitoring goroutine to stop finishedChan := make(chan struct{}) // this will signal the stdin and stdout monitoring goroutine to stop when this function returns defer close(finishedChan) stdinErrChan := make(chan struct{}) // signal that stdin goroutine have errored and stop stdout goroutine // open stdin reader that reads from the work unit's data directory var stdin *STDinReader if !skipStdin { var err error stdin, err = NewStdinReader(FileSystem{}, kw.UnitDir()) if err != nil { if errors.Is(err, errFileSizeZero) { skipStdin = true } else { errMsg := fmt.Sprintf("Error opening stdin file: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } } else { // goroutine to cancel stdin reader go func() { select { case <-kw.GetContext().Done(): stdin.reader.Close() return case <-finishedChan: case <-stdin.Done(): return } }() } } // open stdout writer that writes to work unit's data directory stdout, err := NewStdoutWriter(FileSystem{}, kw.UnitDir()) if err != nil { errMsg := fmt.Sprintf("Error opening stdout file: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) return } // goroutine to cancel stdout stream go func() { select { case <-kw.GetContext().Done(): stdout.writer.Close() return case <-stdinErrChan: stdout.writer.Close() return case <-finishedChan: return } }() streamWait := sync.WaitGroup{} streamWait.Add(2) if skipStdin { kw.UpdateBasicStatus(WorkStateRunning, "Pod Running", stdout.Size()) streamWait.Done() } else { go func() { defer streamWait.Done() kw.UpdateFullStatus(func(status *StatusFileData) { status.State = WorkStatePending status.Detail = "Sending stdin to pod" }) var err error for retries := 5; retries > 0; retries-- { KubeAPIWrapperLock.Lock() err = KubeAPIWrapperInstance.StreamWithContext(kw.GetContext(), exec, remotecommand.StreamOptions{ Stdin: stdin, Tty: false, }) KubeAPIWrapperLock.Unlock() if err != nil { // NOTE: io.EOF for stdin is 
handled by remotecommand and will not trigger this kw.GetWorkceptor().nc.GetLogger().Warning( "Error streaming stdin to pod %s/%s. Will retry %d more times. Error: %s", podNamespace, podName, retries, err, ) time.Sleep(200 * time.Millisecond) } else { break } } if err != nil { stdinErr = err errMsg := fmt.Sprintf( "Error streaming stdin to pod %s/%s. Error: %s", podNamespace, podName, err, ) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, stdout.Size()) close(stdinErrChan) // signal STDOUT goroutine to stop } else { if stdin.Error() == io.EOF { kw.UpdateBasicStatus(WorkStateRunning, "Pod Running", stdout.Size()) } else { // this is probably not possible... errMsg := fmt.Sprintf("Error reading stdin: %s", stdin.Error()) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.GetWorkceptor().nc.GetLogger().Error("Pod status at time of error %s", kw.pod.Status.String()) kw.UpdateBasicStatus(WorkStateFailed, errMsg, stdout.Size()) close(stdinErrChan) // signal STDOUT goroutine to stop } } }() } stdoutWithReconnect := ShouldUseReconnect(kw) if stdoutWithReconnect && stdoutErr == nil { kw.GetWorkceptor().nc.GetLogger().Debug("streaming stdout with reconnect support") go kw.KubeLoggingWithReconnect(&streamWait, stdout, &stdinErr, &stdoutErr) } else { kw.GetWorkceptor().nc.GetLogger().Debug("streaming stdout with no reconnect support") go kw.kubeLoggingNoReconnect(&streamWait, stdout, &stdoutErr) } streamWait.Wait() if stdinErr != nil || stdoutErr != nil { var errDetail string switch { case stdinErr == nil: errDetail = fmt.Sprintf("Error with pod's stdout: %s", stdoutErr) case stdoutErr == nil: errDetail = fmt.Sprintf("Error with pod's stdin: %s", stdinErr) default: errDetail = fmt.Sprintf("Error running pod. stdin: %s, stdout: %s", stdinErr, stdoutErr) } if kw.GetContext().Err() != context.Canceled { kw.UpdateBasicStatus(WorkStateFailed, errDetail, stdout.Size()) } return } // only transition from WorkStateRunning to WorkStateSucceeded if WorkStateFailed is set we do not override if kw.GetContext().Err() != context.Canceled && kw.Status().State == WorkStateRunning { kw.UpdateBasicStatus(WorkStateSucceeded, "Finished", stdout.Size()) } } func IsCompatibleK8S(kw *KubeUnit, versionStr string) bool { semver, err := version.ParseSemantic(versionStr) if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("could parse Kubernetes server version %s, will not use reconnect support", versionStr) return false } // ignore pre-release in version comparison semver = semver.WithPreRelease("") // The patch was backported to minor version 23, 24 and 25 // We check z stream based on the minor version // if minor versions < 23, set to high value (e.g. v1.22.9999) // if minor versions == 23, compare with v1.23.14 // if minor version == 24, compare with v1.24.8 // if minor version == 25, compare with v1.25.4 // if minor versions > 23, compare with low value (e.g. 
v1.26.0) var compatibleVer string switch { case semver.Minor() == 23: compatibleVer = "v1.23.14" case semver.Minor() == 24: compatibleVer = "v1.24.8" case semver.Minor() == 25: compatibleVer = "v1.25.4" case semver.Minor() > 25: compatibleVer = fmt.Sprintf("%d.%d.0", semver.Major(), semver.Minor()) default: compatibleVer = fmt.Sprintf("%d.%d.9999", semver.Major(), semver.Minor()) } if semver.AtLeast(version.MustParseSemantic(compatibleVer)) { kw.GetWorkceptor().nc.GetLogger().Debug("Kubernetes version %s is at least %s, using reconnect support", semver, compatibleVer) return true } kw.GetWorkceptor().nc.GetLogger().Debug("Kubernetes version %s not at least %s, not using reconnect support", semver, compatibleVer) return false } func ShouldUseReconnect(kw *KubeUnit) bool { // Support for streaming from pod with timestamps using reconnect method is in all current versions // Can override the detection by setting the RECEPTOR_KUBE_SUPPORT_RECONNECT // accepted values: "enabled", "disabled", "auto". The default is "enabled" // all invalid values will assume to be "disabled" version := viper.GetInt("version") var env string ok := false switch version { case 2: env = viper.GetString("node.ReceptorKubeSupportReconnect") if env != "" { ok = true } default: env, ok = os.LookupEnv("RECEPTOR_KUBE_SUPPORT_RECONNECT") } if ok { switch env { case "enabled": return true case "disabled": return false case "auto": return true default: return false } } serverVerInfo, err := kw.clientset.ServerVersion() if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("could not detect Kubernetes server version, will not use reconnect support") return false } return IsCompatibleK8S(kw, serverVerInfo.String()) } func ParseTime(s string) *time.Time { t, err := time.Parse(time.RFC3339, s) if err == nil { return &t } t, err = time.Parse(time.RFC3339Nano, s) if err == nil { return &t } return nil } func getDefaultInterface() (string, error) { nifs, err := net.Interfaces() if err != nil { return "", err } for i := range nifs { nif := nifs[i] if nif.Flags&net.FlagUp != 0 && nif.Flags&net.FlagLoopback == 0 { ads, err := nif.Addrs() if err == nil && len(ads) > 0 { for j := range ads { ad := ads[j] ip, ok := ad.(*net.IPNet) if ok { if !ip.IP.IsLoopback() && !ip.IP.IsMulticast() { return ip.IP.String(), nil } } } } } } return "", fmt.Errorf("could not determine local address") } func (kw *KubeUnit) runWorkUsingTCP() { // Create local cancellable context ctx, cancel := kw.GetContext(), kw.GetCancel() defer cancel() // Create the TCP listener lc := net.ListenConfig{} defaultInterfaceIP, err := getDefaultInterface() var li net.Listener if err == nil { li, err = lc.Listen(ctx, "tcp", fmt.Sprintf("%s:", defaultInterfaceIP)) } if ctx.Err() != nil { return } var listenHost, listenPort string if err == nil { listenHost, listenPort, err = net.SplitHostPort(li.Addr().String()) } if err != nil { errMsg := fmt.Sprintf("Error listening: %s", err) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet return } // Wait for a single incoming connection connChan := make(chan *net.TCPConn) go func() { conn, err := li.Accept() lcerr := li.Close() if lcerr != nil { errMsg := fmt.Sprintf("Error closing listener: %+v", lcerr) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet cancel() return } if ctx.Err() != nil { return } var tcpConn *net.TCPConn if err == nil { var ok bool tcpConn, ok = conn.(*net.TCPConn) if !ok { err = 
fmt.Errorf("connection was not a TCPConn") } } if err != nil { errMsg := fmt.Sprintf("Error accepting: %s", err) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet cancel() return } connChan <- tcpConn }() // Create the pod err = kw.CreatePod(map[string]string{"RECEPTOR_HOST": listenHost, "RECEPTOR_PORT": listenPort}) if err != nil { errMsg := fmt.Sprintf("Error creating pod: %s", err) kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet cancel() return } // Wait for the pod to connect back to us var conn *net.TCPConn select { case <-ctx.Done(): return case conn = <-connChan: } // Open stdin reader var stdin *STDinReader stdin, err = NewStdinReader(FileSystem{}, kw.UnitDir()) if err != nil { errMsg := fmt.Sprintf("Error opening stdin file: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) cancel() return } // Open stdout writer stdout, err := NewStdoutWriter(FileSystem{}, kw.UnitDir()) if err != nil { errMsg := fmt.Sprintf("Error opening stdout file: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) cancel() return } kw.UpdateBasicStatus(WorkStatePending, "Sending stdin to pod", 0) // Write stdin to pod go func() { _, err := io.Copy(conn, stdin) if ctx.Err() != nil { return } cwerr := conn.CloseWrite() if cwerr != nil { errMsg := fmt.Sprintf("Error closing writing side: %+v", cwerr) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) cancel() return } if err != nil { errMsg := fmt.Sprintf("Error sending stdin to pod: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) cancel() return } }() // Goroutine to update status when stdin is fully sent to the pod, which is when we // update from WorkStatePending to WorkStateRunning. 
go func() { select { case <-ctx.Done(): return case <-stdin.Done(): err := stdin.Error() if err == io.EOF { kw.UpdateBasicStatus(WorkStateRunning, "Pod Running", stdout.Size()) } else { kw.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Error reading stdin: %s", err), stdout.Size()) cancel() } } }() // Read stdout from pod _, err = io.Copy(stdout, conn) if ctx.Err() != nil { return } if err != nil { errMsg := fmt.Sprintf("Error reading stdout from pod: %s", err) kw.GetWorkceptor().nc.GetLogger().Error(errMsg) //nolint:govet kw.UpdateBasicStatus(WorkStateFailed, errMsg, 0) cancel() return } if ctx.Err() == nil { kw.UpdateBasicStatus(WorkStateSucceeded, "Finished", stdout.Size()) } } func (kw *KubeUnit) connectUsingKubeconfig() error { var err error ked := kw.UnredactedStatus().ExtraData.(*KubeExtraData) if ked.KubeConfig == "" { KubeAPIWrapperLock.Lock() clr := KubeAPIWrapperInstance.NewDefaultClientConfigLoadingRules() KubeAPIWrapperLock.Unlock() KubeAPIWrapperLock.Lock() kw.config, err = KubeAPIWrapperInstance.BuildConfigFromFlags("", clr.GetDefaultFilename()) KubeAPIWrapperLock.Unlock() if ked.KubeNamespace == "" { c, err := clr.Load() if err != nil { return err } curContext, ok := c.Contexts[c.CurrentContext] if ok && curContext != nil { kw.UpdateFullStatus(func(sfd *StatusFileData) { sfd.ExtraData.(*KubeExtraData).KubeNamespace = curContext.Namespace }) } else { return fmt.Errorf("could not determine namespace") } } } else { KubeAPIWrapperLock.Lock() cfg, err := KubeAPIWrapperInstance.NewClientConfigFromBytes([]byte(ked.KubeConfig)) KubeAPIWrapperLock.Unlock() if err != nil { return err } if ked.KubeNamespace == "" { namespace, _, err := cfg.Namespace() if err != nil { return err } kw.UpdateFullStatus(func(sfd *StatusFileData) { sfd.ExtraData.(*KubeExtraData).KubeNamespace = namespace }) } kw.config, err = cfg.ClientConfig() if err != nil { return err } } if err != nil { return err } return nil } func (kw *KubeUnit) connectUsingIncluster() error { var err error KubeAPIWrapperLock.Lock() kw.config, err = KubeAPIWrapperInstance.InClusterConfig() KubeAPIWrapperLock.Unlock() if err != nil { return err } return nil } func (kw *KubeUnit) connectToKube() error { var err error switch { case kw.authMethod == "kubeconfig" || kw.authMethod == "runtime": err = kw.connectUsingKubeconfig() case kw.authMethod == "incluster": err = kw.connectUsingIncluster() default: return fmt.Errorf("unknown auth method %s", kw.authMethod) } if err != nil { return err } kw.config.QPS = float32(100) kw.config.Burst = 1000 // RECEPTOR_KUBE_CLIENTSET_QPS // default: 100 version := viper.GetInt("version") var envQPS string ok := false switch version { case 2: envQPS = viper.GetString("node.ReceptorKubeClientsetQPS") if envQPS != "" { ok = true } default: envQPS, ok = os.LookupEnv("RECEPTOR_KUBE_CLIENTSET_QPS") } if ok { qps, err := strconv.Atoi(envQPS) if err != nil { // ignore error, use default kw.GetWorkceptor().nc.GetLogger().Warning("Invalid value for RECEPTOR_KUBE_CLIENTSET_QPS: %s. 
Ignoring", envQPS) } else { kw.config.QPS = float32(qps) kw.config.Burst = qps * 10 } } kw.GetWorkceptor().nc.GetLogger().Debug("RECEPTOR_KUBE_CLIENTSET_QPS: %s", envQPS) // RECEPTOR_KUBE_CLIENTSET_BURST // default: 10 x QPS var envBurst string switch version { case 2: envBurst = viper.GetString("node.ReceptorKubeClientsetBurst") if envBurst != "" { ok = true } default: envBurst, ok = os.LookupEnv("RECEPTOR_KUBE_CLIENTSET_BURST") } if ok { burst, err := strconv.Atoi(envBurst) if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Invalid value for RECEPTOR_KUBE_CLIENTSET_BURST: %s. Ignoring", envQPS) } else { kw.config.Burst = burst } } kw.GetWorkceptor().nc.GetLogger().Debug("RECEPTOR_KUBE_CLIENTSET_BURST: %s", envBurst) kw.GetWorkceptor().nc.GetLogger().Debug("Initializing Kubernetes clientset") // RECEPTOR_KUBE_CLIENTSET_RATE_LIMITER // default: tokenbucket // options: never, always, tokenbucket var envRateLimiter string switch version { case 2: envRateLimiter = viper.GetString("node.ReceptorKubeClientsetRateLimiter") if envRateLimiter != "" { ok = true } default: envRateLimiter, ok = os.LookupEnv("RECEPTOR_KUBE_CLIENTSET_RATE_LIMITER") } if ok { switch envRateLimiter { case "never": KubeAPIWrapperLock.Lock() kw.config.RateLimiter = KubeAPIWrapperInstance.NewFakeNeverRateLimiter() KubeAPIWrapperLock.Unlock() case "always": KubeAPIWrapperLock.Lock() kw.config.RateLimiter = KubeAPIWrapperInstance.NewFakeAlwaysRateLimiter() KubeAPIWrapperLock.Unlock() default: } kw.GetWorkceptor().nc.GetLogger().Debug("RateLimiter: %s", envRateLimiter) } kw.GetWorkceptor().nc.GetLogger().Debug("QPS: %f, Burst: %d", kw.config.QPS, kw.config.Burst) KubeAPIWrapperLock.Lock() kw.clientset, err = KubeAPIWrapperInstance.NewForConfig(kw.config) KubeAPIWrapperLock.Unlock() if err != nil { return err } return nil } func readFileToString(filename string) (string, error) { // If filename is "", the function returns "" if filename == "" { return "", nil } content, err := os.ReadFile(filename) if err != nil { return "", err } return string(content), nil } // SetFromParams sets the in-memory state from parameters. 
func (kw *KubeUnit) SetFromParams(params map[string]string) error { ked := kw.GetStatusCopy().ExtraData.(*KubeExtraData) type value struct { name string permission bool setter func(string) error } setString := func(target *string) func(string) error { ssf := func(value string) error { *target = value return nil } return ssf } var err error ked.KubePod, err = readFileToString(ked.KubePod) if err != nil { return fmt.Errorf("could not read pod: %s", err) } ked.KubeConfig, err = readFileToString(ked.KubeConfig) if err != nil { return fmt.Errorf("could not read kubeconfig: %s", err) } userParams := "" userCommand := "" userImage := "" userPod := "" podPendingTimeoutString := "" values := []value{ {name: "kube_command", permission: kw.allowRuntimeCommand, setter: setString(&userCommand)}, {name: "kube_image", permission: kw.allowRuntimeCommand, setter: setString(&userImage)}, {name: "kube_params", permission: kw.allowRuntimeParams, setter: setString(&userParams)}, {name: "kube_namespace", permission: kw.allowRuntimeAuth, setter: setString(&ked.KubeNamespace)}, {name: "secret_kube_config", permission: kw.allowRuntimeAuth, setter: setString(&ked.KubeConfig)}, {name: "secret_kube_pod", permission: kw.allowRuntimePod, setter: setString(&userPod)}, {name: "pod_pending_timeout", permission: kw.allowRuntimeParams, setter: setString(&podPendingTimeoutString)}, } for i := range values { v := values[i] value, ok := params[v.name] if ok && value != "" { if !v.permission { return fmt.Errorf("%s provided but not allowed", v.name) } err := v.setter(value) if err != nil { return fmt.Errorf("error setting value for %s: %s", v.name, err) } } } if kw.authMethod == "runtime" && ked.KubeConfig == "" { return fmt.Errorf("param secret_kube_config must be provided if AuthMethod=runtime") } if userPod != "" && (userParams != "" || userCommand != "" || userImage != "") { return fmt.Errorf("params kube_command, kube_image, kube_params not compatible with secret_kube_pod") } if podPendingTimeoutString != "" { podPendingTimeout, err := time.ParseDuration(podPendingTimeoutString) if err != nil { kw.GetWorkceptor().nc.GetLogger().Error("Failed to parse pod_pending_timeout -- valid examples include '1.5h', '30m', '30m10s'") return err } kw.podPendingTimeout = podPendingTimeout } if userCommand != "" { ked.Command = userCommand } if userImage != "" { ked.Image = userImage } if userPod != "" { ked.KubePod = userPod ked.Image = "" ked.Command = "" kw.baseParams = "" } else { ked.Params = combineParams(kw.baseParams, userParams) } return nil } // Status returns a copy of the status currently loaded in memory. func (kw *KubeUnit) Status() *StatusFileData { status := kw.UnredactedStatus() ed, ok := status.ExtraData.(*KubeExtraData) if ok { ed.KubeConfig = "" ed.KubePod = "" } return status } // Status returns a copy of the status currently loaded in memory. func (kw *KubeUnit) UnredactedStatus() *StatusFileData { kw.GetStatusLock().RLock() status := kw.GetStatusWithoutExtraData() ked, ok := kw.GetStatusCopy().ExtraData.(*KubeExtraData) if ok { kedCopy := *ked status.ExtraData = &kedCopy } kw.GetStatusLock().RUnlock() return status } // startOrRestart is a shared implementation of Start() and Restart(). 
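// It connects to the Kubernetes API, launches the work in a goroutine via either the TCP or log-streaming path, and starts local status monitoring.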
func (kw *KubeUnit) startOrRestart() error { // Connect to the Kubernetes API if err := kw.connectToKube(); err != nil { return err } // Launch runner process if kw.streamMethod == "tcp" { go kw.runWorkUsingTCP() } else { go kw.runWorkUsingLogger() } go kw.MonitorLocalStatus() return nil } // Restart resumes monitoring a job after a Receptor restart. func (kw *KubeUnit) Restart() error { status := kw.Status() ked := status.ExtraData.(*KubeExtraData) if IsComplete(status.State) { return nil } isTCP := kw.streamMethod == "tcp" if status.State == WorkStateRunning && !isTCP { return kw.startOrRestart() } // Work unit is in Pending state if kw.deletePodOnRestart { err := kw.connectToKube() if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } else { KubeAPIWrapperLock.Lock() err := KubeAPIWrapperInstance.Delete(context.Background(), kw.clientset, ked.KubeNamespace, ked.PodName, metav1.DeleteOptions{}) KubeAPIWrapperLock.Unlock() if err != nil { kw.GetWorkceptor().nc.GetLogger().Warning("Pod %s could not be deleted: %s", ked.PodName, err.Error()) } } } if isTCP { return fmt.Errorf("restart not implemented for streammethod tcp") } return fmt.Errorf("work unit is not in running state, cannot be restarted") } // Start launches a job with given parameters. func (kw *KubeUnit) Start() error { kw.UpdateBasicStatus(WorkStatePending, "Connecting to Kubernetes", 0) return kw.startOrRestart() } // Cancel releases resources associated with a job, including cancelling it if running. func (kw *KubeUnit) Cancel() error { kw.CancelContext() kw.UpdateBasicStatus(WorkStateCanceled, "Canceled", -1) if kw.pod != nil { KubeAPIWrapperLock.Lock() err := KubeAPIWrapperInstance.Delete(context.Background(), kw.clientset, kw.pod.Namespace, kw.pod.Name, metav1.DeleteOptions{}) KubeAPIWrapperLock.Unlock() if err != nil { kw.GetWorkceptor().nc.GetLogger().Error("Error deleting pod %s: %s", kw.pod.Name, err) } } if kw.GetCancel() != nil { kw.CancelContext() } return nil } // Release releases resources associated with a job. Implies Cancel. func (kw *KubeUnit) Release(force bool) error { err := kw.Cancel() if err != nil && !force { return err } return kw.BaseWorkUnitForWorkUnit.Release(force) } // ************************************************************************** // Command line // ************************************************************************** // KubeWorkerCfg is the cmdline configuration object for a Kubernetes worker plugin. 
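// It is registered as the work-kubernetes config action (see init below) and implements WorkerConfig via GetWorkType, GetVerifySignature and NewWorker.
// A minimal receptor.conf entry might look like the following sketch (illustrative only; it assumes the usual convention that option names are the lowercased struct field names, and "kubeit" is just an example work type):
//
//   - work-kubernetes:
//       worktype: kubeit
//       authmethod: kubeconfig
//       namespace: default
//       image: alpine
//       command: cat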
type KubeWorkerCfg struct { WorkType string `required:"true" description:"Name for this worker type"` Namespace string `description:"Kubernetes namespace to create pods in"` Image string `description:"Container image to use for the worker pod"` Command string `description:"Command to run in the container (overrides entrypoint)"` Params string `description:"Command-line parameters to pass to the entrypoint"` AuthMethod string `description:"One of: kubeconfig, incluster" default:"incluster"` KubeConfig string `description:"Kubeconfig filename (for authmethod=kubeconfig)"` Pod string `description:"Pod definition filename, in json or yaml format"` AllowRuntimeAuth bool `description:"Allow passing API parameters at runtime" default:"false"` AllowRuntimeCommand bool `description:"Allow specifying image & command at runtime" default:"false"` AllowRuntimeParams bool `description:"Allow adding command parameters at runtime" default:"false"` AllowRuntimePod bool `description:"Allow passing Pod at runtime" default:"false"` DeletePodOnRestart bool `description:"On restart, delete the pod if in pending state" default:"true"` StreamMethod string `description:"Method for connecting to worker pods: logger or tcp" default:"logger"` VerifySignature bool `description:"Verify a signed work submission" default:"false"` } // NewWorker is a factory to produce worker instances. func (cfg KubeWorkerCfg) NewWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit { return cfg.NewkubeWorker(bwu, w, unitID, workType, nil) } func (cfg KubeWorkerCfg) NewkubeWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string, kawi KubeAPIer) WorkUnit { if bwu == nil { bwu = &BaseWorkUnit{ status: StatusFileData{ ExtraData: &KubeExtraData{ Image: cfg.Image, Command: cfg.Command, KubeNamespace: cfg.Namespace, KubePod: cfg.Pod, KubeConfig: cfg.KubeConfig, }, }, } } KubeAPIWrapperLock.Lock() if kawi != nil { KubeAPIWrapperInstance = kawi } else { KubeAPIWrapperInstance = KubeAPIWrapper{} } KubeAPIWrapperLock.Unlock() ku := &KubeUnit{ BaseWorkUnitForWorkUnit: bwu, authMethod: strings.ToLower(cfg.AuthMethod), streamMethod: strings.ToLower(cfg.StreamMethod), baseParams: cfg.Params, allowRuntimeAuth: cfg.AllowRuntimeAuth, allowRuntimeCommand: cfg.AllowRuntimeCommand, allowRuntimeParams: cfg.AllowRuntimeParams, allowRuntimePod: cfg.AllowRuntimePod, deletePodOnRestart: cfg.DeletePodOnRestart, namePrefix: fmt.Sprintf("%s-", strings.ToLower(cfg.WorkType)), } ku.BaseWorkUnitForWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) return ku } // Prepare inspects the configuration for validity. 
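// In particular: AuthMethod must be kubeconfig, incluster or runtime; a namespace is required unless it can come from a kubeconfig or be supplied at runtime; Pod is mutually exclusive with Image, Command and Params; and StreamMethod must be logger or tcp.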
func (cfg KubeWorkerCfg) Prepare() error { lcAuth := strings.ToLower(cfg.AuthMethod) if lcAuth != "kubeconfig" && lcAuth != "incluster" && lcAuth != "runtime" { return fmt.Errorf("invalid AuthMethod: %s", cfg.AuthMethod) } if cfg.Namespace == "" && !(lcAuth == "kubeconfig" || cfg.AllowRuntimeAuth) { return fmt.Errorf("must provide namespace when AuthMethod is not kubeconfig") } if cfg.KubeConfig != "" { if lcAuth != "kubeconfig" { return fmt.Errorf("can only provide KubeConfig when AuthMethod=kubeconfig") } _, err := os.Stat(cfg.KubeConfig) if err != nil { return fmt.Errorf("error accessing kubeconfig file: %s", err) } } if cfg.Pod != "" && (cfg.Image != "" || cfg.Command != "" || cfg.Params != "") { return fmt.Errorf("can only provide Pod when Image, Command, and Params are empty") } if cfg.Pod == "" && cfg.Image == "" && !cfg.AllowRuntimeCommand && !cfg.AllowRuntimePod { return fmt.Errorf("must specify a container image to run") } method := strings.ToLower(cfg.StreamMethod) if method != "logger" && method != "tcp" { return fmt.Errorf("stream mode must be logger or tcp") } return nil } func (cfg KubeWorkerCfg) GetWorkType() string { return cfg.WorkType } func (cfg KubeWorkerCfg) GetVerifySignature() bool { return cfg.VerifySignature } // Run runs the action. func (cfg KubeWorkerCfg) Run() error { err := MainInstance.RegisterWorker(cfg.WorkType, cfg.NewWorker, cfg.VerifySignature) return err } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-workers", "work-kubernetes", "Run a worker using Kubernetes", KubeWorkerCfg{}, cmdline.Section(workersSection)) } receptor-1.5.3/pkg/workceptor/kubernetes_test.go000066400000000000000000000357551475465740700221220ustar00rootroot00000000000000package workceptor_test import ( "context" "io" "net/http" "os" "reflect" "strings" "sync" "testing" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "go.uber.org/mock/gomock" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" fakerest "k8s.io/client-go/rest/fake" "k8s.io/client-go/tools/remotecommand" ) func startNetceptorNodeWithWorkceptor() (*workceptor.KubeUnit, error) { kw := &workceptor.KubeUnit{ BaseWorkUnitForWorkUnit: &workceptor.BaseWorkUnit{}, } // Create Netceptor node using external backends n1 := netceptor.New(context.Background(), "node1") b1, err := netceptor.NewExternalBackend() if err != nil { return kw, err } err = n1.AddBackend(b1) if err != nil { return kw, err } w, err := workceptor.New(context.Background(), n1, "") if err != nil { return kw, err } kw.SetWorkceptor(w) return kw, nil } func TestShouldUseReconnect(t *testing.T) { const envVariable string = "RECEPTOR_KUBE_SUPPORT_RECONNECT" kw, err := startNetceptorNodeWithWorkceptor() if err != nil { t.Fatal(err) } tests := []struct { name string envValue string want bool }{ { name: "Enabled test", envValue: "enabled", want: true, }, { name: "Disabled test", envValue: "disabled", want: false, }, { name: "Auto test", envValue: "auto", want: true, }, { name: "Default test", envValue: "default", want: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if 
tt.envValue != "" { os.Setenv(envVariable, tt.envValue) defer os.Unsetenv(envVariable) } else { os.Unsetenv(envVariable) } if got := workceptor.ShouldUseReconnect(kw); got != tt.want { t.Errorf("shouldUseReconnect() = %v, want %v", got, tt.want) } }) } } func TestGetTimeoutOpenLogstream(t *testing.T) { const envVariable string = "RECEPTOR_OPEN_LOGSTREAM_TIMEOUT" kw, err := startNetceptorNodeWithWorkceptor() if err != nil { t.Fatal(err) } tests := []struct { name string envValue string want int }{ { name: "No env value set", envValue: "", want: 1, }, { name: "Env value set incorrectly to text", envValue: "text instead of int", want: 1, }, { name: "Env value set incorrectly to negative", envValue: "-1", want: 1, }, { name: "Env value set incorrectly to zero", envValue: "0", want: 1, }, { name: "Env value set correctly", envValue: "2", want: 2, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if tt.envValue != "" { os.Setenv(envVariable, tt.envValue) defer os.Unsetenv(envVariable) } else { os.Unsetenv(envVariable) } if got := workceptor.GetTimeoutOpenLogstream(kw); got != tt.want { t.Errorf("GetTimeoutOpenLogstream() = %v, want %v", got, tt.want) } }) } } func TestParseTime(t *testing.T) { type args struct { s string } desiredTimeString := "2024-01-17T00:00:00Z" desiredTime, _ := time.Parse(time.RFC3339, desiredTimeString) tests := []struct { name string args args want *time.Time wantErr bool }{ { name: "Positive test", args: args{ s: desiredTimeString, }, want: &desiredTime, }, { name: "Error test", args: args{ s: "Invalid time", }, want: nil, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { if got := workceptor.ParseTime(tt.args.s); !reflect.DeepEqual(got, tt.want) { t.Errorf("parseTime() = %v, want %v", got, tt.want) } }) } } func createKubernetesTestSetup(t *testing.T) (workceptor.WorkUnit, *mock_workceptor.MockBaseWorkUnitForWorkUnit, *mock_workceptor.MockNetceptorForWorkceptor, *workceptor.Workceptor, *mock_workceptor.MockKubeAPIer, *gomock.Controller, context.Context) { ctrl := gomock.NewController(t) ctx := context.Background() mockBaseWorkUnit := mock_workceptor.NewMockBaseWorkUnitForWorkUnit(ctrl) mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) mockNetceptor.EXPECT().NodeID().Return("NodeID") mockKubeAPI := mock_workceptor.NewMockKubeAPIer(ctrl) w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } mockBaseWorkUnit.EXPECT().Init(w, "", "", workceptor.FileSystem{}, nil) kubeConfig := workceptor.KubeWorkerCfg{AuthMethod: "incluster"} ku := kubeConfig.NewkubeWorker(mockBaseWorkUnit, w, "", "", mockKubeAPI) return ku, mockBaseWorkUnit, mockNetceptor, w, mockKubeAPI, ctrl, ctx } type hasTerm struct { field, value string } func (h *hasTerm) DeepCopySelector() fields.Selector { return h } func (h *hasTerm) Empty() bool { return true } func (h *hasTerm) Matches(_ fields.Fields) bool { return true } func (h *hasTerm) Requirements() fields.Requirements { return []fields.Requirement{{ Field: h.field, Operator: selection.Equals, Value: h.value, }} } func (h *hasTerm) RequiresExactMatch(_ string) (value string, found bool) { return "", true } func (h *hasTerm) String() string { return "Test" } func (h *hasTerm) Transform(_ fields.TransformFunc) (fields.Selector, error) { return h, nil } type ex struct{} func (e *ex) Stream(_ remotecommand.StreamOptions) error { return nil } func (e *ex) StreamWithContext(_ context.Context, _ remotecommand.StreamOptions) error { return 
nil } func TestKubeStart(t *testing.T) { ku, mockbwu, mockNet, w, mockKubeAPI, _, ctx := createKubernetesTestSetup(t) startTestCases := []struct { name string expectedCalls func() }{ { name: "test1", expectedCalls: func() { mockbwu.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() config := rest.Config{} mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) mockbwu.EXPECT().GetWorkceptor().Return(w).AnyTimes() logger := logger.NewReceptorLogger("") mockNet.EXPECT().GetLogger().Return(logger).AnyTimes() clientset := kubernetes.Clientset{} mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() lock := &sync.RWMutex{} mockbwu.EXPECT().GetStatusLock().Return(lock).AnyTimes() kubeExtraData := workceptor.KubeExtraData{} status := workceptor.StatusFileData{ExtraData: &kubeExtraData} mockbwu.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() mockbwu.EXPECT().GetStatusCopy().Return(status).AnyTimes() mockbwu.EXPECT().GetContext().Return(ctx).AnyTimes() pod := corev1.Pod{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: "Test Name"}, Spec: corev1.PodSpec{}, Status: corev1.PodStatus{}} mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() mockbwu.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() field := hasTerm{} mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() ev := watch.Event{Object: &pod} mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() apierr := apierrors.StatusError{} mockKubeAPI.EXPECT().NewNotFound(gomock.Any(), gomock.Any()).Return(&apierr).AnyTimes() mockbwu.EXPECT().MonitorLocalStatus().AnyTimes() c := rest.RESTClient{} req := rest.NewRequest(&c) mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req).AnyTimes() exec := ex{} mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() mockbwu.EXPECT().UnitDir().Return("TestDir").AnyTimes() }, }, } for _, testCase := range startTestCases { t.Run(testCase.name, func(t *testing.T) { testCase.expectedCalls() err := ku.Start() if err != nil { t.Error(err) } }) } } func Test_IsCompatibleK8S(t *testing.T) { type args struct { kw *workceptor.KubeUnit versionStr string } kw, err := startNetceptorNodeWithWorkceptor() if err != nil { t.Fatal(err) } tests := []struct { name string args args want bool }{ { name: "Kubernetes X stream negative test", args: args{ versionStr: "v0.0.0", }, want: false, }, { name: "Kubernetes Y stream negative test", args: args{ versionStr: "v1.22.9998", }, want: false, }, { name: "Kubernetes 1.23 Z stream negative test", args: args{ versionStr: "v1.23.13", }, want: false, }, { name: "Kubernetes 1.23 exact positive test", args: args{ versionStr: "v1.23.14", }, want: true, }, { name: "Kubernetes 1.23 Z stream positive test", args: args{ versionStr: "v1.23.15", }, want: true, }, { name: "Kubernetes 1.24 Z stream negative test", args: args{ versionStr: "v1.24.7", }, want: false, }, { name: "Kubernetes 1.24 exact positive test", args: args{ versionStr: "v1.24.8", }, want: true, }, { name: "Kubernetes 1.24 Z stream positive test", args: args{ versionStr: "v1.24.9", }, want: true, }, { name: "Kubernetes 1.25 Z stream negative test", args: args{ versionStr: "v1.25.3", }, want: false, }, { name: "Kuberentes 1.25 exact 
positive test", args: args{ versionStr: "v1.25.4", }, want: true, }, { name: "Kubernetes 1.25 Z stream positive test", args: args{ versionStr: "v1.25.99", }, want: true, }, { name: "Kubernetes Y stream positive test", args: args{ versionStr: "v1.26.0", }, want: true, }, { name: "Kubernetes X stream positive test 1", args: args{ versionStr: "v2.0.0", }, want: false, }, { name: "Kubernetes X stream positive test 2", args: args{ versionStr: "v2.23.14", }, want: true, }, { name: "Kubernetes X stream positive test 3", args: args{ versionStr: "v2.24.8", }, want: true, }, { name: "Kubernetes X stream positive test 4", args: args{ versionStr: "v2.25.4", }, want: true, }, { name: "Kubernetes X stream positive test 5", args: args{ versionStr: "v2.26.0", }, want: true, }, { name: "Missing Kubernetes version negative test", args: args{ versionStr: "yoloswag", }, want: false, }, { name: "Prerelease Kubernetes version positive test 1", args: args{ versionStr: "v1.32.14+sadfasdf", }, want: true, }, { name: "Prerelease Kubernetes version positive test 2", args: args{ versionStr: "v1.32.14-asdfasdf+12131", }, want: true, }, { name: "Prerelease Kubernetes version positive test 3", args: args{ versionStr: "v1.32.15-asdfasdf+12131", }, want: true, }, } for _, tt := range tests { tt.args.kw = kw t.Run(tt.name, func(t *testing.T) { if got := workceptor.IsCompatibleK8S(tt.args.kw, tt.args.versionStr); !reflect.DeepEqual(got, tt.want) { t.Errorf("IsCompatibleK8S() = %v, want %v", got, tt.want) } }) } } func TestKubeLoggingWithReconnect(t *testing.T) { var stdinErr error var stdoutErr error ku, mockBaseWorkUnit, mockNetceptor, w, mockKubeAPI, ctrl, ctx := createKubernetesTestSetup(t) kw := &workceptor.KubeUnit{ BaseWorkUnitForWorkUnit: mockBaseWorkUnit, } tests := []struct { name string expectedCalls func() }{ { name: "Kube error should be read", expectedCalls: func() { mockBaseWorkUnit.EXPECT().UpdateBasicStatus(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() config := rest.Config{} mockKubeAPI.EXPECT().InClusterConfig().Return(&config, nil) mockBaseWorkUnit.EXPECT().GetWorkceptor().Return(w).AnyTimes() clientset := kubernetes.Clientset{} mockKubeAPI.EXPECT().NewForConfig(gomock.Any()).Return(&clientset, nil) lock := &sync.RWMutex{} mockBaseWorkUnit.EXPECT().GetStatusLock().Return(lock).AnyTimes() mockBaseWorkUnit.EXPECT().MonitorLocalStatus().AnyTimes() mockBaseWorkUnit.EXPECT().UnitDir().Return("TestDir2").AnyTimes() kubeExtraData := workceptor.KubeExtraData{} status := workceptor.StatusFileData{ExtraData: &kubeExtraData} mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&status).AnyTimes() mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(status).AnyTimes() mockBaseWorkUnit.EXPECT().GetContext().Return(ctx).AnyTimes() pod := corev1.Pod{TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{Name: "Test_Name"}, Spec: corev1.PodSpec{}, Status: corev1.PodStatus{}} mockKubeAPI.EXPECT().Create(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() mockBaseWorkUnit.EXPECT().UpdateFullStatus(gomock.Any()).AnyTimes() field := hasTerm{} mockKubeAPI.EXPECT().OneTermEqualSelector(gomock.Any(), gomock.Any()).Return(&field).AnyTimes() ev := watch.Event{Object: &pod} mockKubeAPI.EXPECT().UntilWithSync(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&ev, nil).AnyTimes() mockKubeAPI.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(&pod, nil).AnyTimes() req := fakerest.RESTClient{ Client: 
fakerest.CreateHTTPClient(func(request *http.Request) (*http.Response, error) { resp := &http.Response{ StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader("2024-12-09T00:31:18.823849250Z HI\n kube error")), } return resp, nil }), NegotiatedSerializer: scheme.Codecs.WithoutConversion(), } mockKubeAPI.EXPECT().GetLogs(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(req.Request()).AnyTimes() logger := logger.NewReceptorLogger("") mockNetceptor.EXPECT().GetLogger().Return(logger).AnyTimes() mockKubeAPI.EXPECT().SubResource(gomock.Any(), gomock.Any(), gomock.Any()).Return(req.Request()).AnyTimes() exec := ex{} mockKubeAPI.EXPECT().NewSPDYExecutor(gomock.Any(), gomock.Any(), gomock.Any()).Return(&exec, nil).AnyTimes() }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.expectedCalls() ku.Start() time.Sleep(10 * time.Millisecond) kw.CreatePod(nil) wg := &sync.WaitGroup{} wg.Add(1) mockfilesystemer := mock_workceptor.NewMockFileSystemer(ctrl) mockfilesystemer.EXPECT().OpenFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(&os.File{}, nil) stdout, _ := workceptor.NewStdoutWriter(mockfilesystemer, "") mockFileWC := mock_workceptor.NewMockFileWriteCloser(ctrl) stdout.SetWriter(mockFileWC) mockFileWC.EXPECT().Write(gomock.AnyOf([]byte("HI\n"), []byte(" kube error\n"))).Return(0, nil).Times(2) kw.KubeLoggingWithReconnect(wg, stdout, &stdinErr, &stdoutErr) }) } } receptor-1.5.3/pkg/workceptor/lock_test.go000066400000000000000000000036251475465740700206720ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "context" "fmt" "os" "path" "strconv" "sync" "testing" "time" ) func TestStatusFileLock(t *testing.T) { numWriterThreads := 8 numReaderThreads := 8 baseWaitTime := 200 * time.Millisecond tmpdir, err := os.MkdirTemp(os.TempDir(), "receptor-test-*") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpdir) statusFilename := path.Join(tmpdir, "status") startTime := time.Now() var totalWaitTime time.Duration wg := sync.WaitGroup{} wg.Add(numWriterThreads) for i := 0; i < numWriterThreads; i++ { waitTime := time.Duration(i) * baseWaitTime totalWaitTime += waitTime go func(iter int, waitTime time.Duration) { sfd := StatusFileData{} sfd.UpdateFullStatus(statusFilename, func(status *StatusFileData) { time.Sleep(waitTime) status.State = iter status.StdoutSize = int64(iter) status.Detail = fmt.Sprintf("%d", iter) }) wg.Done() }(i, waitTime) } ctx, cancel := context.WithCancel(context.Background()) wg2 := sync.WaitGroup{} wg2.Add(numReaderThreads) for i := 0; i < numReaderThreads; i++ { go func() { sfd := StatusFileData{} fileHasExisted := false for { if ctx.Err() != nil { wg2.Done() return } err := sfd.Load(statusFilename) if os.IsNotExist(err) && !fileHasExisted { continue } fileHasExisted = true if err != nil { t.Fatalf("Error loading status file: %s", err) } detailIter, err := strconv.Atoi(sfd.Detail) if err != nil { t.Fatalf("Error converting status detail to int: %s", err) } if detailIter >= 0 { if int64(sfd.State) != sfd.StdoutSize || sfd.State != detailIter { t.Fatal("Mismatched data in struct") } } } }() } wg.Wait() cancel() totalTime := time.Since(startTime) if totalTime < totalWaitTime { t.Fatal("File locks apparently not locking") } wg2.Wait() } 
receptor-1.5.3/pkg/workceptor/mock_workceptor/000077500000000000000000000000001475465740700215565ustar00rootroot00000000000000receptor-1.5.3/pkg/workceptor/mock_workceptor/baseworkunit.go000066400000000000000000000330261475465740700246260ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: github.com/ansible/receptor/pkg/workceptor (interfaces: BaseWorkUnitForWorkUnit) // // Generated by this command: // // mockgen . BaseWorkUnitForWorkUnit // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( context "context" reflect "reflect" sync "sync" workceptor "github.com/ansible/receptor/pkg/workceptor" gomock "go.uber.org/mock/gomock" ) // MockBaseWorkUnitForWorkUnit is a mock of BaseWorkUnitForWorkUnit interface. type MockBaseWorkUnitForWorkUnit struct { ctrl *gomock.Controller recorder *MockBaseWorkUnitForWorkUnitMockRecorder isgomock struct{} } // MockBaseWorkUnitForWorkUnitMockRecorder is the mock recorder for MockBaseWorkUnitForWorkUnit. type MockBaseWorkUnitForWorkUnitMockRecorder struct { mock *MockBaseWorkUnitForWorkUnit } // NewMockBaseWorkUnitForWorkUnit creates a new mock instance. func NewMockBaseWorkUnitForWorkUnit(ctrl *gomock.Controller) *MockBaseWorkUnitForWorkUnit { mock := &MockBaseWorkUnitForWorkUnit{ctrl: ctrl} mock.recorder = &MockBaseWorkUnitForWorkUnitMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockBaseWorkUnitForWorkUnit) EXPECT() *MockBaseWorkUnitForWorkUnitMockRecorder { return m.recorder } // CancelContext mocks base method. func (m *MockBaseWorkUnitForWorkUnit) CancelContext() { m.ctrl.T.Helper() m.ctrl.Call(m, "CancelContext") } // CancelContext indicates an expected call of CancelContext. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) CancelContext() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelContext", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).CancelContext)) } // GetCancel mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetCancel() context.CancelFunc { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetCancel") ret0, _ := ret[0].(context.CancelFunc) return ret0 } // GetCancel indicates an expected call of GetCancel. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetCancel() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCancel", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetCancel)) } // GetContext mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetContext() context.Context { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetContext") ret0, _ := ret[0].(context.Context) return ret0 } // GetContext indicates an expected call of GetContext. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetContext() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContext", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetContext)) } // GetStatusCopy mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetStatusCopy() workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStatusCopy") ret0, _ := ret[0].(workceptor.StatusFileData) return ret0 } // GetStatusCopy indicates an expected call of GetStatusCopy. 
func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetStatusCopy() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusCopy", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetStatusCopy)) } // GetStatusLock mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetStatusLock() *sync.RWMutex { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStatusLock") ret0, _ := ret[0].(*sync.RWMutex) return ret0 } // GetStatusLock indicates an expected call of GetStatusLock. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetStatusLock() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusLock", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetStatusLock)) } // GetStatusWithoutExtraData mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetStatusWithoutExtraData() *workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetStatusWithoutExtraData") ret0, _ := ret[0].(*workceptor.StatusFileData) return ret0 } // GetStatusWithoutExtraData indicates an expected call of GetStatusWithoutExtraData. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetStatusWithoutExtraData() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatusWithoutExtraData", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetStatusWithoutExtraData)) } // GetWorkceptor mocks base method. func (m *MockBaseWorkUnitForWorkUnit) GetWorkceptor() *workceptor.Workceptor { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetWorkceptor") ret0, _ := ret[0].(*workceptor.Workceptor) return ret0 } // GetWorkceptor indicates an expected call of GetWorkceptor. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) GetWorkceptor() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkceptor", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).GetWorkceptor)) } // ID mocks base method. func (m *MockBaseWorkUnitForWorkUnit) ID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ID") ret0, _ := ret[0].(string) return ret0 } // ID indicates an expected call of ID. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) ID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).ID)) } // Init mocks base method. func (m *MockBaseWorkUnitForWorkUnit) Init(w *workceptor.Workceptor, unitID, workType string, fs workceptor.FileSystemer, watcher workceptor.WatcherWrapper) { m.ctrl.T.Helper() m.ctrl.Call(m, "Init", w, unitID, workType, fs, watcher) } // Init indicates an expected call of Init. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) Init(w, unitID, workType, fs, watcher any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Init", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).Init), w, unitID, workType, fs, watcher) } // LastUpdateError mocks base method. func (m *MockBaseWorkUnitForWorkUnit) LastUpdateError() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastUpdateError") ret0, _ := ret[0].(error) return ret0 } // LastUpdateError indicates an expected call of LastUpdateError. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) LastUpdateError() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastUpdateError", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).LastUpdateError)) } // Load mocks base method. 
func (m *MockBaseWorkUnitForWorkUnit) Load() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load") ret0, _ := ret[0].(error) return ret0 } // Load indicates an expected call of Load. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) Load() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).Load)) } // MonitorLocalStatus mocks base method. func (m *MockBaseWorkUnitForWorkUnit) MonitorLocalStatus() { m.ctrl.T.Helper() m.ctrl.Call(m, "MonitorLocalStatus") } // MonitorLocalStatus indicates an expected call of MonitorLocalStatus. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) MonitorLocalStatus() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MonitorLocalStatus", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).MonitorLocalStatus)) } // Release mocks base method. func (m *MockBaseWorkUnitForWorkUnit) Release(force bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Release", force) ret0, _ := ret[0].(error) return ret0 } // Release indicates an expected call of Release. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) Release(force any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).Release), force) } // Save mocks base method. func (m *MockBaseWorkUnitForWorkUnit) Save() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Save") ret0, _ := ret[0].(error) return ret0 } // Save indicates an expected call of Save. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) Save() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).Save)) } // SetFromParams mocks base method. func (m *MockBaseWorkUnitForWorkUnit) SetFromParams(arg0 map[string]string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetFromParams", arg0) ret0, _ := ret[0].(error) return ret0 } // SetFromParams indicates an expected call of SetFromParams. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) SetFromParams(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFromParams", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).SetFromParams), arg0) } // SetStatusExtraData mocks base method. func (m *MockBaseWorkUnitForWorkUnit) SetStatusExtraData(arg0 any) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetStatusExtraData", arg0) } // SetStatusExtraData indicates an expected call of SetStatusExtraData. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) SetStatusExtraData(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetStatusExtraData", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).SetStatusExtraData), arg0) } // SetWorkceptor mocks base method. func (m *MockBaseWorkUnitForWorkUnit) SetWorkceptor(arg0 *workceptor.Workceptor) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetWorkceptor", arg0) } // SetWorkceptor indicates an expected call of SetWorkceptor. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) SetWorkceptor(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetWorkceptor", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).SetWorkceptor), arg0) } // Status mocks base method. 
func (m *MockBaseWorkUnitForWorkUnit) Status() *workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(*workceptor.StatusFileData) return ret0 } // Status indicates an expected call of Status. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).Status)) } // StatusFileName mocks base method. func (m *MockBaseWorkUnitForWorkUnit) StatusFileName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StatusFileName") ret0, _ := ret[0].(string) return ret0 } // StatusFileName indicates an expected call of StatusFileName. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) StatusFileName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatusFileName", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).StatusFileName)) } // StdoutFileName mocks base method. func (m *MockBaseWorkUnitForWorkUnit) StdoutFileName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StdoutFileName") ret0, _ := ret[0].(string) return ret0 } // StdoutFileName indicates an expected call of StdoutFileName. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) StdoutFileName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StdoutFileName", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).StdoutFileName)) } // UnitDir mocks base method. func (m *MockBaseWorkUnitForWorkUnit) UnitDir() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UnitDir") ret0, _ := ret[0].(string) return ret0 } // UnitDir indicates an expected call of UnitDir. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) UnitDir() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnitDir", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).UnitDir)) } // UnredactedStatus mocks base method. func (m *MockBaseWorkUnitForWorkUnit) UnredactedStatus() *workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UnredactedStatus") ret0, _ := ret[0].(*workceptor.StatusFileData) return ret0 } // UnredactedStatus indicates an expected call of UnredactedStatus. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) UnredactedStatus() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnredactedStatus", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).UnredactedStatus)) } // UpdateBasicStatus mocks base method. func (m *MockBaseWorkUnitForWorkUnit) UpdateBasicStatus(state int, detail string, stdoutSize int64) { m.ctrl.T.Helper() m.ctrl.Call(m, "UpdateBasicStatus", state, detail, stdoutSize) } // UpdateBasicStatus indicates an expected call of UpdateBasicStatus. func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) UpdateBasicStatus(state, detail, stdoutSize any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBasicStatus", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).UpdateBasicStatus), state, detail, stdoutSize) } // UpdateFullStatus mocks base method. func (m *MockBaseWorkUnitForWorkUnit) UpdateFullStatus(statusFunc func(*workceptor.StatusFileData)) { m.ctrl.T.Helper() m.ctrl.Call(m, "UpdateFullStatus", statusFunc) } // UpdateFullStatus indicates an expected call of UpdateFullStatus. 
func (mr *MockBaseWorkUnitForWorkUnitMockRecorder) UpdateFullStatus(statusFunc any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFullStatus", reflect.TypeOf((*MockBaseWorkUnitForWorkUnit)(nil).UpdateFullStatus), statusFunc) } receptor-1.5.3/pkg/workceptor/mock_workceptor/interfaces.go000066400000000000000000000247521475465740700242420ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/workceptor/interfaces.go // // Generated by this command: // // mockgen -source=pkg/workceptor/interfaces.go -destination=pkg/workceptor/mock_workceptor/interfaces.go // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( reflect "reflect" workceptor "github.com/ansible/receptor/pkg/workceptor" gomock "go.uber.org/mock/gomock" ) // MockWorkUnit is a mock of WorkUnit interface. type MockWorkUnit struct { ctrl *gomock.Controller recorder *MockWorkUnitMockRecorder isgomock struct{} } // MockWorkUnitMockRecorder is the mock recorder for MockWorkUnit. type MockWorkUnitMockRecorder struct { mock *MockWorkUnit } // NewMockWorkUnit creates a new mock instance. func NewMockWorkUnit(ctrl *gomock.Controller) *MockWorkUnit { mock := &MockWorkUnit{ctrl: ctrl} mock.recorder = &MockWorkUnitMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockWorkUnit) EXPECT() *MockWorkUnitMockRecorder { return m.recorder } // Cancel mocks base method. func (m *MockWorkUnit) Cancel() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Cancel") ret0, _ := ret[0].(error) return ret0 } // Cancel indicates an expected call of Cancel. func (mr *MockWorkUnitMockRecorder) Cancel() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cancel", reflect.TypeOf((*MockWorkUnit)(nil).Cancel)) } // ID mocks base method. func (m *MockWorkUnit) ID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ID") ret0, _ := ret[0].(string) return ret0 } // ID indicates an expected call of ID. func (mr *MockWorkUnitMockRecorder) ID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockWorkUnit)(nil).ID)) } // LastUpdateError mocks base method. func (m *MockWorkUnit) LastUpdateError() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LastUpdateError") ret0, _ := ret[0].(error) return ret0 } // LastUpdateError indicates an expected call of LastUpdateError. func (mr *MockWorkUnitMockRecorder) LastUpdateError() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastUpdateError", reflect.TypeOf((*MockWorkUnit)(nil).LastUpdateError)) } // Load mocks base method. func (m *MockWorkUnit) Load() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Load") ret0, _ := ret[0].(error) return ret0 } // Load indicates an expected call of Load. func (mr *MockWorkUnitMockRecorder) Load() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Load", reflect.TypeOf((*MockWorkUnit)(nil).Load)) } // Release mocks base method. func (m *MockWorkUnit) Release(force bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Release", force) ret0, _ := ret[0].(error) return ret0 } // Release indicates an expected call of Release. 
func (mr *MockWorkUnitMockRecorder) Release(force any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Release", reflect.TypeOf((*MockWorkUnit)(nil).Release), force) } // Restart mocks base method. func (m *MockWorkUnit) Restart() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Restart") ret0, _ := ret[0].(error) return ret0 } // Restart indicates an expected call of Restart. func (mr *MockWorkUnitMockRecorder) Restart() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Restart", reflect.TypeOf((*MockWorkUnit)(nil).Restart)) } // Save mocks base method. func (m *MockWorkUnit) Save() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Save") ret0, _ := ret[0].(error) return ret0 } // Save indicates an expected call of Save. func (mr *MockWorkUnitMockRecorder) Save() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Save", reflect.TypeOf((*MockWorkUnit)(nil).Save)) } // SetFromParams mocks base method. func (m *MockWorkUnit) SetFromParams(params map[string]string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SetFromParams", params) ret0, _ := ret[0].(error) return ret0 } // SetFromParams indicates an expected call of SetFromParams. func (mr *MockWorkUnitMockRecorder) SetFromParams(params any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetFromParams", reflect.TypeOf((*MockWorkUnit)(nil).SetFromParams), params) } // Start mocks base method. func (m *MockWorkUnit) Start() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Start") ret0, _ := ret[0].(error) return ret0 } // Start indicates an expected call of Start. func (mr *MockWorkUnitMockRecorder) Start() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockWorkUnit)(nil).Start)) } // Status mocks base method. func (m *MockWorkUnit) Status() *workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(*workceptor.StatusFileData) return ret0 } // Status indicates an expected call of Status. func (mr *MockWorkUnitMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockWorkUnit)(nil).Status)) } // StatusFileName mocks base method. func (m *MockWorkUnit) StatusFileName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StatusFileName") ret0, _ := ret[0].(string) return ret0 } // StatusFileName indicates an expected call of StatusFileName. func (mr *MockWorkUnitMockRecorder) StatusFileName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StatusFileName", reflect.TypeOf((*MockWorkUnit)(nil).StatusFileName)) } // StdoutFileName mocks base method. func (m *MockWorkUnit) StdoutFileName() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StdoutFileName") ret0, _ := ret[0].(string) return ret0 } // StdoutFileName indicates an expected call of StdoutFileName. func (mr *MockWorkUnitMockRecorder) StdoutFileName() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StdoutFileName", reflect.TypeOf((*MockWorkUnit)(nil).StdoutFileName)) } // UnitDir mocks base method. func (m *MockWorkUnit) UnitDir() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UnitDir") ret0, _ := ret[0].(string) return ret0 } // UnitDir indicates an expected call of UnitDir. 
func (mr *MockWorkUnitMockRecorder) UnitDir() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnitDir", reflect.TypeOf((*MockWorkUnit)(nil).UnitDir)) } // UnredactedStatus mocks base method. func (m *MockWorkUnit) UnredactedStatus() *workceptor.StatusFileData { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "UnredactedStatus") ret0, _ := ret[0].(*workceptor.StatusFileData) return ret0 } // UnredactedStatus indicates an expected call of UnredactedStatus. func (mr *MockWorkUnitMockRecorder) UnredactedStatus() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnredactedStatus", reflect.TypeOf((*MockWorkUnit)(nil).UnredactedStatus)) } // UpdateBasicStatus mocks base method. func (m *MockWorkUnit) UpdateBasicStatus(state int, detail string, stdoutSize int64) { m.ctrl.T.Helper() m.ctrl.Call(m, "UpdateBasicStatus", state, detail, stdoutSize) } // UpdateBasicStatus indicates an expected call of UpdateBasicStatus. func (mr *MockWorkUnitMockRecorder) UpdateBasicStatus(state, detail, stdoutSize any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateBasicStatus", reflect.TypeOf((*MockWorkUnit)(nil).UpdateBasicStatus), state, detail, stdoutSize) } // UpdateFullStatus mocks base method. func (m *MockWorkUnit) UpdateFullStatus(statusFunc func(*workceptor.StatusFileData)) { m.ctrl.T.Helper() m.ctrl.Call(m, "UpdateFullStatus", statusFunc) } // UpdateFullStatus indicates an expected call of UpdateFullStatus. func (mr *MockWorkUnitMockRecorder) UpdateFullStatus(statusFunc any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFullStatus", reflect.TypeOf((*MockWorkUnit)(nil).UpdateFullStatus), statusFunc) } // MockWorkerConfig is a mock of WorkerConfig interface. type MockWorkerConfig struct { ctrl *gomock.Controller recorder *MockWorkerConfigMockRecorder isgomock struct{} } // MockWorkerConfigMockRecorder is the mock recorder for MockWorkerConfig. type MockWorkerConfigMockRecorder struct { mock *MockWorkerConfig } // NewMockWorkerConfig creates a new mock instance. func NewMockWorkerConfig(ctrl *gomock.Controller) *MockWorkerConfig { mock := &MockWorkerConfig{ctrl: ctrl} mock.recorder = &MockWorkerConfigMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockWorkerConfig) EXPECT() *MockWorkerConfigMockRecorder { return m.recorder } // GetVerifySignature mocks base method. func (m *MockWorkerConfig) GetVerifySignature() bool { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetVerifySignature") ret0, _ := ret[0].(bool) return ret0 } // GetVerifySignature indicates an expected call of GetVerifySignature. func (mr *MockWorkerConfigMockRecorder) GetVerifySignature() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVerifySignature", reflect.TypeOf((*MockWorkerConfig)(nil).GetVerifySignature)) } // GetWorkType mocks base method. func (m *MockWorkerConfig) GetWorkType() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetWorkType") ret0, _ := ret[0].(string) return ret0 } // GetWorkType indicates an expected call of GetWorkType. func (mr *MockWorkerConfigMockRecorder) GetWorkType() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkType", reflect.TypeOf((*MockWorkerConfig)(nil).GetWorkType)) } // NewWorker mocks base method. 
func (m *MockWorkerConfig) NewWorker(bwu workceptor.BaseWorkUnitForWorkUnit, w *workceptor.Workceptor, unitID, workType string) workceptor.WorkUnit { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewWorker", bwu, w, unitID, workType) ret0, _ := ret[0].(workceptor.WorkUnit) return ret0 } // NewWorker indicates an expected call of NewWorker. func (mr *MockWorkerConfigMockRecorder) NewWorker(bwu, w, unitID, workType any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewWorker", reflect.TypeOf((*MockWorkerConfig)(nil).NewWorker), bwu, w, unitID, workType) } receptor-1.5.3/pkg/workceptor/mock_workceptor/kubernetes.go000066400000000000000000000323411475465740700242570ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/workceptor/kubernetes.go // // Generated by this command: // // mockgen -source=pkg/workceptor/kubernetes.go -destination=pkg/workceptor/mock_workceptor/kubernetes.go // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( context "context" url "net/url" reflect "reflect" gomock "go.uber.org/mock/gomock" v1 "k8s.io/api/core/v1" errors "k8s.io/apimachinery/pkg/api/errors" v10 "k8s.io/apimachinery/pkg/apis/meta/v1" fields "k8s.io/apimachinery/pkg/fields" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" watch "k8s.io/apimachinery/pkg/watch" kubernetes "k8s.io/client-go/kubernetes" rest "k8s.io/client-go/rest" cache "k8s.io/client-go/tools/cache" clientcmd "k8s.io/client-go/tools/clientcmd" remotecommand "k8s.io/client-go/tools/remotecommand" watch0 "k8s.io/client-go/tools/watch" flowcontrol "k8s.io/client-go/util/flowcontrol" ) // MockKubeAPIer is a mock of KubeAPIer interface. type MockKubeAPIer struct { ctrl *gomock.Controller recorder *MockKubeAPIerMockRecorder isgomock struct{} } // MockKubeAPIerMockRecorder is the mock recorder for MockKubeAPIer. type MockKubeAPIerMockRecorder struct { mock *MockKubeAPIer } // NewMockKubeAPIer creates a new mock instance. func NewMockKubeAPIer(ctrl *gomock.Controller) *MockKubeAPIer { mock := &MockKubeAPIer{ctrl: ctrl} mock.recorder = &MockKubeAPIerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockKubeAPIer) EXPECT() *MockKubeAPIerMockRecorder { return m.recorder } // BuildConfigFromFlags mocks base method. func (m *MockKubeAPIer) BuildConfigFromFlags(arg0, arg1 string) (*rest.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "BuildConfigFromFlags", arg0, arg1) ret0, _ := ret[0].(*rest.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // BuildConfigFromFlags indicates an expected call of BuildConfigFromFlags. func (mr *MockKubeAPIerMockRecorder) BuildConfigFromFlags(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildConfigFromFlags", reflect.TypeOf((*MockKubeAPIer)(nil).BuildConfigFromFlags), arg0, arg1) } // Create mocks base method. func (m *MockKubeAPIer) Create(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 *v1.Pod, arg4 v10.CreateOptions) (*v1.Pod, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Create indicates an expected call of Create. 
func (mr *MockKubeAPIerMockRecorder) Create(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockKubeAPIer)(nil).Create), arg0, arg1, arg2, arg3, arg4) } // Delete mocks base method. func (m *MockKubeAPIer) Delete(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.DeleteOptions) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Delete", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // Delete indicates an expected call of Delete. func (mr *MockKubeAPIerMockRecorder) Delete(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockKubeAPIer)(nil).Delete), arg0, arg1, arg2, arg3, arg4) } // Get mocks base method. func (m *MockKubeAPIer) Get(arg0 context.Context, arg1 *kubernetes.Clientset, arg2, arg3 string, arg4 v10.GetOptions) (*v1.Pod, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*v1.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get. func (mr *MockKubeAPIerMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockKubeAPIer)(nil).Get), arg0, arg1, arg2, arg3, arg4) } // GetLogs mocks base method. func (m *MockKubeAPIer) GetLogs(arg0 *kubernetes.Clientset, arg1, arg2 string, arg3 *v1.PodLogOptions) *rest.Request { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogs", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*rest.Request) return ret0 } // GetLogs indicates an expected call of GetLogs. func (mr *MockKubeAPIerMockRecorder) GetLogs(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogs", reflect.TypeOf((*MockKubeAPIer)(nil).GetLogs), arg0, arg1, arg2, arg3) } // InClusterConfig mocks base method. func (m *MockKubeAPIer) InClusterConfig() (*rest.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InClusterConfig") ret0, _ := ret[0].(*rest.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // InClusterConfig indicates an expected call of InClusterConfig. func (mr *MockKubeAPIerMockRecorder) InClusterConfig() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InClusterConfig", reflect.TypeOf((*MockKubeAPIer)(nil).InClusterConfig)) } // List mocks base method. func (m *MockKubeAPIer) List(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 v10.ListOptions) (*v1.PodList, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "List", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*v1.PodList) ret1, _ := ret[1].(error) return ret0, ret1 } // List indicates an expected call of List. func (mr *MockKubeAPIerMockRecorder) List(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockKubeAPIer)(nil).List), arg0, arg1, arg2, arg3) } // NewClientConfigFromBytes mocks base method. func (m *MockKubeAPIer) NewClientConfigFromBytes(arg0 []byte) (clientcmd.ClientConfig, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewClientConfigFromBytes", arg0) ret0, _ := ret[0].(clientcmd.ClientConfig) ret1, _ := ret[1].(error) return ret0, ret1 } // NewClientConfigFromBytes indicates an expected call of NewClientConfigFromBytes. 
func (mr *MockKubeAPIerMockRecorder) NewClientConfigFromBytes(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewClientConfigFromBytes", reflect.TypeOf((*MockKubeAPIer)(nil).NewClientConfigFromBytes), arg0) } // NewDefaultClientConfigLoadingRules mocks base method. func (m *MockKubeAPIer) NewDefaultClientConfigLoadingRules() *clientcmd.ClientConfigLoadingRules { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewDefaultClientConfigLoadingRules") ret0, _ := ret[0].(*clientcmd.ClientConfigLoadingRules) return ret0 } // NewDefaultClientConfigLoadingRules indicates an expected call of NewDefaultClientConfigLoadingRules. func (mr *MockKubeAPIerMockRecorder) NewDefaultClientConfigLoadingRules() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewDefaultClientConfigLoadingRules", reflect.TypeOf((*MockKubeAPIer)(nil).NewDefaultClientConfigLoadingRules)) } // NewFakeAlwaysRateLimiter mocks base method. func (m *MockKubeAPIer) NewFakeAlwaysRateLimiter() flowcontrol.RateLimiter { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewFakeAlwaysRateLimiter") ret0, _ := ret[0].(flowcontrol.RateLimiter) return ret0 } // NewFakeAlwaysRateLimiter indicates an expected call of NewFakeAlwaysRateLimiter. func (mr *MockKubeAPIerMockRecorder) NewFakeAlwaysRateLimiter() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFakeAlwaysRateLimiter", reflect.TypeOf((*MockKubeAPIer)(nil).NewFakeAlwaysRateLimiter)) } // NewFakeNeverRateLimiter mocks base method. func (m *MockKubeAPIer) NewFakeNeverRateLimiter() flowcontrol.RateLimiter { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewFakeNeverRateLimiter") ret0, _ := ret[0].(flowcontrol.RateLimiter) return ret0 } // NewFakeNeverRateLimiter indicates an expected call of NewFakeNeverRateLimiter. func (mr *MockKubeAPIerMockRecorder) NewFakeNeverRateLimiter() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFakeNeverRateLimiter", reflect.TypeOf((*MockKubeAPIer)(nil).NewFakeNeverRateLimiter)) } // NewForConfig mocks base method. func (m *MockKubeAPIer) NewForConfig(arg0 *rest.Config) (*kubernetes.Clientset, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewForConfig", arg0) ret0, _ := ret[0].(*kubernetes.Clientset) ret1, _ := ret[1].(error) return ret0, ret1 } // NewForConfig indicates an expected call of NewForConfig. func (mr *MockKubeAPIerMockRecorder) NewForConfig(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewForConfig", reflect.TypeOf((*MockKubeAPIer)(nil).NewForConfig), arg0) } // NewNotFound mocks base method. func (m *MockKubeAPIer) NewNotFound(arg0 schema.GroupResource, arg1 string) *errors.StatusError { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewNotFound", arg0, arg1) ret0, _ := ret[0].(*errors.StatusError) return ret0 } // NewNotFound indicates an expected call of NewNotFound. func (mr *MockKubeAPIerMockRecorder) NewNotFound(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewNotFound", reflect.TypeOf((*MockKubeAPIer)(nil).NewNotFound), arg0, arg1) } // NewSPDYExecutor mocks base method. 
func (m *MockKubeAPIer) NewSPDYExecutor(arg0 *rest.Config, arg1 string, arg2 *url.URL) (remotecommand.Executor, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewSPDYExecutor", arg0, arg1, arg2) ret0, _ := ret[0].(remotecommand.Executor) ret1, _ := ret[1].(error) return ret0, ret1 } // NewSPDYExecutor indicates an expected call of NewSPDYExecutor. func (mr *MockKubeAPIerMockRecorder) NewSPDYExecutor(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewSPDYExecutor", reflect.TypeOf((*MockKubeAPIer)(nil).NewSPDYExecutor), arg0, arg1, arg2) } // OneTermEqualSelector mocks base method. func (m *MockKubeAPIer) OneTermEqualSelector(arg0, arg1 string) fields.Selector { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OneTermEqualSelector", arg0, arg1) ret0, _ := ret[0].(fields.Selector) return ret0 } // OneTermEqualSelector indicates an expected call of OneTermEqualSelector. func (mr *MockKubeAPIerMockRecorder) OneTermEqualSelector(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OneTermEqualSelector", reflect.TypeOf((*MockKubeAPIer)(nil).OneTermEqualSelector), arg0, arg1) } // StreamWithContext mocks base method. func (m *MockKubeAPIer) StreamWithContext(arg0 context.Context, arg1 remotecommand.Executor, arg2 remotecommand.StreamOptions) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "StreamWithContext", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // StreamWithContext indicates an expected call of StreamWithContext. func (mr *MockKubeAPIerMockRecorder) StreamWithContext(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWithContext", reflect.TypeOf((*MockKubeAPIer)(nil).StreamWithContext), arg0, arg1, arg2) } // SubResource mocks base method. func (m *MockKubeAPIer) SubResource(arg0 *kubernetes.Clientset, arg1, arg2 string) *rest.Request { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SubResource", arg0, arg1, arg2) ret0, _ := ret[0].(*rest.Request) return ret0 } // SubResource indicates an expected call of SubResource. func (mr *MockKubeAPIerMockRecorder) SubResource(arg0, arg1, arg2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubResource", reflect.TypeOf((*MockKubeAPIer)(nil).SubResource), arg0, arg1, arg2) } // UntilWithSync mocks base method. func (m *MockKubeAPIer) UntilWithSync(arg0 context.Context, arg1 cache.ListerWatcher, arg2 runtime.Object, arg3 watch0.PreconditionFunc, arg4 ...watch0.ConditionFunc) (*watch.Event, error) { m.ctrl.T.Helper() varargs := []any{arg0, arg1, arg2, arg3} for _, a := range arg4 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "UntilWithSync", varargs...) ret0, _ := ret[0].(*watch.Event) ret1, _ := ret[1].(error) return ret0, ret1 } // UntilWithSync indicates an expected call of UntilWithSync. func (mr *MockKubeAPIerMockRecorder) UntilWithSync(arg0, arg1, arg2, arg3 any, arg4 ...any) *gomock.Call { mr.mock.ctrl.T.Helper() varargs := append([]any{arg0, arg1, arg2, arg3}, arg4...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UntilWithSync", reflect.TypeOf((*MockKubeAPIer)(nil).UntilWithSync), varargs...) } // Watch mocks base method. 
func (m *MockKubeAPIer) Watch(arg0 context.Context, arg1 *kubernetes.Clientset, arg2 string, arg3 v10.ListOptions) (watch.Interface, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Watch", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(watch.Interface) ret1, _ := ret[1].(error) return ret0, ret1 } // Watch indicates an expected call of Watch. func (mr *MockKubeAPIerMockRecorder) Watch(arg0, arg1, arg2, arg3 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Watch", reflect.TypeOf((*MockKubeAPIer)(nil).Watch), arg0, arg1, arg2, arg3) } receptor-1.5.3/pkg/workceptor/mock_workceptor/stdio_utils.go000066400000000000000000000144651475465740700244610ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/workceptor/stdio_utils.go // // Generated by this command: // // mockgen -source=pkg/workceptor/stdio_utils.go -destination=pkg/workceptor/mock_workceptor/stdio_utils.go // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( os "os" reflect "reflect" gomock "go.uber.org/mock/gomock" ) // MockFileSystemer is a mock of FileSystemer interface. type MockFileSystemer struct { ctrl *gomock.Controller recorder *MockFileSystemerMockRecorder isgomock struct{} } // MockFileSystemerMockRecorder is the mock recorder for MockFileSystemer. type MockFileSystemerMockRecorder struct { mock *MockFileSystemer } // NewMockFileSystemer creates a new mock instance. func NewMockFileSystemer(ctrl *gomock.Controller) *MockFileSystemer { mock := &MockFileSystemer{ctrl: ctrl} mock.recorder = &MockFileSystemerMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFileSystemer) EXPECT() *MockFileSystemerMockRecorder { return m.recorder } // Open mocks base method. func (m *MockFileSystemer) Open(name string) (*os.File, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Open", name) ret0, _ := ret[0].(*os.File) ret1, _ := ret[1].(error) return ret0, ret1 } // Open indicates an expected call of Open. func (mr *MockFileSystemerMockRecorder) Open(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Open", reflect.TypeOf((*MockFileSystemer)(nil).Open), name) } // OpenFile mocks base method. func (m *MockFileSystemer) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "OpenFile", name, flag, perm) ret0, _ := ret[0].(*os.File) ret1, _ := ret[1].(error) return ret0, ret1 } // OpenFile indicates an expected call of OpenFile. func (mr *MockFileSystemerMockRecorder) OpenFile(name, flag, perm any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenFile", reflect.TypeOf((*MockFileSystemer)(nil).OpenFile), name, flag, perm) } // RemoveAll mocks base method. func (m *MockFileSystemer) RemoveAll(path string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RemoveAll", path) ret0, _ := ret[0].(error) return ret0 } // RemoveAll indicates an expected call of RemoveAll. func (mr *MockFileSystemerMockRecorder) RemoveAll(path any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAll", reflect.TypeOf((*MockFileSystemer)(nil).RemoveAll), path) } // Stat mocks base method. 
func (m *MockFileSystemer) Stat(name string) (os.FileInfo, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Stat", name) ret0, _ := ret[0].(os.FileInfo) ret1, _ := ret[1].(error) return ret0, ret1 } // Stat indicates an expected call of Stat. func (mr *MockFileSystemerMockRecorder) Stat(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockFileSystemer)(nil).Stat), name) } // MockFileWriteCloser is a mock of FileWriteCloser interface. type MockFileWriteCloser struct { ctrl *gomock.Controller recorder *MockFileWriteCloserMockRecorder isgomock struct{} } // MockFileWriteCloserMockRecorder is the mock recorder for MockFileWriteCloser. type MockFileWriteCloserMockRecorder struct { mock *MockFileWriteCloser } // NewMockFileWriteCloser creates a new mock instance. func NewMockFileWriteCloser(ctrl *gomock.Controller) *MockFileWriteCloser { mock := &MockFileWriteCloser{ctrl: ctrl} mock.recorder = &MockFileWriteCloserMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFileWriteCloser) EXPECT() *MockFileWriteCloserMockRecorder { return m.recorder } // Close mocks base method. func (m *MockFileWriteCloser) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockFileWriteCloserMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockFileWriteCloser)(nil).Close)) } // Write mocks base method. func (m *MockFileWriteCloser) Write(p []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Write", p) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Write indicates an expected call of Write. func (mr *MockFileWriteCloserMockRecorder) Write(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockFileWriteCloser)(nil).Write), p) } // MockFileReadCloser is a mock of FileReadCloser interface. type MockFileReadCloser struct { ctrl *gomock.Controller recorder *MockFileReadCloserMockRecorder isgomock struct{} } // MockFileReadCloserMockRecorder is the mock recorder for MockFileReadCloser. type MockFileReadCloserMockRecorder struct { mock *MockFileReadCloser } // NewMockFileReadCloser creates a new mock instance. func NewMockFileReadCloser(ctrl *gomock.Controller) *MockFileReadCloser { mock := &MockFileReadCloser{ctrl: ctrl} mock.recorder = &MockFileReadCloserMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockFileReadCloser) EXPECT() *MockFileReadCloserMockRecorder { return m.recorder } // Close mocks base method. func (m *MockFileReadCloser) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockFileReadCloserMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockFileReadCloser)(nil).Close)) } // Read mocks base method. func (m *MockFileReadCloser) Read(p []byte) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Read", p) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Read indicates an expected call of Read. 
func (mr *MockFileReadCloserMockRecorder) Read(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockFileReadCloser)(nil).Read), p) } receptor-1.5.3/pkg/workceptor/mock_workceptor/workceptor.go000066400000000000000000000232431475465740700243100ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/workceptor/workceptor.go // // Generated by this command: // // mockgen -source=pkg/workceptor/workceptor.go -destination=pkg/workceptor/mock_workceptor/workceptor.go // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( context "context" tls "crypto/tls" fs "io/fs" net "net" reflect "reflect" controlsvc "github.com/ansible/receptor/pkg/controlsvc" logger "github.com/ansible/receptor/pkg/logger" netceptor "github.com/ansible/receptor/pkg/netceptor" gomock "go.uber.org/mock/gomock" ) // MockNetceptorForWorkceptor is a mock of NetceptorForWorkceptor interface. type MockNetceptorForWorkceptor struct { ctrl *gomock.Controller recorder *MockNetceptorForWorkceptorMockRecorder isgomock struct{} } // MockNetceptorForWorkceptorMockRecorder is the mock recorder for MockNetceptorForWorkceptor. type MockNetceptorForWorkceptorMockRecorder struct { mock *MockNetceptorForWorkceptor } // NewMockNetceptorForWorkceptor creates a new mock instance. func NewMockNetceptorForWorkceptor(ctrl *gomock.Controller) *MockNetceptorForWorkceptor { mock := &MockNetceptorForWorkceptor{ctrl: ctrl} mock.recorder = &MockNetceptorForWorkceptorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockNetceptorForWorkceptor) EXPECT() *MockNetceptorForWorkceptorMockRecorder { return m.recorder } // AddWorkCommand mocks base method. func (m *MockNetceptorForWorkceptor) AddWorkCommand(typeName string, verifySignature bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AddWorkCommand", typeName, verifySignature) ret0, _ := ret[0].(error) return ret0 } // AddWorkCommand indicates an expected call of AddWorkCommand. func (mr *MockNetceptorForWorkceptorMockRecorder) AddWorkCommand(typeName, verifySignature any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkCommand", reflect.TypeOf((*MockNetceptorForWorkceptor)(nil).AddWorkCommand), typeName, verifySignature) } // DialContext mocks base method. func (m *MockNetceptorForWorkceptor) DialContext(ctx context.Context, node, service string, tlscfg *tls.Config) (*netceptor.Conn, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DialContext", ctx, node, service, tlscfg) ret0, _ := ret[0].(*netceptor.Conn) ret1, _ := ret[1].(error) return ret0, ret1 } // DialContext indicates an expected call of DialContext. func (mr *MockNetceptorForWorkceptorMockRecorder) DialContext(ctx, node, service, tlscfg any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DialContext", reflect.TypeOf((*MockNetceptorForWorkceptor)(nil).DialContext), ctx, node, service, tlscfg) } // GetClientTLSConfig mocks base method. 
func (m *MockNetceptorForWorkceptor) GetClientTLSConfig(name, expectedHostName string, expectedHostNameType netceptor.ExpectedHostnameType) (*tls.Config, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetClientTLSConfig", name, expectedHostName, expectedHostNameType) ret0, _ := ret[0].(*tls.Config) ret1, _ := ret[1].(error) return ret0, ret1 } // GetClientTLSConfig indicates an expected call of GetClientTLSConfig. func (mr *MockNetceptorForWorkceptorMockRecorder) GetClientTLSConfig(name, expectedHostName, expectedHostNameType any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClientTLSConfig", reflect.TypeOf((*MockNetceptorForWorkceptor)(nil).GetClientTLSConfig), name, expectedHostName, expectedHostNameType) } // GetLogger mocks base method. func (m *MockNetceptorForWorkceptor) GetLogger() *logger.ReceptorLogger { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLogger") ret0, _ := ret[0].(*logger.ReceptorLogger) return ret0 } // GetLogger indicates an expected call of GetLogger. func (mr *MockNetceptorForWorkceptorMockRecorder) GetLogger() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLogger", reflect.TypeOf((*MockNetceptorForWorkceptor)(nil).GetLogger)) } // NodeID mocks base method. func (m *MockNetceptorForWorkceptor) NodeID() string { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NodeID") ret0, _ := ret[0].(string) return ret0 } // NodeID indicates an expected call of NodeID. func (mr *MockNetceptorForWorkceptorMockRecorder) NodeID() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockNetceptorForWorkceptor)(nil).NodeID)) } // MockServerForWorkceptor is a mock of ServerForWorkceptor interface. type MockServerForWorkceptor struct { ctrl *gomock.Controller recorder *MockServerForWorkceptorMockRecorder isgomock struct{} } // MockServerForWorkceptorMockRecorder is the mock recorder for MockServerForWorkceptor. type MockServerForWorkceptorMockRecorder struct { mock *MockServerForWorkceptor } // NewMockServerForWorkceptor creates a new mock instance. func NewMockServerForWorkceptor(ctrl *gomock.Controller) *MockServerForWorkceptor { mock := &MockServerForWorkceptor{ctrl: ctrl} mock.recorder = &MockServerForWorkceptorMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockServerForWorkceptor) EXPECT() *MockServerForWorkceptorMockRecorder { return m.recorder } // AddControlFunc mocks base method. func (m *MockServerForWorkceptor) AddControlFunc(name string, cType controlsvc.ControlCommandType) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "AddControlFunc", name, cType) ret0, _ := ret[0].(error) return ret0 } // AddControlFunc indicates an expected call of AddControlFunc. func (mr *MockServerForWorkceptorMockRecorder) AddControlFunc(name, cType any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddControlFunc", reflect.TypeOf((*MockServerForWorkceptor)(nil).AddControlFunc), name, cType) } // ConnectionListener mocks base method. func (m *MockServerForWorkceptor) ConnectionListener(ctx context.Context, listener net.Listener) { m.ctrl.T.Helper() m.ctrl.Call(m, "ConnectionListener", ctx, listener) } // ConnectionListener indicates an expected call of ConnectionListener. 
func (mr *MockServerForWorkceptorMockRecorder) ConnectionListener(ctx, listener any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectionListener", reflect.TypeOf((*MockServerForWorkceptor)(nil).ConnectionListener), ctx, listener) } // RunControlSession mocks base method. func (m *MockServerForWorkceptor) RunControlSession(conn net.Conn) { m.ctrl.T.Helper() m.ctrl.Call(m, "RunControlSession", conn) } // RunControlSession indicates an expected call of RunControlSession. func (mr *MockServerForWorkceptorMockRecorder) RunControlSession(conn any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunControlSession", reflect.TypeOf((*MockServerForWorkceptor)(nil).RunControlSession), conn) } // RunControlSvc mocks base method. func (m *MockServerForWorkceptor) RunControlSvc(ctx context.Context, service string, tlscfg *tls.Config, unixSocket string, unixSocketPermissions fs.FileMode, tcpListen string, tcptls *tls.Config) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "RunControlSvc", ctx, service, tlscfg, unixSocket, unixSocketPermissions, tcpListen, tcptls) ret0, _ := ret[0].(error) return ret0 } // RunControlSvc indicates an expected call of RunControlSvc. func (mr *MockServerForWorkceptorMockRecorder) RunControlSvc(ctx, service, tlscfg, unixSocket, unixSocketPermissions, tcpListen, tcptls any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunControlSvc", reflect.TypeOf((*MockServerForWorkceptor)(nil).RunControlSvc), ctx, service, tlscfg, unixSocket, unixSocketPermissions, tcpListen, tcptls) } // SetServerNet mocks base method. func (m *MockServerForWorkceptor) SetServerNet(n controlsvc.Neter) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetServerNet", n) } // SetServerNet indicates an expected call of SetServerNet. func (mr *MockServerForWorkceptorMockRecorder) SetServerNet(n any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetServerNet", reflect.TypeOf((*MockServerForWorkceptor)(nil).SetServerNet), n) } // SetServerTLS mocks base method. func (m *MockServerForWorkceptor) SetServerTLS(t controlsvc.Tlser) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetServerTLS", t) } // SetServerTLS indicates an expected call of SetServerTLS. func (mr *MockServerForWorkceptorMockRecorder) SetServerTLS(t any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetServerTLS", reflect.TypeOf((*MockServerForWorkceptor)(nil).SetServerTLS), t) } // SetServerUtils mocks base method. func (m *MockServerForWorkceptor) SetServerUtils(u controlsvc.Utiler) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetServerUtils", u) } // SetServerUtils indicates an expected call of SetServerUtils. func (mr *MockServerForWorkceptorMockRecorder) SetServerUtils(u any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetServerUtils", reflect.TypeOf((*MockServerForWorkceptor)(nil).SetServerUtils), u) } // SetupConnection mocks base method. func (m *MockServerForWorkceptor) SetupConnection(conn net.Conn) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetupConnection", conn) } // SetupConnection indicates an expected call of SetupConnection. 
func (mr *MockServerForWorkceptorMockRecorder) SetupConnection(conn any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupConnection", reflect.TypeOf((*MockServerForWorkceptor)(nil).SetupConnection), conn) } receptor-1.5.3/pkg/workceptor/mock_workceptor/workunitbase.go000066400000000000000000000066271475465740700246350ustar00rootroot00000000000000// Code generated by MockGen. DO NOT EDIT. // Source: pkg/workceptor/workunitbase.go // // Generated by this command: // // mockgen -source=pkg/workceptor/workunitbase.go -destination=pkg/workceptor/mock_workceptor/workunitbase.go // // Package mock_workceptor is a generated GoMock package. package mock_workceptor import ( reflect "reflect" fsnotify "github.com/fsnotify/fsnotify" gomock "go.uber.org/mock/gomock" ) // MockWatcherWrapper is a mock of WatcherWrapper interface. type MockWatcherWrapper struct { ctrl *gomock.Controller recorder *MockWatcherWrapperMockRecorder isgomock struct{} } // MockWatcherWrapperMockRecorder is the mock recorder for MockWatcherWrapper. type MockWatcherWrapperMockRecorder struct { mock *MockWatcherWrapper } // NewMockWatcherWrapper creates a new mock instance. func NewMockWatcherWrapper(ctrl *gomock.Controller) *MockWatcherWrapper { mock := &MockWatcherWrapper{ctrl: ctrl} mock.recorder = &MockWatcherWrapperMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. func (m *MockWatcherWrapper) EXPECT() *MockWatcherWrapperMockRecorder { return m.recorder } // Add mocks base method. func (m *MockWatcherWrapper) Add(name string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Add", name) ret0, _ := ret[0].(error) return ret0 } // Add indicates an expected call of Add. func (mr *MockWatcherWrapperMockRecorder) Add(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockWatcherWrapper)(nil).Add), name) } // Close mocks base method. func (m *MockWatcherWrapper) Close() error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Close") ret0, _ := ret[0].(error) return ret0 } // Close indicates an expected call of Close. func (mr *MockWatcherWrapperMockRecorder) Close() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockWatcherWrapper)(nil).Close)) } // ErrorChannel mocks base method. func (m *MockWatcherWrapper) ErrorChannel() chan error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ErrorChannel") ret0, _ := ret[0].(chan error) return ret0 } // ErrorChannel indicates an expected call of ErrorChannel. func (mr *MockWatcherWrapperMockRecorder) ErrorChannel() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ErrorChannel", reflect.TypeOf((*MockWatcherWrapper)(nil).ErrorChannel)) } // EventChannel mocks base method. func (m *MockWatcherWrapper) EventChannel() chan fsnotify.Event { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EventChannel") ret0, _ := ret[0].(chan fsnotify.Event) return ret0 } // EventChannel indicates an expected call of EventChannel. func (mr *MockWatcherWrapperMockRecorder) EventChannel() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EventChannel", reflect.TypeOf((*MockWatcherWrapper)(nil).EventChannel)) } // Remove mocks base method. 
func (m *MockWatcherWrapper) Remove(path string) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Remove", path) ret0, _ := ret[0].(error) return ret0 } // Remove indicates an expected call of Remove. func (mr *MockWatcherWrapperMockRecorder) Remove(path any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockWatcherWrapper)(nil).Remove), path) } receptor-1.5.3/pkg/workceptor/python.go000066400000000000000000000056341475465740700202260ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "encoding/json" "errors" "fmt" "os/exec" "github.com/ghjm/cmdline" "github.com/spf13/viper" ) // pythonUnit implements the WorkUnit interface. type pythonUnit struct { commandUnit plugin string function string config map[string]interface{} } // Start launches a job with given parameters. func (pw *pythonUnit) Start() error { pw.UpdateBasicStatus(WorkStatePending, "[DEPRECATION WARNING] '--work-python' option is not currently being used. This feature will be removed from receptor in a future release.", 0) pw.UpdateBasicStatus(WorkStatePending, "Launching Python runner", 0) config := make(map[string]interface{}) for k, v := range pw.config { config[k] = v } config["params"] = pw.Status().ExtraData.(*CommandExtraData).Params configJSON, err := json.Marshal(config) if err != nil { return err } cmd := exec.Command("receptor-python-worker", fmt.Sprintf("%s:%s", pw.plugin, pw.function), pw.UnitDir(), string(configJSON)) return pw.runCommand(cmd) } // ************************************************************************** // Command line // ************************************************************************** // workPythonCfg is the cmdline configuration object for a Python worker plugin. type WorkPythonCfg struct { WorkType string `required:"true" description:"Name for this worker type"` Plugin string `required:"true" description:"Python module name of the worker plugin"` Function string `required:"true" description:"Receptor-exported function to call"` Config map[string]interface{} `description:"Plugin-specific configuration"` } // NewWorker is a factory to produce worker instances. func (cfg WorkPythonCfg) NewWorker(_ BaseWorkUnitForWorkUnit, w *Workceptor, unitID string, workType string) WorkUnit { cw := &pythonUnit{ commandUnit: commandUnit{ BaseWorkUnitForWorkUnit: &BaseWorkUnit{ status: StatusFileData{ ExtraData: &CommandExtraData{}, }, }, }, plugin: cfg.Plugin, function: cfg.Function, config: cfg.Config, } cw.BaseWorkUnitForWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) return cw } // Run runs the action. func (cfg WorkPythonCfg) Run() error { err := MainInstance.RegisterWorker(cfg.WorkType, cfg.NewWorker, false) errWithDeprecation := errors.Join(err, errors.New("[DEPRECATION WARNING] 'work-python' option is not currently being used. This feature will be removed from receptor in a future release")) return errWithDeprecation } func init() { version := viper.GetInt("version") if version > 1 { return } cmdline.RegisterConfigTypeForApp("receptor-workers", "work-python", "Run a worker using a Python plugin\n[DEPRECATION WARNING] This option is not currently being used. 
This feature will be removed from receptor in a future release.", WorkPythonCfg{}, cmdline.Section(workersSection)) } receptor-1.5.3/pkg/workceptor/remote_work.go000066400000000000000000000524611475465740700212420ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "bufio" "context" "encoding/json" "fmt" "io" "net" "os" "path" "regexp" "strings" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/utils" ) // remoteUnit implements the WorkUnit interface for the Receptor remote worker plugin. type remoteUnit struct { BaseWorkUnitForWorkUnit topJC *utils.JobContext logger *logger.ReceptorLogger } // RemoteExtraData is the content of the ExtraData JSON field for a remote work unit. type RemoteExtraData struct { RemoteNode string RemoteWorkType string RemoteParams map[string]string RemoteUnitID string RemoteStarted bool LocalCancelled bool LocalReleased bool SignWork bool TLSClient string Expiration time.Time } type actionFunc func(context.Context, net.Conn, *bufio.Reader) error // connectToRemote establishes a control socket connection to a remote node. func (rw *remoteUnit) connectToRemote(ctx context.Context) (net.Conn, *bufio.Reader, error) { status := rw.Status() red, ok := status.ExtraData.(*RemoteExtraData) if !ok { return nil, nil, fmt.Errorf("remote ExtraData missing") } tlsConfig, err := rw.GetWorkceptor().nc.GetClientTLSConfig(red.TLSClient, red.RemoteNode, netceptor.ExpectedHostnameTypeReceptor) if err != nil { return nil, nil, err } conn, err := rw.GetWorkceptor().nc.DialContext(ctx, red.RemoteNode, "control", tlsConfig) if err != nil { return nil, nil, err } reader := bufio.NewReader(conn) ctxChild, ctxCancel := context.WithTimeout(ctx, 5*time.Second) defer ctxCancel() hello, err := utils.ReadStringContext(ctxChild, reader, '\n') if err != nil { conn.CloseConnection() return nil, nil, err } if !strings.Contains(hello, red.RemoteNode) { conn.CloseConnection() return nil, nil, fmt.Errorf("while expecting node ID %s, got message: %s", red.RemoteNode, strings.TrimRight(hello, "\n")) } return conn, reader, nil } // getConnection retries connectToRemote until connected or the context expires. func (rw *remoteUnit) getConnection(ctx context.Context) (net.Conn, *bufio.Reader) { connectDelay := utils.NewIncrementalDuration(SuccessWorkSleep, MaxWorkSleep, 1.5) for { conn, reader, err := rw.connectToRemote(ctx) if err == nil { return conn, reader } rw.GetWorkceptor().nc.GetLogger().Info("Connection to %s failed with error: %s", rw.Status().ExtraData.(*RemoteExtraData).RemoteNode, err) errStr := err.Error() if strings.Contains(errStr, "CRYPTO_ERROR") { shouldExit := false rw.UpdateFullStatus(func(status *StatusFileData) { status.Detail = fmt.Sprintf("TLS error connecting to remote service: %s", errStr) if !status.ExtraData.(*RemoteExtraData).RemoteStarted { shouldExit = true status.State = WorkStateFailed } }) if shouldExit { return nil, nil } } select { case <-ctx.Done(): return nil, nil case <-connectDelay.NextTimeout(): } } } // connectAndRun makes a single attempt to connect to a remote node and runs an action function. func (rw *remoteUnit) connectAndRun(ctx context.Context, action actionFunc) error { conn, reader, err := rw.connectToRemote(ctx) if err != nil { return utils.WrapErrorWithKind(err, "connection") } return action(ctx, conn, reader) } // getConnectionAndRun retries connecting to a host and, once the connection succeeds, runs an action function. 
// If firstTimeSync is true, a single attempt is made on the calling goroutine. If the initial attempt fails to // connect or firstTimeSync is false, we run the action in a background goroutine and return ErrPending to the caller. func (rw *remoteUnit) getConnectionAndRun(ctx context.Context, firstTimeSync bool, action actionFunc, failure func()) error { if firstTimeSync { err := rw.connectAndRun(ctx, action) if err == nil { return nil } else if !utils.ErrorIsKind(err, "connection") { return err } } go func() { conn, reader := rw.getConnection(ctx) if conn != nil { err := action(ctx, conn, reader) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error running action function: %s", err) } } else { failure() } }() return ErrPending } // startRemoteUnit makes a single attempt to start a remote unit. func (rw *remoteUnit) startRemoteUnit(ctx context.Context, conn net.Conn, reader *bufio.Reader) error { defer conn.(interface{ CloseConnection() error }).CloseConnection() red := rw.UnredactedStatus().ExtraData.(*RemoteExtraData) workSubmitCmd := make(map[string]interface{}) for k, v := range red.RemoteParams { workSubmitCmd[k] = v } workSubmitCmd["workUnitID"] = rw.ID() workSubmitCmd["command"] = "work" workSubmitCmd["subcommand"] = "submit" workSubmitCmd["node"] = red.RemoteNode workSubmitCmd["worktype"] = red.RemoteWorkType workSubmitCmd["tlsclient"] = red.TLSClient if red.SignWork { signature, err := rw.GetWorkceptor().createSignature(red.RemoteNode) if err != nil { return err } workSubmitCmd["signature"] = signature } wscBytes, err := json.Marshal(workSubmitCmd) if err != nil { return fmt.Errorf("error constructing work submit command: %s", err) } _, err = conn.Write(wscBytes) if err == nil { _, err = conn.Write([]byte("\n")) } if err != nil { return fmt.Errorf("write error sending to %s: %s", red.RemoteNode, err) } response, err := utils.ReadStringContext(ctx, reader, '\n') if err != nil { return fmt.Errorf("read error reading from %s: %s", red.RemoteNode, err) } submitIDRegex := regexp.MustCompile(`with ID ([a-zA-Z0-9]+)\.`) match := submitIDRegex.FindSubmatch([]byte(response)) if match == nil || len(match) != 2 { return fmt.Errorf("could not parse response: %s", strings.TrimRight(response, "\n")) } red.RemoteUnitID = string(match[1]) rw.UpdateFullStatus(func(status *StatusFileData) { ed := status.ExtraData.(*RemoteExtraData) ed.RemoteUnitID = red.RemoteUnitID }) stdin, err := os.Open(path.Join(rw.UnitDir(), "stdin")) if err != nil { return fmt.Errorf("error opening stdin file: %s", err) } defer func() { err := stdin.Close() if err != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", path.Join(rw.UnitDir(), "stdin"), err) } }() _, err = io.Copy(conn, stdin) if err != nil { return fmt.Errorf("error sending stdin file: %s", err) } err = conn.Close() if err != nil { return fmt.Errorf("error closing stdin file: %s", err) } response, err = utils.ReadStringContext(ctx, reader, '\n') if err != nil { return fmt.Errorf("read error reading from %s: %s", red.RemoteNode, err) } resultErrorRegex := regexp.MustCompile("ERROR: (.*)") match = resultErrorRegex.FindSubmatch([]byte(response)) if match != nil { return fmt.Errorf("error from remote: %s", match[1]) } rw.UpdateFullStatus(func(status *StatusFileData) { ed := status.ExtraData.(*RemoteExtraData) ed.RemoteStarted = true }) return nil } // cancelOrReleaseRemoteUnit makes a single attempt to cancel or release a remote unit.
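// Illustrative sketch (editor's addition, not upstream documentation): the request written to the
// remote control service is a single JSON object terminated by a newline, roughly of the form
//
//	{"command": "work", "subcommand": "release", "unitid": "<remote unit ID>", "signature": "<JWT>"}
//
// where "subcommand" is "cancel" when release is false and "signature" is present only when
// SignWork is enabled; the literal values shown here are hypothetical.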
func (rw *remoteUnit) cancelOrReleaseRemoteUnit(ctx context.Context, conn net.Conn, reader *bufio.Reader, release bool, ) error { defer conn.(interface{ CloseConnection() error }).CloseConnection() red := rw.Status().ExtraData.(*RemoteExtraData) var workCmd string if release { workCmd = "release" } else { workCmd = "cancel" } workSubmitCmd := make(map[string]interface{}) workSubmitCmd["command"] = "work" workSubmitCmd["subcommand"] = workCmd workSubmitCmd["unitid"] = red.RemoteUnitID if red.SignWork { signature, err := rw.GetWorkceptor().createSignature(red.RemoteNode) if err != nil { return err } workSubmitCmd["signature"] = signature } wscBytes, err := json.Marshal(workSubmitCmd) if err != nil { return fmt.Errorf("error constructing work %s command: %s", workCmd, err) } wscBytes = append(wscBytes, '\n') _, err = conn.Write(wscBytes) if err != nil { return fmt.Errorf("write error sending to %s: %s", red.RemoteNode, err) } response, err := utils.ReadStringContext(ctx, reader, '\n') if err != nil { return fmt.Errorf("read error reading from %s: %s", red.RemoteNode, err) } if response[:5] == "ERROR" { return fmt.Errorf("error cancelling remote unit: %s", response[6:]) } return nil } // monitorRemoteStatus monitors the remote status file and copies results to the local one. func (rw *remoteUnit) monitorRemoteStatus(mw *utils.JobContext, forRelease bool) { defer func() { mw.Cancel() mw.WorkerDone() }() status := rw.Status() red, ok := status.ExtraData.(*RemoteExtraData) if !ok { rw.GetWorkceptor().nc.GetLogger().Error("remote ExtraData missing") return } remoteNode := red.RemoteNode remoteUnitID := red.RemoteUnitID conn, reader := rw.getConnection(mw) defer func() { if conn != nil { conn.(interface{ CloseConnection() error }).CloseConnection() } }() if conn == nil { return } writeStatusFailures := 0 for { if conn == nil { conn, reader = rw.getConnection(mw) if conn == nil { return } } _, err := conn.Write([]byte(fmt.Sprintf("work status %s\n", remoteUnitID))) if err != nil { rw.GetWorkceptor().nc.GetLogger().Debug("Write error sending to %s: %s\n", remoteUnitID, err) cerr := conn.(interface{ CloseConnection() error }).CloseConnection() if cerr != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error closing connection to remote work unit %s: %s", remoteUnitID, cerr) } conn = nil continue } status, err := utils.ReadStringContext(mw, reader, '\n') if err != nil { rw.GetWorkceptor().nc.GetLogger().Debug("Read error reading from %s: %s\n", remoteNode, err) cerr := conn.(interface{ CloseConnection() error }).CloseConnection() if cerr != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error closing connection from node %s: %s", remoteNode, cerr) } conn = nil continue } if status[:5] == "ERROR" { if strings.Contains(status, "unknown work unit") { if !forRelease { rw.GetWorkceptor().nc.GetLogger().Debug("Work unit %s on node %s is gone.\n", remoteUnitID, remoteNode) rw.UpdateFullStatus(func(status *StatusFileData) { status.State = WorkStateFailed status.Detail = "Remote work unit is gone" }) } return } rw.GetWorkceptor().nc.GetLogger().Error("Remote error: %s\n", strings.TrimRight(status[6:], "\n")) return } si := StatusFileData{} err = json.Unmarshal([]byte(status), &si) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error unmarshalling JSON: %s\n", status) return } rw.UpdateBasicStatus(si.State, si.Detail, si.StdoutSize) if rw.LastUpdateError() != nil { writeStatusFailures++ if writeStatusFailures > 3 { rw.GetWorkceptor().nc.GetLogger().Error("Exceeded retries for updating status file for 
work unit %s", rw.ID()) return } } else { writeStatusFailures = 0 } if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error saving local status file: %s\n", err) return } if sleepOrDone(mw.Done(), 1*time.Second) { return } } } // monitorRemoteStdout copies the remote stdout stream to the local buffer. func (rw *remoteUnit) monitorRemoteStdout(mw *utils.JobContext) { defer func() { mw.Cancel() mw.WorkerDone() }() firstTime := true status := rw.Status() red, ok := status.ExtraData.(*RemoteExtraData) if !ok { rw.GetWorkceptor().nc.GetLogger().Error("remote ExtraData missing") return } remoteNode := red.RemoteNode remoteUnitID := red.RemoteUnitID stdout, err := os.OpenFile(rw.StdoutFileName(), os.O_CREATE+os.O_APPEND+os.O_WRONLY, 0o600) if err == nil { err = stdout.Close() } if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Could not open stdout file %s: %s\n", rw.StdoutFileName(), err) return } for { if firstTime { firstTime = false if mw.Err() != nil { return } } else if sleepOrDone(mw.Done(), 1*time.Second) { return } err := rw.Load() if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Could not read status file %s: %s\n", rw.StatusFileName(), err) return } status := rw.Status() diskStdoutSize := stdoutSize(rw.UnitDir()) remoteStdoutSize := status.StdoutSize if IsComplete(status.State) && diskStdoutSize >= remoteStdoutSize { return } else if diskStdoutSize < remoteStdoutSize { conn, reader := rw.getConnection(mw) defer func() { if conn != nil { cerr := conn.(interface{ CloseConnection() error }).CloseConnection() if cerr != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error closing connection to %s: %s", remoteUnitID, cerr) } } }() if conn == nil { return } workSubmitCmd := make(map[string]interface{}) workSubmitCmd["command"] = "work" workSubmitCmd["subcommand"] = "results" workSubmitCmd["unitid"] = remoteUnitID workSubmitCmd["startpos"] = diskStdoutSize if red.SignWork { signature, err := rw.GetWorkceptor().createSignature(red.RemoteNode) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("could not create signature to get results") return } workSubmitCmd["signature"] = signature } wscBytes, err := json.Marshal(workSubmitCmd) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("error constructing work results command: %s", err) return } wscBytes = append(wscBytes, '\n') _, err = conn.Write(wscBytes) if err != nil { rw.GetWorkceptor().nc.GetLogger().Warning("Write error sending to %s: %s\n", remoteNode, err) continue } status, err := utils.ReadStringContext(mw, reader, '\n') if err != nil { rw.GetWorkceptor().nc.GetLogger().Warning("Read error reading from %s: %s\n", remoteNode, err) continue } if !strings.Contains(status, "Streaming results") { rw.GetWorkceptor().nc.GetLogger().Warning("Remote node %s did not stream results\n", remoteNode) continue } stdout, err := os.OpenFile(rw.StdoutFileName(), os.O_CREATE+os.O_APPEND+os.O_WRONLY, 0o600) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Could not open stdout file %s: %s\n", rw.StdoutFileName(), err) return } defer func() { err := stdout.Close() if err != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", rw.StdoutFileName(), err) } }() doneChan := make(chan struct{}) go func() { select { case <-doneChan: return case <-mw.Done(): cr, ok := conn.(interface{ CancelRead() }) if ok { cr.CancelRead() } cerr := conn.(interface{ CloseConnection() error }).CloseConnection() if cerr != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error closing connection to %s: %s", remoteUnitID, cerr) } return 
} }() _, err = io.Copy(stdout, reader) close(doneChan) if err != nil { var errmsg string if strings.HasSuffix(err.Error(), "error code 499") { errmsg = "read operation cancelled" } else { errmsg = err.Error() } rw.GetWorkceptor().nc.GetLogger().Warning("Could not copy to stdout file %s: %s\n", rw.StdoutFileName(), errmsg) continue } } } } // monitorRemoteUnit watches a remote unit on another node and maintains local status. func (rw *remoteUnit) monitorRemoteUnit(ctx context.Context, forRelease bool) { subJC := &utils.JobContext{} if forRelease { subJC.NewJob(ctx, 1, false) go rw.monitorRemoteStatus(subJC, true) } else { subJC.NewJob(ctx, 2, false) go rw.monitorRemoteStatus(subJC, false) go rw.monitorRemoteStdout(subJC) } subJC.Wait() } // SetFromParams sets the in-memory state from parameters. func (rw *remoteUnit) SetFromParams(params map[string]string) error { for k, v := range params { rw.GetStatusCopy().ExtraData.(*RemoteExtraData).RemoteParams[k] = v } return nil } // Status returns a copy of the status currently loaded in memory. func (rw *remoteUnit) Status() *StatusFileData { status := rw.UnredactedStatus() ed, ok := status.ExtraData.(*RemoteExtraData) if ok { keysToDelete := make([]string, 0) for k := range ed.RemoteParams { if strings.HasPrefix(strings.ToLower(k), "secret_") { keysToDelete = append(keysToDelete, k) } } for i := range keysToDelete { delete(ed.RemoteParams, keysToDelete[i]) } } return status } // UnredactedStatus returns a copy of the status currently loaded in memory, including secrets. func (rw *remoteUnit) UnredactedStatus() *StatusFileData { rw.GetStatusLock().RLock() status := rw.GetStatusWithoutExtraData() ed, ok := rw.GetStatusCopy().ExtraData.(*RemoteExtraData) if ok { edCopy := *ed edCopy.RemoteParams = make(map[string]string) for k, v := range ed.RemoteParams { edCopy.RemoteParams[k] = v } status.ExtraData = &edCopy } rw.GetStatusLock().RUnlock() return status } // runAndMonitor waits for a connection to be available, then starts the remote unit and monitors it. func (rw *remoteUnit) runAndMonitor(mw *utils.JobContext, forRelease bool, action actionFunc) error { return rw.getConnectionAndRun(mw, true, func(ctx context.Context, conn net.Conn, reader *bufio.Reader) error { err := action(ctx, conn, reader) if err != nil { mw.WorkerDone() return err } go func() { rw.monitorRemoteUnit(ctx, forRelease) if forRelease { err := rw.BaseWorkUnitForWorkUnit.Release(false) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error releasing unit %s: %s", rw.UnitDir(), err) } } mw.WorkerDone() }() return nil }, func() { mw.WorkerDone() }) } func (rw *remoteUnit) setExpiration(mw *utils.JobContext) { red := rw.Status().ExtraData.(*RemoteExtraData) dur := time.Until(red.Expiration) select { case <-mw.Done(): case <-time.After(dur): red := rw.Status().ExtraData.(*RemoteExtraData) if !red.RemoteStarted { rw.UpdateFullStatus(func(status *StatusFileData) { status.Detail = fmt.Sprintf("Work unit expired on %s", red.Expiration.Format("Mon Jan 2 15:04:05")) status.State = WorkStateFailed }) mw.Cancel() } } } // startOrRestart is a shared implementation of Start() and Restart(). 
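// Editor's note (hedged summary of the body below): a fresh start submits the unit to the remote
// node and begins monitoring it, scheduling an expiration timer when a TTL was set; restarting a
// unit that was locally cancelled or released before Receptor restarted replays that cancel or
// release against the remote node; otherwise the restart simply resumes monitoring.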
func (rw *remoteUnit) startOrRestart(start bool) error { red := rw.Status().ExtraData.(*RemoteExtraData) if start && red.RemoteStarted { return fmt.Errorf("unit was already started") } newJobStarted := rw.topJC.NewJob(rw.GetWorkceptor().ctx, 1, true) if !newJobStarted { return fmt.Errorf("start or monitor process already running") } if start || !red.RemoteStarted { if !red.Expiration.IsZero() { go rw.setExpiration(rw.topJC) } return rw.runAndMonitor(rw.topJC, false, rw.startRemoteUnit) } else if red.LocalReleased || red.LocalCancelled { return rw.runAndMonitor(rw.topJC, true, func(ctx context.Context, conn net.Conn, reader *bufio.Reader) error { return rw.cancelOrReleaseRemoteUnit(ctx, conn, reader, red.LocalReleased) }) } go func() { rw.monitorRemoteUnit(rw.topJC, false) rw.topJC.WorkerDone() }() return nil } // Start launches a job with given parameters. If the remote node is unreachable, returns ErrPending // and continues to retry in the background. func (rw *remoteUnit) Start() error { return rw.startOrRestart(true) } // Restart resumes monitoring a job after a Receptor restart. func (rw *remoteUnit) Restart() error { red := rw.Status().ExtraData.(*RemoteExtraData) if red.RemoteStarted { return rw.startOrRestart(false) } return fmt.Errorf("remote work had not previously started") } // cancelOrRelease is a shared implementation of Cancel() and Release(). func (rw *remoteUnit) cancelOrRelease(release bool, force bool) error { // Update the status file that the unit is locally cancelled/released var remoteStarted bool rw.UpdateFullStatus(func(status *StatusFileData) { status.ExtraData.(*RemoteExtraData).LocalCancelled = true if release { status.ExtraData.(*RemoteExtraData).LocalReleased = true } remoteStarted = status.ExtraData.(*RemoteExtraData).RemoteStarted }) // if remote work has not started, don't attempt to connect to remote if !remoteStarted { rw.topJC.Cancel() rw.topJC.Wait() if release { return rw.BaseWorkUnitForWorkUnit.Release(true) } rw.UpdateBasicStatus(WorkStateFailed, "Locally Cancelled", 0) return nil } if release && force { err := rw.connectAndRun(rw.GetWorkceptor().ctx, func(ctx context.Context, conn net.Conn, reader *bufio.Reader) error { return rw.cancelOrReleaseRemoteUnit(ctx, conn, reader, true) }) if err != nil { rw.GetWorkceptor().nc.GetLogger().Error("Error with connect and run: %s", err) } return rw.BaseWorkUnitForWorkUnit.Release(true) } rw.topJC.NewJob(rw.GetWorkceptor().ctx, 1, false) return rw.runAndMonitor(rw.topJC, release, func(ctx context.Context, conn net.Conn, reader *bufio.Reader) error { return rw.cancelOrReleaseRemoteUnit(ctx, conn, reader, release) }) } // Cancel stops a running job. func (rw *remoteUnit) Cancel() error { return rw.cancelOrRelease(false, false) } // Release releases resources associated with a job. Implies Cancel. 
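// Editor's note (hedged): when force is true, a single best-effort attempt is made to release the
// remote unit and the local resources are removed even if that attempt fails; without force, the
// release is retried through the normal connect-and-monitor path before local cleanup occurs.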
func (rw *remoteUnit) Release(force bool) error { return rw.cancelOrRelease(true, force) } func NewRemoteWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID, workType string) WorkUnit { return newRemoteWorker(bwu, w, unitID, workType) } func newRemoteWorker(bwu BaseWorkUnitForWorkUnit, w *Workceptor, unitID, workType string) WorkUnit { if bwu == nil { bwu = &BaseWorkUnit{} } rw := &remoteUnit{ BaseWorkUnitForWorkUnit: bwu, logger: w.nc.GetLogger(), } rw.BaseWorkUnitForWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) red := &RemoteExtraData{} red.RemoteParams = make(map[string]string) rw.SetStatusExtraData(red) rw.topJC = &utils.JobContext{} return rw } receptor-1.5.3/pkg/workceptor/remote_work_test.go000066400000000000000000000033351475465740700222750ustar00rootroot00000000000000package workceptor_test import ( "context" "sync" "testing" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "go.uber.org/mock/gomock" ) func createRemoteWorkTestSetup(t *testing.T) (workceptor.WorkUnit, *mock_workceptor.MockBaseWorkUnitForWorkUnit, *mock_workceptor.MockNetceptorForWorkceptor, *workceptor.Workceptor) { ctrl := gomock.NewController(t) ctx := context.Background() mockBaseWorkUnit := mock_workceptor.NewMockBaseWorkUnitForWorkUnit(ctrl) mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) mockNetceptor.EXPECT().NodeID().Return("NodeID") mockNetceptor.EXPECT().GetLogger() w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } mockBaseWorkUnit.EXPECT().Init(w, "", "", workceptor.FileSystem{}, nil) mockBaseWorkUnit.EXPECT().SetStatusExtraData(gomock.Any()) workUnit := workceptor.NewRemoteWorker(mockBaseWorkUnit, w, "", "") return workUnit, mockBaseWorkUnit, mockNetceptor, w } func TestRemoteWorkUnredactedStatus(t *testing.T) { t.Parallel() wu, mockBaseWorkUnit, _, _ := createRemoteWorkTestSetup(t) restartTestCases := []struct { name string }{ {name: "test1"}, {name: "test2"}, } statusLock := &sync.RWMutex{} for _, testCase := range restartTestCases { t.Run(testCase.name, func(t *testing.T) { t.Parallel() mockBaseWorkUnit.EXPECT().GetStatusLock().Return(statusLock).Times(2) mockBaseWorkUnit.EXPECT().GetStatusWithoutExtraData().Return(&workceptor.StatusFileData{}) mockBaseWorkUnit.EXPECT().GetStatusCopy().Return(workceptor.StatusFileData{ ExtraData: &workceptor.RemoteExtraData{}, }) wu.UnredactedStatus() }) } } receptor-1.5.3/pkg/workceptor/stdio_utils.go000066400000000000000000000110601475465740700212350ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "errors" "io" "os" "path" "strconv" "strings" "sync" ) // FileSystemer represents a filesystem. type FileSystemer interface { OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) Stat(name string) (os.FileInfo, error) Open(name string) (*os.File, error) RemoveAll(path string) error } // FileSystem represents the real filesystem. type FileSystem struct{} // OpenFile opens a file on the filesystem. func (FileSystem) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) { return os.OpenFile(name, flag, perm) } // Stat retrieves the FileInfo for a given file name. func (FileSystem) Stat(name string) (os.FileInfo, error) { return os.Stat(name) } // Open opens a file. func (FileSystem) Open(name string) (*os.File, error) { return os.Open(name) } // RemoveAll removes path and any children it contains. 
func (FileSystem) RemoveAll(path string) error { return os.RemoveAll(path) } // FileWriteCloser wraps io.WriteCloser. type FileWriteCloser interface { io.WriteCloser } // FileReadCloser wraps io.ReadCloser. type FileReadCloser interface { io.ReadCloser } // saveStdoutSize only the stdout size in the status metadata file in the unitdir. func saveStdoutSize(unitdir string, stdoutSize int64) error { statusFilename := path.Join(unitdir, "status") si := &StatusFileData{} return si.UpdateFullStatus(statusFilename, func(status *StatusFileData) { status.StdoutSize = stdoutSize }) } // STDoutWriter writes to a stdout file while also updating the status file. type STDoutWriter struct { unitdir string writer FileWriteCloser bytesWritten int64 } // NewStdoutWriter allocates a new stdoutWriter, which writes to both the stdout and status files. func NewStdoutWriter(fs FileSystemer, unitdir string) (*STDoutWriter, error) { writer, err := fs.OpenFile(path.Join(unitdir, "stdout"), os.O_CREATE+os.O_WRONLY+os.O_SYNC, 0o600) if err != nil { return nil, err } return &STDoutWriter{ unitdir: unitdir, writer: writer, bytesWritten: 0, }, nil } // Write writes data to the stdout file and status file, implementing io.Writer. func (sw *STDoutWriter) Write(p []byte) (n int, err error) { wn, werr := sw.writer.Write(p) var serr error if wn > 0 { sw.bytesWritten += int64(wn) serr = saveStdoutSize(sw.unitdir, sw.bytesWritten) } if werr != nil { return wn, werr } return wn, serr } // Size returns the current size of the stdout file. func (sw *STDoutWriter) Size() int64 { return sw.bytesWritten } // SetWriter sets the writer var. func (sw *STDoutWriter) SetWriter(writer FileWriteCloser) { sw.writer = writer } // STDinReader reads from a stdin file and provides a Done function. type STDinReader struct { reader FileReadCloser workUnit string lasterr error doneChan chan struct{} doneOnce sync.Once } var errFileSizeZero = errors.New("file is empty") // NewStdinReader allocates a new stdinReader, which reads from a stdin file and provides a Done function. func NewStdinReader(fs FileSystemer, unitdir string) (*STDinReader, error) { splitUnitDir := strings.Split(unitdir, "/") workUnitID := splitUnitDir[len(splitUnitDir)-1] stdinpath := path.Join(unitdir, "stdin") stat, err := fs.Stat(stdinpath) if err != nil { return nil, err } if stat.Size() == 0 { return nil, errFileSizeZero } reader, err := fs.Open(stdinpath) if err != nil { return nil, err } return &STDinReader{ reader: reader, workUnit: workUnitID, lasterr: nil, doneChan: make(chan struct{}), doneOnce: sync.Once{}, }, nil } // Read reads data from the stdout file, implementing io.Reader. func (sr *STDinReader) Read(p []byte) (n int, err error) { payloadDebug, _ := strconv.Atoi(os.Getenv("RECEPTOR_PAYLOAD_TRACE_LEVEL")) if payloadDebug != 0 { isNotEmpty := func() bool { for _, v := range p { if v != 0 { return true } } return false }() if isNotEmpty { payload := string(p) MainInstance.nc.GetLogger().DebugPayload(payloadDebug, payload, sr.workUnit, "kube api") } } n, err = sr.reader.Read(p) if err != nil { sr.lasterr = err sr.doneOnce.Do(func() { close(sr.doneChan) }) } return } // Done returns a channel that will be closed on error (including EOF) in the reader. func (sr *STDinReader) Done() <-chan struct{} { return sr.doneChan } // Error returns the most recent error encountered in the reader. func (sr *STDinReader) Error() error { return sr.lasterr } // SetReader sets the reader var. 
func (sr *STDinReader) SetReader(reader FileReadCloser) { sr.reader = reader } receptor-1.5.3/pkg/workceptor/stdio_utils_test.go000066400000000000000000000156451475465740700223110ustar00rootroot00000000000000package workceptor_test import ( "errors" "os" "testing" "time" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "go.uber.org/mock/gomock" ) const errorMsgFmt = "Expected error: %s, got: %v" // checkErrorAndNum checks common return types against expected values. func checkErrorAndNum(err error, expectedErr string, num int, expectedNum int, t *testing.T) { if expectedErr == "" && err != nil { t.Errorf("Expected no error, got: %v", err) } else if expectedErr != "" && (err == nil || err.Error() != expectedErr) { t.Errorf(errorMsgFmt, expectedErr, err) } if num != expectedNum { t.Errorf("Expected num to be %d, got: %d", expectedNum, num) } } func setup(t *testing.T) (*gomock.Controller, *mock_workceptor.MockFileSystemer) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockfilesystemer := mock_workceptor.NewMockFileSystemer(ctrl) return ctrl, mockfilesystemer } func setupWriter(t *testing.T) (*gomock.Controller, *workceptor.STDoutWriter) { ctrl, mockfilesystemer := setup(t) mockfilesystemer.EXPECT().OpenFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(&os.File{}, nil) wc, err := workceptor.NewStdoutWriter(mockfilesystemer, "") if err != nil { t.Errorf("Error while creating std writer: %v", err) } return ctrl, wc } func setupReader(t *testing.T) (*gomock.Controller, *workceptor.STDinReader) { ctrl, mockfilesystemer := setup(t) statObj := NewInfo("test", 1, 0, time.Now()) mockfilesystemer.EXPECT().Stat(gomock.Any()).Return(statObj, nil) mockfilesystemer.EXPECT().Open(gomock.Any()).Return(&os.File{}, nil) wc, err := workceptor.NewStdinReader(mockfilesystemer, "") if err != nil { t.Errorf(stdinError) //nolint:staticcheck } return ctrl, wc } func TestWrite(t *testing.T) { ctrl, wc := setupWriter(t) mockfilewc := mock_workceptor.NewMockFileWriteCloser(ctrl) wc.SetWriter(mockfilewc) writeTestCases := []struct { name string returnNum int returnErr error expectedNum int expectedErr string }{ {"Write OK", 0, nil, 0, ""}, {"Write OK, correct num returned", 1, nil, 1, ""}, {"Write Error", 0, errors.New("Write error"), 0, "Write error"}, } for _, testCase := range writeTestCases { t.Run(testCase.name, func(t *testing.T) { mockfilewc.EXPECT().Write(gomock.Any()).Return(testCase.returnNum, testCase.returnErr) num, err := wc.Write([]byte(gomock.Any().String())) checkErrorAndNum(err, testCase.expectedErr, num, testCase.expectedNum, t) }) } } func TestWriteSize(t *testing.T) { _, wc := setupWriter(t) sizeTestCases := []struct { name string expectedSize int64 }{ {name: "Size return OK", expectedSize: 0}, } for _, testCase := range sizeTestCases { t.Run(testCase.name, func(t *testing.T) { num := wc.Size() if num != testCase.expectedSize { t.Errorf("Expected size to be %d, got: %d", testCase.expectedSize, num) } }) } } type Info struct { name string size int64 mode os.FileMode modTime time.Time } func NewInfo(name string, size int64, mode os.FileMode, modTime time.Time) *Info { return &Info{ name: name, size: size, mode: mode, modTime: modTime, } } func (i *Info) Name() string { return i.name } func (i *Info) Size() int64 { return i.size } func (i *Info) IsDir() bool { return i.mode.IsDir() } func (i *Info) Mode() os.FileMode { return i.mode } func (i *Info) ModTime() time.Time { return i.modTime } func (i *Info) Sys() interface{} { return nil 
} const stdinError = "Error creating stdinReader" func TestRead(t *testing.T) { ctrl, wc := setupReader(t) mockReadClose := mock_workceptor.NewMockFileReadCloser(ctrl) wc.SetReader(mockReadClose) readTestCases := []struct { name string returnNum int returnErr error expectedNum int expectedErr string }{ {"Read ok", 1, nil, 1, ""}, {"Read Error", 1, errors.New("Read error"), 1, "Read error"}, } for _, testCase := range readTestCases { t.Run(testCase.name, func(t *testing.T) { mockReadClose.EXPECT().Read(gomock.Any()).Return(testCase.returnNum, testCase.returnErr) num, err := wc.Read([]byte(gomock.Any().String())) checkErrorAndNum(err, testCase.expectedErr, num, testCase.expectedNum, t) }) } } func TestDone(t *testing.T) { _, wc := setupReader(t) channel := wc.Done() if channel == nil { t.Errorf("Done chan is set to nil") } } func TestError(t *testing.T) { _, wc := setupReader(t) err := wc.Error() if err != nil { t.Errorf("Unexpected error returned from stdreader") } } func TestNewStdoutWriter(t *testing.T) { _, mockfilesystemer := setup(t) newWriterTestCases := []struct { name string returnErr error expectedErr string }{ {"Create Writer OK", nil, ""}, {"Create Writer Error", errors.New("Create Write error"), "Create Write error"}, } for _, testCase := range newWriterTestCases { t.Run(testCase.name, func(t *testing.T) { mockfilesystemer.EXPECT().OpenFile(gomock.Any(), gomock.Any(), gomock.Any()).Return(&os.File{}, testCase.returnErr) _, err := workceptor.NewStdoutWriter(mockfilesystemer, "") checkErrorAndNum(err, testCase.expectedErr, 0, 0, t) }) } } func checkErrorsReader(err error, expectedStatErr string, expectedOpenErr string, expectedStatSize int, t *testing.T) { switch { case expectedStatErr == "" && expectedOpenErr == "" && expectedStatSize > 0 && err != nil: t.Errorf("Expected no error, got: %v", err) case expectedStatErr != "" && (err == nil || err.Error() != expectedStatErr): t.Errorf(errorMsgFmt, expectedStatErr, err) case expectedOpenErr != "" && (err == nil || err.Error() != expectedOpenErr): t.Errorf(errorMsgFmt, expectedOpenErr, err) case expectedStatSize < 1 && (err == nil || err.Error() != "file is empty"): t.Errorf(errorMsgFmt, "file is empty", err) } } func TestNewStdinReader(t *testing.T) { _, mockfilesystemer := setup(t) readTestCases := []struct { name string returnStatSize int returnStatErr error expectedStatSize int expectedStatErr string returnOpenErr error expectedOpenErr string mockOpen bool }{ {"Create Read ok", 1, nil, 1, "", nil, "", true}, {"Create Read Stat Error", 1, errors.New("Create Read Stat error"), 1, "Create Read Stat error", nil, "", false}, {"Create Read Size Error", 0, nil, 0, "", nil, "", false}, {"Create Read Open Error", 1, nil, 1, "", errors.New("Create Read Open error"), "Create Read Open error", true}, } for _, testCase := range readTestCases { t.Run(testCase.name, func(t *testing.T) { statObj := NewInfo("test", int64(testCase.returnStatSize), 0, time.Now()) mockfilesystemer.EXPECT().Stat(gomock.Any()).Return(statObj, testCase.returnStatErr) if testCase.mockOpen { mockfilesystemer.EXPECT().Open(gomock.Any()).Return(&os.File{}, testCase.returnOpenErr) } _, err := workceptor.NewStdinReader(mockfilesystemer, "") checkErrorsReader(err, testCase.expectedStatErr, testCase.expectedOpenErr, testCase.expectedStatSize, t) }) } } receptor-1.5.3/pkg/workceptor/workceptor.go000066400000000000000000000405751475465740700211070ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "context" "crypto/tls" 
"fmt" "io" "io/fs" "net" "os" "path" "reflect" "regexp" "strings" "sync" "time" "github.com/ansible/receptor/pkg/certificates" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/randstr" "github.com/ansible/receptor/pkg/utils" "github.com/golang-jwt/jwt/v4" ) // NetceptorForWorkceptor is a interface to decouple workceptor from netceptor. // it includes only the functions that workceptor uses. type NetceptorForWorkceptor interface { NodeID() string AddWorkCommand(typeName string, verifySignature bool) error GetClientTLSConfig(name string, expectedHostName string, expectedHostNameType netceptor.ExpectedHostnameType) (*tls.Config, error) // have a common pkg for types GetLogger() *logger.ReceptorLogger DialContext(ctx context.Context, node string, service string, tlscfg *tls.Config) (*netceptor.Conn, error) // create an interface for Conn } type ServerForWorkceptor interface { AddControlFunc(name string, cType controlsvc.ControlCommandType) error ConnectionListener(ctx context.Context, listener net.Listener) RunControlSession(conn net.Conn) RunControlSvc(ctx context.Context, service string, tlscfg *tls.Config, unixSocket string, unixSocketPermissions fs.FileMode, tcpListen string, tcptls *tls.Config) error SetServerNet(n controlsvc.Neter) SetServerTLS(t controlsvc.Tlser) SetServerUtils(u controlsvc.Utiler) SetupConnection(conn net.Conn) } // Workceptor is the main object that handles unit-of-work management. type Workceptor struct { ctx context.Context Cancel context.CancelFunc nc NetceptorForWorkceptor dataDir string workTypesLock *sync.RWMutex workTypes map[string]*workType activeUnitsLock *sync.RWMutex activeUnits map[string]WorkUnit SigningKey string SigningExpiration time.Duration VerifyingKey string } // workType is the record for a registered type of work. type workType struct { newWorkerFunc NewWorkerFunc verifySignature bool } // New constructs a new Workceptor instance. func New(ctx context.Context, nc NetceptorForWorkceptor, dataDir string) (*Workceptor, error) { if dataDir == "" { dataDir = path.Join(os.TempDir(), "receptor") } dataDir = path.Join(dataDir, nc.NodeID()) c, cancel := context.WithCancel(ctx) w := &Workceptor{ ctx: c, Cancel: cancel, nc: nc, dataDir: dataDir, workTypesLock: &sync.RWMutex{}, workTypes: make(map[string]*workType), activeUnitsLock: &sync.RWMutex{}, activeUnits: make(map[string]WorkUnit), SigningKey: "", SigningExpiration: 5 * time.Minute, VerifyingKey: "", } err := w.RegisterWorker("remote", newRemoteWorker, false) if err != nil { return nil, fmt.Errorf("could not register remote worker function: %s", err) } return w, nil } // MainInstance is the global instance of Workceptor instantiated by the command-line main() function. var MainInstance *Workceptor // stdoutSize returns size of stdout, if it exists, or 0 otherwise. func stdoutSize(unitdir string) int64 { stat, err := os.Stat(path.Join(unitdir, "stdout")) if err != nil { return 0 } return stat.Size() } // RegisterWithControlService registers this workceptor instance with a control service instance. func (w *Workceptor) RegisterWithControlService(cs ServerForWorkceptor) error { err := cs.AddControlFunc("work", &workceptorCommandType{ w: w, }) if err != nil { return fmt.Errorf("could not add work control function: %s", err) } return nil } // RegisterWorker notifies the Workceptor of a new kind of work that can be done. 
func (w *Workceptor) RegisterWorker(typeName string, newWorkerFunc NewWorkerFunc, verifySignature bool) error { w.workTypesLock.Lock() _, ok := w.workTypes[typeName] if ok { w.workTypesLock.Unlock() return fmt.Errorf("work type %s already registered", typeName) } w.workTypes[typeName] = &workType{ newWorkerFunc: newWorkerFunc, verifySignature: verifySignature, } if typeName != "remote" { // all workceptors get a remote command by default w.nc.AddWorkCommand(typeName, verifySignature) } w.workTypesLock.Unlock() // Check if any unknown units have now become known w.activeUnitsLock.Lock() for id, worker := range w.activeUnits { _, ok := worker.(*unknownUnit) if ok && worker.Status().WorkType == typeName { delete(w.activeUnits, id) } } w.activeUnitsLock.Unlock() w.scanForUnits() return nil } func (w *Workceptor) generateUnitID(lock bool, workUnitID string) (string, error) { if lock { w.activeUnitsLock.RLock() defer w.activeUnitsLock.RUnlock() } var ident string for { if workUnitID == "" { rstr := randstr.RandomString(8) nid := regexp.MustCompile(`[^a-zA-Z0-9 ]+`).ReplaceAllString(w.nc.NodeID(), "") ident = fmt.Sprintf("%s%s", nid, rstr) } else { ident = workUnitID unitdir := path.Join(w.dataDir, ident) _, err := os.Stat(unitdir) if err == nil { return "", fmt.Errorf("workunit ID %s is already in use, cannot use the same workunit ID more than once", ident) } } _, ok := w.activeUnits[ident] if !ok { unitdir := path.Join(w.dataDir, ident) _, err := os.Stat(unitdir) if err == nil { continue } return ident, os.MkdirAll(unitdir, 0o700) } } } func (w *Workceptor) createSignature(nodeID string) (string, error) { if w.SigningKey == "" { return "", fmt.Errorf("cannot sign work: signing key is empty") } exp := time.Now().Add(w.SigningExpiration) claims := &jwt.RegisteredClaims{ ExpiresAt: jwt.NewNumericDate(exp), Audience: []string{nodeID}, } rsaPrivateKey, err := certificates.LoadPrivateKey(w.SigningKey, &certificates.OsWrapper{}) if err != nil { return "", fmt.Errorf("could not load signing key file: %s", err.Error()) } token := jwt.NewWithClaims(jwt.SigningMethodRS512, claims) tokenString, err := token.SignedString(rsaPrivateKey) if err != nil { return "", err } return tokenString, nil } func (w *Workceptor) ShouldVerifySignature(workType string, signWork bool) bool { // if work unit is remote, just get the signWork boolean from the // remote extra data field if workType == "remote" { return signWork } w.workTypesLock.RLock() wt, ok := w.workTypes[workType] w.workTypesLock.RUnlock() if ok && wt.verifySignature { return true } return false } func (w *Workceptor) VerifySignature(signature string) error { if signature == "" { return fmt.Errorf("could not verify signature: signature is empty") } if w.VerifyingKey == "" { return fmt.Errorf("could not verify signature: verifying key not specified") } rsaPublicKey, err := certificates.LoadPublicKey(w.VerifyingKey, &certificates.OsWrapper{}) if err != nil { return fmt.Errorf("could not load verifying key file: %s", err.Error()) } token, err := jwt.ParseWithClaims(signature, &jwt.RegisteredClaims{}, func(token *jwt.Token) (interface{}, error) { return rsaPublicKey, nil }) if err != nil { return fmt.Errorf("could not verify signature: %s", err.Error()) } if !token.Valid { return fmt.Errorf("token not valid") } claims := token.Claims.(*jwt.RegisteredClaims) ok := claims.VerifyAudience(w.nc.NodeID(), true) if !ok { return fmt.Errorf("token audience did not match node ID") } return nil } // AllocateUnit creates a new local work unit and generates an identifier for 
it. func (w *Workceptor) AllocateUnit(workTypeName string, workUnitID string, params map[string]string) (WorkUnit, error) { w.workTypesLock.RLock() wt, ok := w.workTypes[workTypeName] w.workTypesLock.RUnlock() if !ok { return nil, fmt.Errorf("unknown work type %s", workTypeName) } w.activeUnitsLock.Lock() defer w.activeUnitsLock.Unlock() ident, err := w.generateUnitID(false, workUnitID) if err != nil { return nil, err } worker := wt.newWorkerFunc(nil, w, ident, workTypeName) err = worker.SetFromParams(params) if err == nil { err = worker.Save() } if err != nil { return nil, err } w.activeUnits[ident] = worker return worker, nil } // AllocateRemoteUnit creates a new remote work unit and generates a local identifier for it. func (w *Workceptor) AllocateRemoteUnit(remoteNode, remoteWorkType, workUnitID string, tlsClient, ttl string, signWork bool, params map[string]string) (WorkUnit, error) { if tlsClient != "" { _, err := w.nc.GetClientTLSConfig(tlsClient, "testhost", netceptor.ExpectedHostnameTypeReceptor) if err != nil { return nil, err } } hasSecrets := false for k := range params { if strings.HasPrefix(strings.ToLower(k), "secret_") { hasSecrets = true break } } if hasSecrets && tlsClient == "" { return nil, fmt.Errorf("cannot send secrets over a non-TLS connection") } rw, err := w.AllocateUnit("remote", workUnitID, params) if err != nil { return nil, err } var expiration time.Time if ttl != "" { duration, err := time.ParseDuration(ttl) if err != nil { w.nc.GetLogger().Error("Failed to parse provided ttl -- valid examples include '1.5h', '30m', '30m10s'") return nil, err } if signWork && duration > w.SigningExpiration { w.nc.GetLogger().Warning("json web token expires before ttl") } expiration = time.Now().Add(duration) } else { expiration = time.Time{} } rw.UpdateFullStatus(func(status *StatusFileData) { ed := status.ExtraData.(*RemoteExtraData) ed.RemoteNode = remoteNode ed.RemoteWorkType = remoteWorkType ed.TLSClient = tlsClient ed.Expiration = expiration ed.SignWork = signWork }) if rw.LastUpdateError() != nil { return nil, rw.LastUpdateError() } return rw, nil } func (w *Workceptor) scanForUnit(unitID string) { unitdir := path.Join(w.dataDir, unitID) fi, _ := os.Stat(unitdir) if fi == nil || !fi.IsDir() { w.nc.GetLogger().Error("Error locating unit: %s", unitID) return } ident := fi.Name() w.activeUnitsLock.RLock() _, ok := w.activeUnits[ident] //nolint:ifshort w.activeUnitsLock.RUnlock() if !ok { statusFilename := path.Join(unitdir, "status") sfd := &StatusFileData{} serr := sfd.Load(statusFilename) if serr != nil { w.nc.GetLogger().Error("Error loading %s: %s", statusFilename, serr) } w.workTypesLock.RLock() wt, ok := w.workTypes[sfd.WorkType] w.workTypesLock.RUnlock() var worker WorkUnit if ok { worker = wt.newWorkerFunc(nil, w, ident, sfd.WorkType) } else { worker = newUnknownWorker(w, ident, sfd.WorkType) } if _, err := os.Stat(statusFilename); os.IsNotExist(err) { w.nc.GetLogger().Error("Status file has disappeared for %s.", ident) return } err := worker.Load() if err != nil { w.nc.GetLogger().Warning("Failed to restart worker %s due to read error: %s", unitdir, err) worker.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Failed to restart: %s", err), stdoutSize(unitdir)) } err = worker.Restart() if err != nil && !IsPending(err) { w.nc.GetLogger().Warning("Failed to restart worker %s: %s", unitdir, err) worker.UpdateBasicStatus(WorkStateFailed, fmt.Sprintf("Failed to restart: %s", err), stdoutSize(unitdir)) } w.activeUnitsLock.Lock() defer w.activeUnitsLock.Unlock() 
w.activeUnits[ident] = worker } } func (w *Workceptor) scanForUnits() { files, err := os.ReadDir(w.dataDir) if err != nil { return } for i := range files { fi := files[i] w.scanForUnit(fi.Name()) } } func (w *Workceptor) findUnit(unitID string) (WorkUnit, error) { w.activeUnitsLock.RLock() defer w.activeUnitsLock.RUnlock() unit, ok := w.activeUnits[unitID] if ok { return unit, nil } // if not in active units, rescan work unit dir and recheck w.scanForUnit(unitID) unit, ok = w.activeUnits[unitID] if !ok { return nil, fmt.Errorf("unknown work unit %s", unitID) } return unit, nil } // StartUnit starts a unit of work. func (w *Workceptor) StartUnit(unitID string) error { unit, err := w.findUnit(unitID) if err != nil { return err } return unit.Start() } // ListKnownUnitIDs returns a slice containing the known unit IDs. func (w *Workceptor) ListKnownUnitIDs() []string { w.activeUnitsLock.RLock() defer w.activeUnitsLock.RUnlock() result := make([]string, 0, len(w.activeUnits)) for id := range w.activeUnits { result = append(result, id) } return result } // UnitStatus returns the state of a unit. func (w *Workceptor) UnitStatus(unitID string) (*StatusFileData, error) { unit, err := w.findUnit(unitID) if err != nil { return nil, err } return unit.Status(), nil } // CancelUnit cancels a unit of work, killing any processes. func (w *Workceptor) CancelUnit(unitID string) error { unit, err := w.findUnit(unitID) if err != nil { return err } return unit.Cancel() } // ReleaseUnit releases (deletes) resources from a unit of work, including stdout. Release implies Cancel. func (w *Workceptor) ReleaseUnit(unitID string, force bool) error { unit, err := w.findUnit(unitID) if err != nil { return err } return unit.Release(force) } // unitStatusForCFR returns status information as a map, suitable for a control function return value. func (w *Workceptor) unitStatusForCFR(unitID string) (map[string]interface{}, error) { status, err := w.UnitStatus(unitID) if err != nil { return nil, err } retMap := make(map[string]interface{}) v := reflect.ValueOf(*status) t := reflect.TypeOf(*status) for i := 0; i < v.NumField(); i++ { retMap[t.Field(i).Name] = v.Field(i).Interface() } retMap["StateName"] = WorkStateToString(status.State) return retMap, nil } // sleepOrDone sleeps until a timeout or the done channel is signaled. func sleepOrDone(doneChan <-chan struct{}, interval time.Duration) bool { select { case <-doneChan: return true case <-time.After(interval): return false } } // GetResults returns a live stream of the results of a unit. 
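// Illustrative consumer sketch (editor's addition): the returned channel carries chunks of the
// unit's stdout starting at startPos and is closed once the unit is complete and fully read, or
// when ctx is cancelled, e.g.
//
//	results, err := w.GetResults(ctx, unitID, 0)
//	if err != nil {
//		// handle error
//	}
//	for chunk := range results {
//		os.Stdout.Write(chunk)
//	}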
func (w *Workceptor) GetResults(ctx context.Context, unitID string, startPos int64) (chan []byte, error) { unit, err := w.findUnit(unitID) if err != nil { return nil, err } resultChan := make(chan []byte) closeOnce := sync.Once{} resultClose := func() { closeOnce.Do(func() { close(resultChan) }) } unitdir := path.Join(w.dataDir, unitID) stdoutFilename := path.Join(unitdir, "stdout") var stdout *os.File ctxChild, cancel := context.WithCancel(ctx) go func() { defer func() { err = stdout.Close() if err != nil { w.nc.GetLogger().Error("Error closing stdout %s", stdoutFilename) } resultClose() cancel() }() // Wait for stdout file to exist for { stdout, err = os.Open(stdoutFilename) switch { case err == nil: case os.IsNotExist(err): if IsComplete(unit.Status().State) { w.nc.GetLogger().Warning("Unit completed without producing any stdout\n") return } if sleepOrDone(ctx.Done(), 500*time.Millisecond) { return } continue default: w.nc.GetLogger().Error("Error accessing stdout file: %s\n", err) return } break } filePos := startPos statChan := make(chan struct{}, 1) go func() { failures := 0 for { select { case <-ctxChild.Done(): return case <-time.After(1 * time.Second): _, err := os.Stat(stdoutFilename) if os.IsNotExist(err) { failures++ if failures > 3 { w.nc.GetLogger().Error("Exceeded retries for reading stdout %s", stdoutFilename) statChan <- struct{}{} return } } else { failures = 0 } } } }() for { if sleepOrDone(ctx.Done(), 250*time.Millisecond) { return } for { select { case <-ctx.Done(): return case <-statChan: return default: var newPos int64 newPos, err = stdout.Seek(filePos, 0) if err != nil { w.nc.GetLogger().Warning("Seek error processing stdout: %s\n", err) return } if newPos != filePos { w.nc.GetLogger().Warning("Seek error processing stdout\n") return } var n int buf := make([]byte, utils.NormalBufferSize) n, err = stdout.Read(buf) if n > 0 { filePos += int64(n) select { case <-ctx.Done(): return case resultChan <- buf[:n]: } } } if err != nil { break } } if err == io.EOF { unitStatus := unit.Status() if IsComplete(unitStatus.State) && filePos >= unitStatus.StdoutSize { w.nc.GetLogger().Debug("Stdout complete - closing channel for: %s \n", unitID) return } } else if err != nil { w.nc.GetLogger().Error("Error reading stdout: %s\n", err) return } } }() return resultChan, nil } receptor-1.5.3/pkg/workceptor/workceptor_stub.go000066400000000000000000000051251475465740700221340ustar00rootroot00000000000000//go:build no_workceptor // +build no_workceptor package workceptor // Stub file to satisfy dependencies when Workceptor is not compiled in import ( "context" "fmt" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/netceptor" ) // ErrNotImplemented is returned from functions that are stubbed out var ErrNotImplemented = fmt.Errorf("not implemented") // Workceptor is the main object that handles unit-of-work management type Workceptor struct{} // New constructs a new Workceptor instance func New(ctx context.Context, nc *netceptor.Netceptor, dataDir string) (*Workceptor, error) { return &Workceptor{}, nil } // MainInstance is the global instance of Workceptor instantiated by the command-line main() function var MainInstance *Workceptor // RegisterWithControlService registers this workceptor instance with a control service instance func (w *Workceptor) RegisterWithControlService(cs *controlsvc.Server) error { return nil } // RegisterWorker notifies the Workceptor of a new kind of work that can be done func (w *Workceptor) RegisterWorker(typeName string, 
newWorkerFunc NewWorkerFunc, verifySignature bool) error { return ErrNotImplemented } // AllocateUnit creates a new local work unit and generates an identifier for it func (w *Workceptor) AllocateUnit(workTypeName, params string) (WorkUnit, error) { return nil, ErrNotImplemented } // AllocateRemoteUnit creates a new remote work unit and generates a local identifier for it func (w *Workceptor) AllocateRemoteUnit(remoteNode string, remoteWorkType string, tlsclient string, ttl string, signWork bool, params string) (WorkUnit, error) { return nil, ErrNotImplemented } // StartUnit starts a unit of work func (w *Workceptor) StartUnit(unitID string) error { return ErrNotImplemented } // ListKnownUnitIDs returns a slice containing the known unit IDs func (w *Workceptor) ListKnownUnitIDs() []string { return []string{} } // UnitStatus returns the state of a unit func (w *Workceptor) UnitStatus(unitID string) (*StatusFileData, error) { return nil, ErrNotImplemented } // CancelUnit cancels a unit of work, killing any processes func (w *Workceptor) CancelUnit(unitID string) error { return ErrNotImplemented } // ReleaseUnit releases (deletes) resources from a unit of work, including stdout. Release implies Cancel. func (w *Workceptor) ReleaseUnit(unitID string, force bool) error { return ErrNotImplemented } // GetResults returns a live stream of the results of a unit func (w *Workceptor) GetResults(unitID string, startPos int64, doneChan chan struct{}) (chan []byte, error) { return nil, ErrNotImplemented } receptor-1.5.3/pkg/workceptor/workceptor_test.go000066400000000000000000000253011475465740700221340ustar00rootroot00000000000000package workceptor_test import ( "context" "crypto/tls" "errors" "fmt" "os" "testing" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "github.com/ansible/receptor/tests/utils" "go.uber.org/mock/gomock" ) func testSetup(t *testing.T) (*gomock.Controller, *mock_workceptor.MockNetceptorForWorkceptor, *workceptor.Workceptor) { ctrl := gomock.NewController(t) defer ctrl.Finish() ctx := context.Background() mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) mockNetceptor.EXPECT().NodeID().Return("test").AnyTimes() logger := logger.NewReceptorLogger("") mockNetceptor.EXPECT().GetLogger().AnyTimes().Return(logger) w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } return ctrl, mockNetceptor, w } func TestAllocateUnit(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockWorkUnit := mock_workceptor.NewMockWorkUnit(ctrl) ctx := context.Background() mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) logger := logger.NewReceptorLogger("") mockNetceptor.EXPECT().GetLogger().AnyTimes().Return(logger) workFunc := func(bwu workceptor.BaseWorkUnitForWorkUnit, w *workceptor.Workceptor, unitID string, workType string) workceptor.WorkUnit { return mockWorkUnit } mockNetceptor.EXPECT().NodeID().Return("test").Times(4) w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } const testType = "testType" mockNetceptor.EXPECT().AddWorkCommand(gomock.Any(), gomock.Any()).Return(nil) w.RegisterWorker(testType, workFunc, false) const paramError = "SetFromParams error" const saveError = "Save error" testCases := []struct { name string workType string setFromParamsError error saveError error mockSetParam bool mockSave 
bool expectedError string }{ { name: "normal case", workType: testType, setFromParamsError: nil, saveError: nil, mockSetParam: true, mockSave: true, expectedError: "", }, { name: "work type doesn't exist", workType: "nonexistentType", setFromParamsError: nil, saveError: nil, mockSetParam: false, mockSave: false, expectedError: fmt.Sprintf("unknown work type %s", "nonexistentType"), }, { name: paramError, workType: testType, setFromParamsError: errors.New(paramError), saveError: nil, mockSetParam: true, mockSave: false, expectedError: paramError, }, { name: saveError, workType: testType, setFromParamsError: nil, saveError: errors.New(saveError), mockSetParam: true, mockSave: true, expectedError: saveError, }, } checkError := func(err error, expectedError string, t *testing.T) { if expectedError == "" && err != nil { t.Errorf("Expected no error, got: %v", err) } else if expectedError != "" && (err == nil || err.Error() != expectedError) { t.Errorf("Expected error: %s, got: %v", expectedError, err) } } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if tc.mockSetParam { mockWorkUnit.EXPECT().SetFromParams(gomock.Any()).Return(tc.setFromParamsError).Times(1) } if tc.mockSave { mockWorkUnit.EXPECT().Save().Return(tc.saveError).Times(1) } _, err := w.AllocateUnit(tc.workType, "", map[string]string{"param": "value"}) checkError(err, tc.expectedError, t) }) } } func TestRegisterWithControlService(t *testing.T) { ctrl, _, w := testSetup(t) mockServer := mock_workceptor.NewMockServerForWorkceptor(ctrl) testCases := []struct { name string hasError bool expectedCalls func() }{ { name: "normal case 1", hasError: false, expectedCalls: func() { mockServer.EXPECT().AddControlFunc(gomock.Any(), gomock.Any()).Return(nil) }, }, { name: "error registering work", hasError: true, expectedCalls: func() { mockServer.EXPECT().AddControlFunc(gomock.Any(), gomock.Any()).Return(errors.New("terminated")) }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tc.expectedCalls() err := w.RegisterWithControlService(mockServer) if tc.hasError && err.Error() != "could not add work control function: terminated" { t.Error(err) } if !tc.hasError && err != nil { t.Error(err) } }) } } func TestRegisterWorker(t *testing.T) { _, mockNetceptor, w := testSetup(t) testCases := []struct { name string typeName string hasError bool errorMsg string expectedCalls func() }{ { name: "already registered", typeName: "remote", hasError: true, errorMsg: "work type remote already registered", expectedCalls: func() { // For testing purposes }, }, { name: "normal with active unit", typeName: "test", hasError: false, expectedCalls: func() { mockNetceptor.EXPECT().AddWorkCommand(gomock.Any(), gomock.Any()) w.AllocateUnit("remote", "", map[string]string{}) }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tc.expectedCalls() err := w.RegisterWorker(tc.typeName, workceptor.NewRemoteWorker, false) if tc.hasError && err.Error() != tc.errorMsg { t.Error(err) } if !tc.hasError && err != nil { t.Error(err) } }) } } func TestShouldVerifySignature(t *testing.T) { _, _, w := testSetup(t) testCases := []struct { name string workType string }{ { name: "return with remote true", workType: "remote", }, { name: "return with false", workType: "", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { shouldVerifySignature := w.ShouldVerifySignature(tc.workType, true) t.Log(shouldVerifySignature) if tc.workType == "remote" && !shouldVerifySignature { t.Errorf("expected: true, received: 
%t", shouldVerifySignature) } if tc.workType == "" && shouldVerifySignature { t.Errorf("expected: false, received: %t", shouldVerifySignature) } }) } } func TestVerifySignature(t *testing.T) { _, _, w := testSetup(t) _, public, err := utils.GenerateRSAPair() if err != nil { t.Error(err) } testCases := []struct { name string signature string verifyingKey string errorMsg string }{ { name: "signature is empty error", signature: "", errorMsg: "could not verify signature: signature is empty", }, { name: "verifying key not specified error", signature: "sig", verifyingKey: "", errorMsg: "could not verify signature: verifying key not specified", }, { name: "no such key file error", signature: "sig", verifyingKey: "/tmp/nowhere.pub", errorMsg: "could not load verifying key file: open /tmp/nowhere.pub: no such file or directory", }, { name: "token invalid number of segments error", signature: "sig", verifyingKey: public, errorMsg: "could not verify signature: token contains an invalid number of segments", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { w.VerifyingKey = tc.verifyingKey err := w.VerifySignature(tc.signature) if tc.errorMsg != err.Error() { t.Errorf("expected: %s, received: %s", tc.errorMsg, err) } }) } } func TestAllocateRemoteUnit(t *testing.T) { _, mockNetceptor, w := testSetup(t) testCases := []struct { name string workUnitID string tlsClient string ttl string signWork bool params map[string]string errorMsg string expectedCalls func() }{ { name: "get client tls config error", workUnitID: "", tlsClient: "something", errorMsg: "terminated", expectedCalls: func() { mockNetceptor.EXPECT().GetClientTLSConfig(gomock.Any(), gomock.Any(), gomock.Any()).Return(&tls.Config{}, errors.New("terminated")) }, }, { name: "sending secrets over non tls connection error", workUnitID: "", tlsClient: "", params: map[string]string{"secret_": "secret"}, errorMsg: "cannot send secrets over a non-TLS connection", expectedCalls: func() { // For testing purposes }, }, { name: "invalid duration error", workUnitID: "", tlsClient: "", ttl: "ttl", errorMsg: "time: invalid duration \"ttl\"", expectedCalls: func() { // For testing purposes }, }, { name: "normal case", workUnitID: "", tlsClient: "", ttl: "1.5h", errorMsg: "", signWork: true, expectedCalls: func() { // For testing purposes }, }, { name: "pass workUnitID", workUnitID: "testID12345678", tlsClient: "", ttl: "1.5h", errorMsg: "", signWork: true, expectedCalls: func() { // For testing purposes }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tc.expectedCalls() wu, err := w.AllocateRemoteUnit("", "", tc.workUnitID, tc.tlsClient, tc.ttl, tc.signWork, tc.params) if tc.errorMsg != "" && tc.errorMsg != err.Error() && err != nil { t.Errorf("expected: %s, received: %s", tc.errorMsg, err) } if tc.errorMsg == "" && err != nil { t.Error(err) } if tc.workUnitID != "" { wuID := wu.ID() if tc.workUnitID != wuID { t.Errorf("expected workUnitID to equal %s but got %s", tc.workUnitID, wuID) } } }) t.Cleanup(func() { if tc.workUnitID != "" { err := os.RemoveAll(fmt.Sprintf("/tmp/test/%s", tc.workUnitID)) if err != nil { t.Errorf("removal of test directory /tmp/test/%s failed", tc.workUnitID) } } }) } } func TestUnitStatus(t *testing.T) { _, _, w := testSetup(t) activeUnitsIDs := w.ListKnownUnitIDs() _, err := w.UnitStatus(activeUnitsIDs[0]) if err != nil { t.Error(err) } } func TestCancelUnit(t *testing.T) { _, _, w := testSetup(t) activeUnitsIDs := w.ListKnownUnitIDs() err := w.CancelUnit(activeUnitsIDs[0]) if err != nil 
{ t.Error(err) } } func TestReleaseUnit(t *testing.T) { _, _, w := testSetup(t) activeUnitsIDs := w.ListKnownUnitIDs() err := w.ReleaseUnit(activeUnitsIDs[0], true) if err != nil { t.Error(err) } } func TestListKnownUnitIDs(t *testing.T) { t.Parallel() _, _, w := testSetup(t) testCases := []struct { name string workType string }{ { name: "parallel test 1", }, { name: "parallel test 2", }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { t.Parallel() w.ListKnownUnitIDs() }) } } receptor-1.5.3/pkg/workceptor/workunitbase.go000066400000000000000000000373641475465740700214270ustar00rootroot00000000000000//go:build !no_workceptor // +build !no_workceptor package workceptor import ( "context" "encoding/json" "fmt" "io" "os" "path" "strconv" "sync" "time" "github.com/fsnotify/fsnotify" "github.com/rogpeppe/go-internal/lockedfile" ) // Work sleep constants. const ( SuccessWorkSleep = 1 * time.Second // Normal time to wait between checks MaxWorkSleep = 1 * time.Minute // Max time to ever wait between checks ) // Work state constants. const ( WorkStatePending = 0 WorkStateRunning = 1 WorkStateSucceeded = 2 WorkStateFailed = 3 WorkStateCanceled = 4 ) // WatcherWrapper is wrapping the fsnofity Watcher struct and exposing the Event chan within. type WatcherWrapper interface { Add(name string) error Remove(path string) error Close() error ErrorChannel() chan error EventChannel() chan fsnotify.Event } type RealWatcher struct { watcher *fsnotify.Watcher } func (rw *RealWatcher) Add(name string) error { return rw.watcher.Add(name) } func (rw *RealWatcher) Remove(path string) error { return rw.watcher.Remove(path) } func (rw *RealWatcher) Close() error { return rw.watcher.Close() } func (rw *RealWatcher) ErrorChannel() chan error { return rw.watcher.Errors } func (rw *RealWatcher) EventChannel() chan fsnotify.Event { return rw.watcher.Events } // IsComplete returns true if a given WorkState indicates the job is finished. func IsComplete(workState int) bool { return workState == WorkStateSucceeded || workState == WorkStateFailed } // WorkStateToString returns a string representation of a WorkState. func WorkStateToString(workState int) string { switch workState { case WorkStatePending: return "Pending" case WorkStateRunning: return "Running" case WorkStateSucceeded: return "Succeeded" case WorkStateFailed: return "Failed" case WorkStateCanceled: return "Canceled" default: return "Unknown: " + strconv.Itoa(workState) } } // ErrPending is returned when an operation hasn't succeeded or failed yet. var ErrPending = fmt.Errorf("operation pending") // IsPending returns true if the error is an ErrPending. func IsPending(err error) bool { return err == ErrPending } // BaseWorkUnit includes data common to all work units, and partially implements the WorkUnit interface. type BaseWorkUnit struct { w *Workceptor status StatusFileData unitID string unitDir string statusFileName string stdoutFileName string statusLock *sync.RWMutex lastUpdateError error lastUpdateErrorLock *sync.RWMutex ctx context.Context cancel context.CancelFunc fs FileSystemer watcher WatcherWrapper } // Init initializes the basic work unit data, in memory only. 
func (bwu *BaseWorkUnit) Init(w *Workceptor, unitID string, workType string, fs FileSystemer, watcher WatcherWrapper) { bwu.w = w bwu.status.State = WorkStatePending bwu.status.Detail = "Unit Created" bwu.status.StdoutSize = 0 bwu.status.WorkType = workType bwu.unitID = unitID bwu.unitDir = path.Join(w.dataDir, unitID) bwu.statusFileName = path.Join(bwu.unitDir, "status") bwu.stdoutFileName = path.Join(bwu.unitDir, "stdout") bwu.statusLock = &sync.RWMutex{} bwu.lastUpdateErrorLock = &sync.RWMutex{} bwu.ctx, bwu.cancel = context.WithCancel(w.ctx) bwu.fs = fs if watcher != nil { bwu.watcher = watcher } else { watcher, err := fsnotify.NewWatcher() if err == nil { bwu.watcher = &RealWatcher{watcher: watcher} } else { bwu.w.nc.GetLogger().Info("fsnotify.NewWatcher returned %s", err) bwu.watcher = nil } } } // Error logs message with unitID prepended. func (bwu *BaseWorkUnit) Error(format string, v ...interface{}) { format = fmt.Sprintf("[%s] %s", bwu.unitID, format) bwu.w.nc.GetLogger().Error(format, v...) } // Warning logs message with unitID prepended. func (bwu *BaseWorkUnit) Warning(format string, v ...interface{}) { format = fmt.Sprintf("[%s] %s", bwu.unitID, format) bwu.w.nc.GetLogger().Warning(format, v...) } // Info logs message with unitID prepended. func (bwu *BaseWorkUnit) Info(format string, v ...interface{}) { format = fmt.Sprintf("[%s] %s", bwu.unitID, format) bwu.w.nc.GetLogger().Info(format, v...) } // Debug logs message with unitID prepended. func (bwu *BaseWorkUnit) Debug(format string, v ...interface{}) { format = fmt.Sprintf("[%s] %s", bwu.unitID, format) bwu.w.nc.GetLogger().Debug(format, v...) } // SetFromParams sets the in-memory state from parameters. func (bwu *BaseWorkUnit) SetFromParams(_ map[string]string) error { return nil } // UnitDir returns the unit directory of this work unit. func (bwu *BaseWorkUnit) UnitDir() string { return bwu.unitDir } // ID returns the unique identifier of this work unit. func (bwu *BaseWorkUnit) ID() string { return bwu.unitID } // StatusFileName returns the full path to the status file in the unit dir. func (bwu *BaseWorkUnit) StatusFileName() string { return bwu.statusFileName } // StdoutFileName returns the full path to the stdout file in the unit dir. func (bwu *BaseWorkUnit) StdoutFileName() string { return bwu.stdoutFileName } // lockStatusFile gains a lock on the status file. func (sfd *StatusFileData) lockStatusFile(filename string) (*lockedfile.File, error) { lockFileName := filename + ".lock" lockFile, err := lockedfile.OpenFile(lockFileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) if err != nil { return nil, err } return lockFile, nil } // unlockStatusFile releases the lock on the status file. func (sfd *StatusFileData) unlockStatusFile(filename string, lockFile *lockedfile.File) { if err := lockFile.Close(); err != nil { MainInstance.nc.GetLogger().Error("Error closing %s.lock: %s", filename, err) } } // saveToFile saves status to an already-open file. func (sfd *StatusFileData) saveToFile(file io.Writer) error { jsonBytes, err := json.Marshal(sfd) if err != nil { return err } jsonBytes = append(jsonBytes, '\n') _, err = file.Write(jsonBytes) return err } // Save saves status to a file. 
func (sfd *StatusFileData) Save(filename string) error { lockFile, err := sfd.lockStatusFile(filename) if err != nil { return err } defer sfd.unlockStatusFile(filename, lockFile) file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o600) if err != nil { return err } err = sfd.saveToFile(file) if err != nil { serr := file.Close() if serr != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", filename, serr) } return err } return file.Close() } // Save saves status to a file. func (bwu *BaseWorkUnit) Save() error { bwu.statusLock.RLock() defer bwu.statusLock.RUnlock() return bwu.status.Save(bwu.statusFileName) } // loadFromFile loads status from an already open file. func (sfd *StatusFileData) loadFromFile(file io.Reader) error { jsonBytes, err := io.ReadAll(file) if err != nil { return err } return json.Unmarshal(jsonBytes, sfd) } // Load loads status from a file. func (sfd *StatusFileData) Load(filename string) error { lockFile, err := sfd.lockStatusFile(filename) if err != nil { return err } defer sfd.unlockStatusFile(filename, lockFile) file, err := os.Open(filename) if err != nil { return err } err = sfd.loadFromFile(file) if err != nil { lerr := file.Close() if lerr != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", filename, lerr) } return err } return file.Close() } // Load loads status from a file. func (bwu *BaseWorkUnit) Load() error { bwu.statusLock.Lock() defer bwu.statusLock.Unlock() return bwu.status.Load(bwu.statusFileName) } // UpdateFullStatus atomically updates the status metadata file. Changes should be made in the callback function. // Errors are logged rather than returned. func (sfd *StatusFileData) UpdateFullStatus(filename string, statusFunc func(*StatusFileData)) error { lockFile, err := sfd.lockStatusFile(filename) if err != nil { return err } defer sfd.unlockStatusFile(filename, lockFile) file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0o600) if err != nil { return err } defer func() { err := file.Close() if err != nil { MainInstance.nc.GetLogger().Error("Error closing %s: %s", filename, err) } }() size, err := file.Seek(0, 2) if err != nil { return err } _, err = file.Seek(0, 0) if err != nil { return err } if size > 0 { err = sfd.loadFromFile(file) if err != nil { return err } } statusFunc(sfd) _, err = file.Seek(0, 0) if err != nil { return err } err = file.Truncate(0) if err != nil { return err } err = sfd.saveToFile(file) if err != nil { return err } return nil } // UpdateFullStatus atomically updates the whole status record. Changes should be made in the callback function. // Errors are logged rather than returned. func (bwu *BaseWorkUnit) UpdateFullStatus(statusFunc func(*StatusFileData)) { bwu.statusLock.Lock() defer bwu.statusLock.Unlock() err := bwu.status.UpdateFullStatus(bwu.statusFileName, statusFunc) bwu.lastUpdateErrorLock.Lock() defer bwu.lastUpdateErrorLock.Unlock() bwu.lastUpdateError = err if err != nil { bwu.w.nc.GetLogger().Error("Error updating status file %s: %s.", bwu.statusFileName, err) } } // UpdateBasicStatus atomically updates key fields in the status metadata file. Errors are logged rather than returned. // Passing -1 as stdoutSize leaves it unchanged. 
func (sfd *StatusFileData) UpdateBasicStatus(filename string, state int, detail string, stdoutSize int64) error { return sfd.UpdateFullStatus(filename, func(status *StatusFileData) { status.State = state status.Detail = detail if stdoutSize >= 0 { status.StdoutSize = stdoutSize } }) } // UpdateBasicStatus atomically updates key fields in the status metadata file. Errors are logged rather than returned. // Passing -1 as stdoutSize leaves it unchanged. func (bwu *BaseWorkUnit) UpdateBasicStatus(state int, detail string, stdoutSize int64) { bwu.statusLock.Lock() defer bwu.statusLock.Unlock() err := bwu.status.UpdateBasicStatus(bwu.statusFileName, state, detail, stdoutSize) bwu.lastUpdateErrorLock.Lock() defer bwu.lastUpdateErrorLock.Unlock() bwu.lastUpdateError = err if err != nil { bwu.w.nc.GetLogger().Error("Error updating status file %s: %s.", bwu.statusFileName, err) } } // LastUpdateError returns the last error (including nil) resulting from an UpdateBasicStatus or UpdateFullStatus. func (bwu *BaseWorkUnit) LastUpdateError() error { bwu.lastUpdateErrorLock.RLock() defer bwu.lastUpdateErrorLock.RUnlock() return bwu.lastUpdateError } // MonitorLocalStatus watches a unit dir and keeps the in-memory workUnit up to date with status changes. func (bwu *BaseWorkUnit) MonitorLocalStatus() { statusFile := path.Join(bwu.UnitDir(), "status") var watcherEvents chan fsnotify.Event watcherEvents = make(chan fsnotify.Event) var watcherErrors chan error watcherErrors = make(chan error) if bwu.watcher != nil { bwu.statusLock.Lock() err := bwu.watcher.Add(statusFile) bwu.statusLock.Unlock() if err == nil { defer func() { bwu.watcher.Remove(statusFile) err = bwu.watcher.Close() if err != nil { bwu.w.nc.GetLogger().Error("Error closing watcher: %v", err) } }() watcherEvents = bwu.watcher.EventChannel() watcherErrors = bwu.watcher.ErrorChannel() } else { werr := bwu.watcher.Close() if werr != nil { bwu.w.nc.GetLogger().Error("Error closing %s: %s", statusFile, err) } bwu.watcher = nil } } fi, err := bwu.fs.Stat(statusFile) if err != nil { bwu.w.nc.GetLogger().Error("Error retrieving stat for %s: %s", statusFile, err) fi = nil } loop: for { select { case <-bwu.ctx.Done(): break loop case event := <-watcherEvents: switch { case event.Has(fsnotify.Create): bwu.w.nc.GetLogger().Debug("Watcher Event create of %s", statusFile) case event.Op&fsnotify.Write == fsnotify.Write: err = bwu.Load() if err != nil { bwu.w.nc.GetLogger().Error("Watcher Events Error reading %s: %s", statusFile, err) } case event.Op&fsnotify.Remove == fsnotify.Remove: err = bwu.Load() if err != nil { bwu.w.nc.GetLogger().Debug("Watcher Events Remove reading %s: %s", statusFile, err) } case event.Op&fsnotify.Rename == fsnotify.Rename: err = bwu.Load() if err != nil { bwu.w.nc.GetLogger().Debug("Watcher Events Rename reading %s: %s", statusFile, err) } } case <-time.After(time.Second): newFi, err := bwu.fs.Stat(statusFile) if err == nil && (fi == nil || fi.ModTime() != newFi.ModTime()) { fi = newFi err = bwu.Load() if err != nil { bwu.w.nc.GetLogger().Error("Work unit load Error reading %s: %s", statusFile, err) } } case err, ok := <-watcherErrors: if !ok { return } bwu.w.nc.GetLogger().Error("fsnotify Error reading %s: %s", statusFile, err) } complete := IsComplete(bwu.Status().State) if complete { break } } } // getStatus returns a copy of the base status (no ExtraData). The caller must already hold the statusLock. 
func (bwu *BaseWorkUnit) getStatus() *StatusFileData { status := bwu.status status.ExtraData = nil return &status } // Status returns a copy of the status currently loaded in memory (use Load to get it from disk). func (bwu *BaseWorkUnit) Status() *StatusFileData { return bwu.UnredactedStatus() } // UnredactedStatus returns a copy of the status currently loaded in memory, including secrets. func (bwu *BaseWorkUnit) UnredactedStatus() *StatusFileData { bwu.statusLock.RLock() defer bwu.statusLock.RUnlock() return bwu.getStatus() } // Release releases this unit of work, deleting its files. func (bwu *BaseWorkUnit) Release(force bool) error { bwu.statusLock.Lock() defer bwu.statusLock.Unlock() attemptsLeft := 3 for { err := bwu.fs.RemoveAll(bwu.UnitDir()) if force { break } else if err != nil { attemptsLeft-- if attemptsLeft > 0 { bwu.w.nc.GetLogger().Warning("Error removing directory for %s. Retrying %d more times.", bwu.unitID, attemptsLeft) time.Sleep(time.Second) continue } bwu.w.nc.GetLogger().Error("Error removing directory for %s. No more retries left.", bwu.unitID) return err } break } bwu.w.activeUnitsLock.Lock() defer bwu.w.activeUnitsLock.Unlock() delete(bwu.w.activeUnits, bwu.unitID) return nil } func (bwu *BaseWorkUnit) CancelContext() { bwu.cancel() } func (bwu *BaseWorkUnit) GetStatusCopy() StatusFileData { return bwu.status } func (bwu *BaseWorkUnit) GetStatusWithoutExtraData() *StatusFileData { return bwu.getStatus() } func (bwu *BaseWorkUnit) SetStatusExtraData(ed interface{}) { bwu.status.ExtraData = ed } func (bwu *BaseWorkUnit) GetStatusLock() *sync.RWMutex { return bwu.statusLock } func (bwu *BaseWorkUnit) GetWorkceptor() *Workceptor { return bwu.w } func (bwu *BaseWorkUnit) SetWorkceptor(w *Workceptor) { bwu.w = w } func (bwu *BaseWorkUnit) GetContext() context.Context { return bwu.ctx } func (bwu *BaseWorkUnit) GetCancel() context.CancelFunc { return bwu.cancel } // =============================================================================================== // func newUnknownWorker(w *Workceptor, unitID string, workType string) WorkUnit { uu := &unknownUnit{} uu.BaseWorkUnit.Init(w, unitID, workType, FileSystem{}, nil) return uu } // unknownUnit is used to represent units we find on disk, but don't recognize their WorkType. type unknownUnit struct { BaseWorkUnit } // Start starts the unit. Since we don't know what this unit is, we do nothing. func (uu *unknownUnit) Start() error { return nil } // Restart restarts the unit. Since we don't know what this unit is, we do nothing. func (uu *unknownUnit) Restart() error { return nil } // Cancel cancels a running unit. Since we don't know what this unit is, we do nothing. 
func (uu *unknownUnit) Cancel() error { return nil } func (uu *unknownUnit) Status() *StatusFileData { status := uu.BaseWorkUnit.Status() status.ExtraData = "Unknown WorkType" return status } receptor-1.5.3/pkg/workceptor/workunitbase_test.go000066400000000000000000000313101475465740700224470ustar00rootroot00000000000000package workceptor_test import ( "bytes" "context" "errors" "fmt" "os" "path" "strings" "testing" "time" "github.com/ansible/receptor/pkg/logger" "github.com/ansible/receptor/pkg/randstr" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/pkg/workceptor/mock_workceptor" "github.com/fsnotify/fsnotify" "go.uber.org/mock/gomock" ) func TestIsComplete(t *testing.T) { testCases := []struct { name string workState int isComplete bool }{ {"Pending Work is Incomplete", workceptor.WorkStatePending, false}, {"Running Work is Incomplete", workceptor.WorkStateRunning, false}, {"Succeeded Work is Complete", workceptor.WorkStateSucceeded, true}, {"Failed Work is Complete", workceptor.WorkStateFailed, true}, {"Unknown Work is Incomplete", 999, false}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if result := workceptor.IsComplete(tc.workState); result != tc.isComplete { t.Errorf("expected %v, got %v", tc.isComplete, result) } }) } } func TestWorkStateToString(t *testing.T) { testCases := []struct { name string workState int description string }{ {"Pending Work Description", workceptor.WorkStatePending, "Pending"}, {"Running Work Description", workceptor.WorkStateRunning, "Running"}, {"Succeeded Work Description", workceptor.WorkStateSucceeded, "Succeeded"}, {"Failed Work Description", workceptor.WorkStateFailed, "Failed"}, {"Canceled Work Description", workceptor.WorkStateCanceled, "Canceled"}, {"Unknown Work Description", 999, "Unknown: 999"}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if result := workceptor.WorkStateToString(tc.workState); result != tc.description { t.Errorf("expected %s, got %s", tc.description, result) } }) } } func TestIsPending(t *testing.T) { testCases := []struct { name string err error isPending bool }{ {"Pending Error", workceptor.ErrPending, true}, {"Non-pending Error", errors.New("test error"), false}, {"Nil Error", nil, false}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { if result := workceptor.IsPending(tc.err); result != tc.isPending { t.Errorf("expected %v, got %v", tc.isPending, result) } }) } } func setUp(t *testing.T) (*gomock.Controller, workceptor.BaseWorkUnit, *workceptor.Workceptor, *mock_workceptor.MockNetceptorForWorkceptor, *logger.ReceptorLogger) { ctrl := gomock.NewController(t) mockNetceptor := mock_workceptor.NewMockNetceptorForWorkceptor(ctrl) // attach logger to the mock netceptor and return any number of times logger.SetGlobalLogLevel(4) logger := logger.NewReceptorLogger("") mockNetceptor.EXPECT().GetLogger().AnyTimes().Return(logger) mockNetceptor.EXPECT().NodeID().Return("NodeID") ctx := context.Background() w, err := workceptor.New(ctx, mockNetceptor, "/tmp") if err != nil { t.Errorf("Error while creating Workceptor: %v", err) } bwu := workceptor.BaseWorkUnit{} return ctrl, bwu, w, mockNetceptor, logger } func TestInit(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, nil) ctrl.Finish() } func TestErrorLog(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) bwu.Error("test error") ctrl.Finish() } func 
TestWarningLog(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) bwu.Warning("test warning") ctrl.Finish() } func TestInfoLog(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) bwu.Info("test info") ctrl.Finish() } func TestDebugLog(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) bwu.Error("test debug") ctrl.Finish() } func TestSetFromParams(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) err := bwu.SetFromParams(nil) if err != nil { t.Errorf("SetFromParams should return nil: got %v", err) } ctrl.Finish() } const ( rootDir = "/tmp" testDir = "NodeID/test" dirError = "no such file or directory" ) func TestUnitDir(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) expectedUnitDir := path.Join(rootDir, testDir) if unitDir := bwu.UnitDir(); unitDir != expectedUnitDir { t.Errorf("UnitDir returned wrong value: got %s, want %s", unitDir, expectedUnitDir) } ctrl.Finish() } func TestID(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "test", workceptor.FileSystem{}, &workceptor.RealWatcher{}) if id := bwu.ID(); id != "test" { t.Errorf("ID returned wrong value: got %s, want %s", id, "test") } ctrl.Finish() } func TestStatusFileName(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) expectedUnitDir := path.Join(rootDir, testDir) expectedStatusFileName := path.Join(expectedUnitDir, "status") if statusFileName := bwu.StatusFileName(); statusFileName != expectedStatusFileName { t.Errorf("StatusFileName returned wrong value: got %s, want %s", statusFileName, expectedStatusFileName) } ctrl.Finish() } func TestStdoutFileName(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) expectedUnitDir := path.Join(rootDir, testDir) expectedStdoutFileName := path.Join(expectedUnitDir, "stdout") if stdoutFileName := bwu.StdoutFileName(); stdoutFileName != expectedStdoutFileName { t.Errorf("StdoutFileName returned wrong value: got %s, want %s", stdoutFileName, expectedStdoutFileName) } ctrl.Finish() } func TestBaseSave(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) err := bwu.Save() if !strings.Contains(err.Error(), dirError) { t.Errorf("Base Work Unit Save, no such file or directory expected, instead %s", err.Error()) } ctrl.Finish() } func TestBaseLoad(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) err := bwu.Load() if !strings.Contains(err.Error(), dirError) { t.Errorf("TestBaseLoad, no such file or directory expected, instead %s", err.Error()) } ctrl.Finish() } func TestBaseUpdateFullStatus(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) sf := func(sfd *workceptor.StatusFileData) { // Do nothing } bwu.UpdateFullStatus(sf) err := bwu.LastUpdateError() if !strings.Contains(err.Error(), dirError) { t.Errorf("TestBaseUpdateFullStatus, no such file or directory expected, instead %s", err.Error()) } ctrl.Finish() } func TestBaseUpdateBasicStatus(t *testing.T) { 
ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) bwu.UpdateBasicStatus(1, "Details", 0) err := bwu.LastUpdateError() if !strings.Contains(err.Error(), dirError) { t.Errorf("TestBaseUpdateBasicStatus, no such file or directory expected, instead %s", err.Error()) } ctrl.Finish() } func TestBaseStatus(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) bwu.Init(w, "test", "", workceptor.FileSystem{}, &workceptor.RealWatcher{}) status := bwu.Status() if status.State != workceptor.WorkStatePending { t.Errorf("TestBaseStatus, expected work state pending, received %d", status.State) } ctrl.Finish() } func TestBaseRelease(t *testing.T) { ctrl, bwu, w, _, _ := setUp(t) mockFileSystem := mock_workceptor.NewMockFileSystemer(ctrl) bwu.Init(w, "test12345", "", mockFileSystem, &workceptor.RealWatcher{}) const removeError = "RemoveAll Error" testCases := []struct { name string err error force bool calls func() }{ { name: removeError, err: errors.New(removeError), force: false, calls: func() { mockFileSystem.EXPECT().RemoveAll(gomock.Any()).Return(errors.New(removeError)).Times(3) }, }, { name: "No remote error without force", err: nil, force: false, calls: func() { mockFileSystem.EXPECT().RemoveAll(gomock.Any()).Return(nil) }, }, { name: "No remote error with force", err: nil, force: true, calls: func() { mockFileSystem.EXPECT().RemoveAll(gomock.Any()).Return(nil) }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tc.calls() err := bwu.Release(tc.force) if err != nil && err.Error() != tc.err.Error() { t.Errorf("Error returned dosent match, err received %s, expected %s", err, tc.err) } }) } ctrl.Finish() } func TestMonitorLocalStatus(t *testing.T) { tests := []struct { name string statObj *Info statObjLater *Info addWatcherErr error statErr error fsNotifyEvent *fsnotify.Event // using pointer to allow nil logOutput string sleepDuration time.Duration }{ { name: "Handle Write Event", statObj: NewInfo("test", 1, 0, time.Now()), addWatcherErr: nil, statErr: nil, fsNotifyEvent: &fsnotify.Event{Op: fsnotify.Write}, logOutput: "Watcher Events Error reading", sleepDuration: 100 * time.Millisecond, }, { name: "Error Adding Watcher", statObj: NewInfo("test", 1, 0, time.Now()), addWatcherErr: fmt.Errorf("error adding watcher"), statErr: nil, fsNotifyEvent: nil, logOutput: "", sleepDuration: 100 * time.Millisecond, }, { name: "Error Reading Status", statObj: nil, addWatcherErr: fmt.Errorf("error adding watcher"), statErr: fmt.Errorf("stat error"), fsNotifyEvent: nil, logOutput: "", sleepDuration: 100 * time.Millisecond, }, { name: "Handle Context Cancellation", statObj: NewInfo("test", 1, 0, time.Now()), addWatcherErr: nil, statErr: nil, fsNotifyEvent: &fsnotify.Event{Op: fsnotify.Write}, logOutput: "Watcher Events Error reading", sleepDuration: 100 * time.Millisecond, }, { name: "Handle File Update Without Event", statObj: NewInfo("test", 1, 0, time.Now()), statObjLater: NewInfo("test", 1, 0, time.Now().Add(10*time.Second)), addWatcherErr: nil, statErr: nil, fsNotifyEvent: &fsnotify.Event{Op: fsnotify.Write}, logOutput: "Watcher Events Error reading", sleepDuration: 500 * time.Millisecond, }, { name: "Handle Remove Event", statObj: NewInfo("test", 1, 0, time.Now()), addWatcherErr: nil, statErr: nil, fsNotifyEvent: &fsnotify.Event{Op: fsnotify.Remove}, logOutput: "Watcher Events Remove reading", sleepDuration: 100 * time.Millisecond, }, { name: "Handle Rename Event", statObj: NewInfo("test", 1, 0, time.Now()), addWatcherErr: nil, statErr: 
nil, fsNotifyEvent: &fsnotify.Event{Op: fsnotify.Rename}, logOutput: "Watcher Events Rename reading", sleepDuration: 100 * time.Millisecond, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctrl, bwu, w, _, l := setUp(t) defer ctrl.Finish() randstring := randstr.RandomString(4) logFilePath := fmt.Sprintf("/tmp/monitorLocalStatusLog%s", randstring) mockWatcher := mock_workceptor.NewMockWatcherWrapper(ctrl) mockFileSystem := mock_workceptor.NewMockFileSystemer(ctrl) bwu.Init(w, "test", "", mockFileSystem, mockWatcher) mockFileSystem.EXPECT().Stat(gomock.Any()).Return(tc.statObj, tc.statErr).AnyTimes() if tc.statObjLater != nil { mockFileSystem.EXPECT().Stat(gomock.Any()).Return(tc.statObjLater, nil).AnyTimes() } mockWatcher.EXPECT().Add(gomock.Any()).Return(tc.addWatcherErr) mockWatcher.EXPECT().Remove(gomock.Any()).AnyTimes() mockWatcher.EXPECT().Close().AnyTimes() if tc.fsNotifyEvent != nil { logFile, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600) if err != nil { t.Error("error creating monitorLocalStatusLog file") } l.SetOutput(logFile) eventCh := make(chan fsnotify.Event, 1) mockWatcher.EXPECT().EventChannel().Return(eventCh).AnyTimes() go func() { eventCh <- *tc.fsNotifyEvent }() errorCh := make(chan error, 1) mockWatcher.EXPECT().ErrorChannel().Return(errorCh).AnyTimes() } go bwu.MonitorLocalStatus() time.Sleep(tc.sleepDuration) if tc.fsNotifyEvent != nil { logOutput, err := os.ReadFile(logFilePath) if err != nil && len(logOutput) == 0 { t.Errorf("error reading %s file", logFilePath) } if !bytes.Contains(logOutput, []byte(tc.logOutput)) { t.Errorf("expected log to be: %s, got %s", tc.logOutput, string(logOutput)) } } bwu.CancelContext() }) } } receptor-1.5.3/receptor-python-worker/000077500000000000000000000000001475465740700200375ustar00rootroot00000000000000receptor-1.5.3/receptor-python-worker/.gitignore000066400000000000000000000022071475465740700220300ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .mypy_cache # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml report.xml report.pylama *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # IPython Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # dotenv .env # virtualenv venv/ ENV/ # Spyder project settings .spyderproject # Rope project settings .ropeproject # vim *.swp # mac OS *.DS_Store # pytest *.pytest_cache # PyCharm .idea/ receptor-1.5.3/receptor-python-worker/README.md000066400000000000000000000001571475465740700213210ustar00rootroot00000000000000The receptor-python-worker command is called by Receptor to supervise the operation of a Python worker plugin. 
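Receptor starts it once per work unit, passing three arguments: a plugin directive of the form `namespace:function`, the path to the unit directory (which must already contain the `status` and `stdin` files), and the plugin configuration as a JSON string. As a rough illustration only (in normal operation Receptor supplies these arguments itself, and the plugin name and unit directory shown here are placeholders), a manual invocation looks like:

```bash
# Hypothetical example; Receptor normally launches this itself for each work unit.
receptor-python-worker your_package_name:execute /tmp/receptor/unit123 '{}'
```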
receptor-1.5.3/receptor-python-worker/pyproject.toml000066400000000000000000000001211475465740700227450ustar00rootroot00000000000000[build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" receptor-1.5.3/receptor-python-worker/receptor_python_worker/000077500000000000000000000000001475465740700246545ustar00rootroot00000000000000receptor-1.5.3/receptor-python-worker/receptor_python_worker/__init__.py000066400000000000000000000000251475465740700267620ustar00rootroot00000000000000from .work import runreceptor-1.5.3/receptor-python-worker/receptor_python_worker/__main__.py000066400000000000000000000000501475465740700267410ustar00rootroot00000000000000import sys from .work import run run() receptor-1.5.3/receptor-python-worker/receptor_python_worker/plugin_utils.py000066400000000000000000000036031475465740700277460ustar00rootroot00000000000000BYTES_PAYLOAD = "bytes" """ Inform Receptor that the given plugin expects BYTES for the message data """ BUFFER_PAYLOAD = "buffer" """ Inform Receptor that the given plugin expects a buffered reader for the message data """ FILE_PAYLOAD = "file" """ Inform Receptor that the given plugin expects a file path for the message data """ def plugin_export(payload_type): """ A decorator intended to be used by Receptor plugins in conjunction with entrypoints typically defined in your setup.py file:: entry_points={ 'receptor.worker': 'your_package_name = your_package_name.your_module', } ``your_package_name.your_module`` should then contain a function decorated with ``plugin_export`` as such:: @receptor.plugin_export(payload_type=receptor.BYTES_PAYLOAD): def execute(message, config, result_queue): result_queue.put("My plugin ran!") You can then send messages to this plugin across the Receptor mesh with the directive ``your_package_name:execute`` Depending on what kind of data you expect to receive you can select from one of 3 different incoming payload types. This determines the incoming type of the ``message`` data type: * BYTES_PAYLOAD: This will give you literal python bytes that you can then read and interpret. * BUFFER_PAYLOAD: This will send you a buffer that you can read(). This buffer will be automatically closed and its contents discarded when your plugin returns. * FILE_PAYLOAD: This will return you a file path that you can open() or manage in any way you see fit. It will be automatically removed after your plugin returns. For more information about developing plugins see :ref:`plugins`. 
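    As a further rough sketch (the function name and the result payload here are
    placeholders, not part of Receptor itself), a plugin that prefers to receive
    its payload as a file path might look like::

        @receptor.plugin_export(payload_type=receptor.FILE_PAYLOAD)
        def process_file(message, config, result_queue):
            # message is the path to a temporary file holding the payload;
            # the file is removed automatically after the plugin returns.
            with open(message, 'rb') as payload_file:
                result_queue.put({'payload_bytes': len(payload_file.read())})

    Each item placed on the result queue is serialized to JSON and appended to the
    work unit's stdout by the supervising receptor-python-worker process.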
""" def decorator(func): func.receptor_export = True func.payload_type = payload_type return func return decorator receptor-1.5.3/receptor-python-worker/receptor_python_worker/work.py000066400000000000000000000123151475465740700262120ustar00rootroot00000000000000import sys import os import threading import signal import queue from pathlib import Path import json import pkg_resources from .plugin_utils import BUFFER_PAYLOAD, BYTES_PAYLOAD, FILE_PAYLOAD # Allow existing worker plugins to "import receptor" and get our version of plugin_utils sys.modules['receptor'] = sys.modules[__package__+'.plugin_utils'] # WorkState constants WorkStatePending = 0 WorkStateRunning = 1 WorkStateSucceeded = 2 WorkStateFailed = 3 class WorkPluginRunner: def __init__(self, plugin_directive, unitdir, config): try: self.plugin_namespace, self.plugin_action = plugin_directive.split(":", 1) except ValueError as e: raise ValueError("Plugin directive must be of the form namespace:function") self.config = config self.unitdir = unitdir self.status_filename = os.path.join(unitdir, "status") if not os.path.exists(self.status_filename): raise ValueError("Status file does not exist in unitdir") self.stdin_filename = os.path.join(unitdir, "stdin") if not os.path.exists(self.stdin_filename): raise ValueError("Stdin file does not exist in unitdir") self.stdout_filename = os.path.join(unitdir, "stdout") Path(self.stdout_filename).touch(mode=0o0600, exist_ok=True) with open(self.status_filename) as file: self.status = json.load(file) self.plugin_worker = None self.plugin_action_method = None self.payload_input_type = None self.stdout_size = 0 self.response_queue = queue.Queue() self.monitor_thread = None self.shutting_down = False def load_plugin(self): entry_points = [ x for x in filter( lambda x: x.name == self.plugin_namespace, pkg_resources.iter_entry_points("receptor.worker") ) ] if not entry_points: raise ValueError(f"Plugin {self.plugin_namespace} not found") self.plugin_worker = entry_points[0].load() self.plugin_action_method = getattr(self.plugin_worker, self.plugin_action, False) if not self.plugin_action_method: raise ValueError(f"Function {self.plugin_action} does not exist in {self.plugin_namespace}") if not getattr(self.plugin_action_method, "receptor_export", False): raise ValueError(f"Not allowed to call non-exported {self.plugin_action} in {self.plugin_namespace}") self.payload_input_type = getattr(self.plugin_action_method, "payload_type", BYTES_PAYLOAD) def save_status(self, state, detail): self.status['State'] = state self.status['Detail'] = detail self.status['StdoutSize'] = self.stdout_size with open(self.status_filename, 'w') as file: json.dump(self.status, file) def write_stdout(self, data): with open(self.stdout_filename, 'ab') as file: file.write(data) self.stdout_size = file.tell() def queue_monitor(self): self.save_status(WorkStateRunning, "Running") while not self.shutting_down: item = self.response_queue.get() if not self.shutting_down: self.write_stdout(bytes(json.dumps(item)+"\n", 'UTF-8')) self.save_status(WorkStateRunning, "Running") self.response_queue.task_done() def run(self): self.save_status(WorkStatePending, "Starting Python worker plugin") if self.payload_input_type == FILE_PAYLOAD: payload = self.stdin_filename elif self.payload_input_type == BUFFER_PAYLOAD: payload = open(self.stdin_filename, 'rb') elif self.payload_input_type == BYTES_PAYLOAD: with open(self.stdin_filename, 'rb') as file: payload = file.read() else: raise ValueError("Unknown plugin action method") self.monitor_thread 
= threading.Thread(target=self.queue_monitor, daemon=True) self.monitor_thread.start() self.plugin_action_method(payload, self.config, self.response_queue) self.response_queue.join() self.save_status(WorkStateSucceeded, "Complete") def run(): if len(sys.argv) != 4: print("Invalid command line usage") sys.exit(1) try: wpr = WorkPluginRunner(sys.argv[1], sys.argv[2], json.loads(sys.argv[3])) except Exception as e: print(f"Error initializing worker object: {repr(e)}") sys.exit(1) def signal_handler(signum, frame): try: wpr.shutting_down = True wpr.save_status(WorkStateFailed, f"Killed by signal {signum}") except Exception as e: print(f"Error saving status: {repr(e)}") sys.exit(1) sys.exit(0) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) try: message = "Error loading worker" wpr.load_plugin() message = "Error running worker" wpr.run() except Exception as e: try: wpr.shutting_down = True wpr.save_status(WorkStateFailed, f"{message}: {repr(e)}") except Exception as e: print(f"Error saving status: {repr(e)}") sys.exit(1) sys.exit(0)receptor-1.5.3/receptor-python-worker/setup.cfg000066400000000000000000000010631475465740700216600ustar00rootroot00000000000000[metadata] name = receptor-python-worker author = Red Hat author_email = info@ansible.com summary = "The receptor-python-worker command is called by Receptor to supervise the operation of a Python worker plugin." home_page = https://github.com/ansible/receptor/tree/devel/receptor-python-worker description_file = README.md description_content_type = text/markdown version = file: .VERSION [options] packages = find: [options.entry_points] console_scripts = receptor-python-worker = receptor_python_worker:run [files] packages = receptor-python-worker receptor-1.5.3/receptorctl/000077500000000000000000000000001475465740700157145ustar00rootroot00000000000000receptor-1.5.3/receptorctl/.coveragerc000066400000000000000000000000251475465740700200320ustar00rootroot00000000000000[run] omit = tests/* receptor-1.5.3/receptorctl/.gitignore000066400000000000000000000022071475465740700177050ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class .mypy_cache # C extensions *.so # Distribution / packaging .Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml report.xml report.pylama *,cover .hypothesis/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # IPython Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # dotenv .env # virtualenv venv/ ENV/ # Spyder project settings .spyderproject # Rope project settings .ropeproject # vim *.swp # mac OS *.DS_Store # pytest *.pytest_cache # PyCharm .idea/ receptor-1.5.3/receptorctl/.pip-tools.toml000066400000000000000000000001401475465740700206100ustar00rootroot00000000000000[tool.pip-tools] resolver = "backtracking" allow-unsafe = true strip-extras = true quiet = true receptor-1.5.3/receptorctl/MANIFEST.in000066400000000000000000000002271475465740700174530ustar00rootroot00000000000000recursive-include receptorctl *.py include .VERSION exclude .gitignore exclude noxfile.py exclude build-requirements.txt exclude test-requirements.txt receptor-1.5.3/receptorctl/README.md000066400000000000000000000044031475465740700171740ustar00rootroot00000000000000# Receptorctl Receptorctl is a front-end CLI and importable Python library that interacts with Receptor over its control socket interface. ## Setting up nox This project includes a `nox` configuration to automate tests, checks, and other functions in a reproducible way using isolated environments. Before you submit a PR, you should install `nox` and verify your changes. > To run `make receptorctl-lint` and `receptorctl-test` from the repository root, you must first install `nox`. 1. Install `nox` using `python3 -m pip install nox` or your distribution's package manager. 2. Run `nox --list` from the `receptorctl` directory to view available sessions. You can run `nox` with no arguments to execute all checks and tests. Alternatively, you can run only certain tasks as outlined in the following sections. > By default nox sessions install pinned dependencies from the `requirements` directory. You can use unpinned dependencies as follows: ```bash PINNED=false nox -s lint ``` ## Checking changes to Receptorctl Run the following `nox` sessions to check for code style and formatting issues: * Run all checks. ```bash nox -s lint ``` * Check code style. ```bash nox -s check_style ``` * Check formatting. ```bash nox -s check_format ``` * Format code if the check fails. ```bash nox -s format ``` ## Running Receptorctl tests Run the following `nox` sessions to test Receptorctl changes: * Run tests against the complete matrix of Python versions. ```bash nox -s tests ``` * Run tests against a specific Python version. ```bash # For example, this command tests Receptorctl against Python 3.12. nox -s tests-3.12 ``` ## Updating dependencies Update dependencies in the `requirements` directory as follows: 1. Add any packages or pins to the `*.in` file. 2. Do one of the following from the `receptorctl` directory: * Update all dependencies. ```bash nox -s pip-compile ``` * Generate the full dependency tree for a single set of dependencies, for example: ```bash nox -s "pip-compile-3.12(tests)" ``` > You can also pass the `--no-upgrade` flag when adding a new package. > This avoids bumping transitive dependencies for other packages in the `*.in` file. 
```bash nox -s pip-compile -- --no-upgrade ``` receptor-1.5.3/receptorctl/noxfile.py000066400000000000000000000103631475465740700177350ustar00rootroot00000000000000import os import subprocess from glob import iglob from pathlib import Path import nox.command LATEST_PYTHON_VERSION = ["3.12"] python_versions = ["3.8", "3.9", "3.10", "3.11", "3.12"] LINT_FILES: tuple[str, ...] = (*iglob("**/*.py"),) PINNED = os.environ.get("PINNED", "true").lower() in {"1", "true"} requirements_directory = Path("requirements").resolve() requirements_files = [requirements_input_file_path.stem for requirements_input_file_path in requirements_directory.glob("*.in")] def install(session: nox.Session, *args, req: str, **kwargs): if PINNED: pip_constraint = requirements_directory / f"{req}.txt" kwargs.setdefault("env", {})["PIP_CONSTRAINT"] = pip_constraint session.log(f"export PIP_CONSTRAINT={pip_constraint!r}") session.install("-r", f"{requirements_directory}/{req}.in", *args, **kwargs) def version(session: nox.Session): """ Create a .VERSION file. """ try: official_version = session.run_install( "git", "describe", "--exact-match", "--tags", external=True, stderr=subprocess.DEVNULL, ) except nox.command.CommandFailed: official_version = None print("Using the closest annotated tag instead of an exact match.") if official_version: version = official_version.strip() else: tag = session.run_install("git", "describe", "--tags", "--always", silent=True, external=True) rev = session.run_install("git", "rev-parse", "--short", "HEAD", silent=True, external=True) version = tag.split("-")[0] + "+" + rev Path(".VERSION").write_text(version) @nox.session(python=LATEST_PYTHON_VERSION) def coverage(session: nox.Session): """ Run receptorctl tests with code coverage """ install(session, req="tests") version(session) session.install("-e", ".") session.run( "pytest", "--cov", "--cov-report", "term-missing:skip-covered", "--cov-report", "xml:receptorctl_coverage.xml", "--verbose", "tests", *session.posargs, ) @nox.session(python=python_versions) def tests(session: nox.Session): """ Run receptorctl tests """ install(session, req="tests") version(session) session.install("-e", ".") session.run("pytest", "-v", "tests", *session.posargs) @nox.session def check_style(session: nox.Session): """ Check receptorctl Python code style """ install(session, req="lint") session.run("flake8", *session.posargs, *LINT_FILES) @nox.session def check_format(session: nox.Session): """ Check receptorctl Python file formatting without making changes """ install(session, req="lint") session.run("black", "--check", *session.posargs, *LINT_FILES) @nox.session def format(session: nox.Session): """ Format receptorctl Python files """ install(session, req="lint") session.run("black", *session.posargs, *LINT_FILES) @nox.session def lint(session: nox.Session): """ Check receptorctl for code style and formatting """ session.notify("check_style") session.notify("check_format") @nox.session(name="pip-compile", python=["3.12"]) @nox.parametrize(["req"], arg_values_list=requirements_files, ids=requirements_files) def pip_compile(session: nox.Session, req: str): """Generate lock files from input files or upgrade packages in lock files.""" # fmt: off session.install( "-r", str(requirements_directory / "pip-tools.in"), "-c", str(requirements_directory / "pip-tools.txt"), ) # fmt: on # Use --upgrade by default unless a user passes -P. 
upgrade_related_cli_flags = ("-P", "--upgrade-package", "--no-upgrade") has_upgrade_related_cli_flags = any(arg.startswith(upgrade_related_cli_flags) for arg in session.posargs) injected_extra_cli_args = () if has_upgrade_related_cli_flags else ("--upgrade",) output_file = os.path.relpath(Path(requirements_directory / f"{req}.txt")) input_file = os.path.relpath(Path(requirements_directory / f"{req}.in")) session.run( "pip-compile", "--output-file", str(output_file), *session.posargs, *injected_extra_cli_args, str(input_file), ) receptor-1.5.3/receptorctl/pyproject.toml000066400000000000000000000001711475465740700206270ustar00rootroot00000000000000[build-system] requires = ["setuptools"] build-backend = "setuptools.build_meta" [tool.black] exclude = "(build|.eggs)" receptor-1.5.3/receptorctl/receptorctl/000077500000000000000000000000001475465740700202425ustar00rootroot00000000000000receptor-1.5.3/receptorctl/receptorctl/__init__.py000066400000000000000000000001511475465740700223500ustar00rootroot00000000000000from .cli import run from .socket_interface import ReceptorControl __all__ = ["run", "ReceptorControl"] receptor-1.5.3/receptorctl/receptorctl/__main__.py000066400000000000000000000000341475465740700223310ustar00rootroot00000000000000from .cli import run run() receptor-1.5.3/receptorctl/receptorctl/cli.py000066400000000000000000000434541475465740700213750ustar00rootroot00000000000000import sys import os import time import select import fcntl import tty import termios import click import json from functools import partial import dateutil.parser import importlib.metadata from .socket_interface import ReceptorControl class IgnoreRequiredWithHelp(click.Group): # allows user to call --help without needing to provide required=true parameters def parse_args(self, ctx, args): try: return super(IgnoreRequiredWithHelp, self).parse_args(ctx, args) except click.MissingParameter: if "--help" not in args: raise # remove the required params so that help can display for param in self.params: param.required = False return super(IgnoreRequiredWithHelp, self).parse_args(ctx, args) def print_json(json_data): click.echo(json.dumps(json_data, indent=4, sort_keys=True)) def print_message(message="", nl=True): click.echo(message, nl=nl) def print_warning(message, nl=True): click.echo(click.style(f"Warning: {message}", fg="magenta"), err=True, nl=nl) def print_error(message, nl=True): click.echo(click.style(f"ERROR: {message}", fg="red"), err=True, nl=nl) @click.group(cls=IgnoreRequiredWithHelp) @click.pass_context @click.option( "--socket", envvar="RECEPTORCTL_SOCKET", required=True, show_envvar=True, help="Control socket address to for the Receptor connection (The default is 'unix:' for a Unix socket, use 'tcp://' for a TCP socket)", # noqa: E501 ) @click.option( "--config", "-c", default=None, envvar="RECEPTORCTL_CONFIG", required=False, show_envvar=True, help="Config filename configured for receptor", ) @click.option( "--tls-client", "tlsclient", default=None, envvar="RECEPTORCTL_TLSCLIENT", required=False, show_envvar=True, help="TLS client name specified in config", ) @click.option( "--rootcas", default=None, help="Root CA bundle to use instead of system trust when connecting with tls", ) @click.option("--key", default=None, help="Client private key filename") @click.option("--cert", default=None, help="Client certificate filename") @click.option( "--insecureskipverify", default=False, help="Accept any server cert", show_default=True, ) def cli(ctx, socket, config, tlsclient, rootcas, key, cert, 
insecureskipverify): ctx.obj = { "rc": None, "receptorctlVersion": importlib.metadata.version("receptorctl"), "receptorVersion": "Unknown", } # If we got a socket parameter we can make a ReceptorControl object if ctx.params.get("socket", None) is not None: ctx.obj["rc"] = ReceptorControl( socket, config=config, tlsclient=tlsclient, rootcas=rootcas, key=key, cert=cert, insecureskipverify=insecureskipverify, ) # Load and stash the versions ctx.obj["receptorVersion"] = ctx.obj["rc"].simple_command( '{"command":"status","requested_fields":["Version"]}' )["Version"] # If they mismatch throw a stderr warning if ctx.obj["receptorVersion"] != ctx.obj["receptorctlVersion"]: click.echo( click.style( "Warning: receptorctl and receptor are different versions, they may not be compatible", # noqa E501 fg="magenta", ), err=True, ) def get_rc(ctx): return ctx.obj["rc"] @cli.command(help="Show the status of the Receptor network.") @click.pass_context @click.option("--json", "printjson", help="Print as JSON", is_flag=True) def status(ctx, printjson): rc = get_rc(ctx) status = rc.simple_command("status") if printjson: print_json(status) return node_id = status.pop("NodeID") print_message(f"Node ID: {node_id}") version = status.pop("Version") print_message(f"Version: {version}") sysCPU = status.pop("SystemCPUCount") print_message(f"System CPU Count: {sysCPU}") sysMemory = status.pop("SystemMemoryMiB") print_message(f"System Memory MiB: {sysMemory}") longest_node = 12 connections = status.pop("Connections", None) if connections: for conn in connections: length = len(conn["NodeID"]) if length > longest_node: longest_node = length costs = status.pop("KnownConnectionCosts", None) if costs: for node in costs: if len(node) > longest_node: longest_node = len(node) if connections: for conn in connections: length = len(conn["NodeID"]) if length > longest_node: longest_node = length print_message("") print_message(f"{'Connection':<{longest_node}} Cost") for conn in connections: print_message(f"{conn['NodeID']:<{longest_node}} {conn['Cost']}") if costs: print_message() print_message(f"{'Known Node':<{longest_node}} Known Connections") for node in costs: print_message(f"{node:<{longest_node}} ", nl=False) for peer, cost in costs[node].items(): print_message(f"{peer}: {cost} ", nl=False) print_message() routes = status.pop("RoutingTable", None) if routes: print_message() print_message(f"{'Route':<{longest_node}} Via") for node in routes: print_message(f"{node:<{longest_node}} {routes[node]}") ads = status.pop("Advertisements", None) if ads: print_message() print_message( f"{'Node':<{longest_node}} Service Type Last Seen Tags" ) for ad in ads: time = dateutil.parser.parse(ad["Time"]) if ad["ConnType"] == 0: conn_type = "Datagram" elif ad["ConnType"] == 1: conn_type = "Stream" elif ad["ConnType"] == 2: conn_type = "StreamTLS" last_seen = f"{time:%Y-%m-%d %H:%M:%S}" print_message( f"{ad['NodeID']:<{longest_node}} {ad['Service']:<9} {conn_type:<10} {last_seen:<21} {'-' if (ad['Tags'] is None) else str(ad['Tags']):<16}" # noqa: E501 ) def print_worktypes(header, isSecure): printOnce = True seen_nodes = [] for ad in ads: commands = ad["WorkCommands"] if not commands: continue workTypes = [] for c in commands: wT = c["WorkType"] if c["Secure"] == isSecure: workTypes.append(wT) if not workTypes: continue node = ad["NodeID"] if node in seen_nodes: continue else: seen_nodes.append(node) workTypes = ", ".join(workTypes) if printOnce: print_message() print_message(f"{'Node':<{longest_node}} {header}") printOnce = False 
print_message(f"{ad['NodeID']:<{longest_node}} ", nl=False) print_message(workTypes) if ads: print_worktypes("Work Types", False) print_worktypes("Secure Work Types", True) if status: print_message("Additional data returned from Receptor:") print_json(status) @cli.command(help="Ping a Receptor node.") @click.pass_context @click.argument("node") @click.option("--count", default=4, help="Number of pings to send", show_default=True) @click.option( "--delay", default=1.0, help="Time to wait between pings", show_default=True ) def ping(ctx, node, count, delay): rc = get_rc(ctx) ping_error = False for i in range(count): results = rc.simple_command(f"ping {node}") if "Success" in results and results["Success"]: print_message(f"Reply from {results['From']} in {results['TimeStr']}") else: ping_error = True if "From" in results and "TimeStr" in results: print_error( f"{results['Error']} from {results['From']} in {results['TimeStr']}" ) else: print_error(f"{results['Error']}") if i < count - 1: time.sleep(delay) if ping_error: sys.exit(2) @cli.command(help="Reload receptor configuration.") @click.pass_context def reload(ctx): rc = get_rc(ctx) results = rc.simple_command("reload") if "Success" in results and results["Success"]: print_message("Reload successful") else: print_error(f"{results['Error']}") if "ERRORCODE 3" in results["Error"]: sys.exit(3) elif "ERRORCODE 4" in results["Error"]: sys.exit(4) else: sys.exit(5) @cli.command(help="Do a traceroute to a Receptor node.") @click.pass_context @click.argument("node") def traceroute(ctx, node): rc = get_rc(ctx) results = rc.simple_command(f"traceroute {node}") for resno in sorted(results, key=lambda r: int(r)): resval = results[resno] if "Error" in resval: print_error( f"{resno}: Error {resval['Error']} from {resval['From']} in {resval['TimeStr']}" ) else: print_message(f"{resno}: {resval['From']} in {resval['TimeStr']}") @cli.command(help="Connect the local terminal to a Receptor service on a remote node.") @click.pass_context @click.argument("node") @click.argument("service") @click.option( "--raw", "-r", default=False, is_flag=True, help="Set terminal to raw mode" ) @click.option( "--tls-client", "tlsclient", type=str, default="", help="TLS client config name used when connecting to remote node", ) def connect(ctx, node, service, raw, tlsclient): rc = get_rc(ctx) rc.connect_to_service(node, service, tlsclient) stdin_tattrs = termios.tcgetattr(sys.stdin) stdin_fcntl = fcntl.fcntl(sys.stdin, fcntl.F_GETFL) fcntl.fcntl(sys.stdin, fcntl.F_SETFL, stdin_fcntl | os.O_NONBLOCK) if raw and sys.stdin.isatty(): tty.setraw(sys.stdin.fileno(), termios.TCSAFLUSH) new_term = termios.tcgetattr(sys.stdin) new_term[3] = new_term[3] & ~termios.ISIG termios.tcsetattr(sys.stdin, termios.TCSAFLUSH, new_term) try: while True: r, _, _ = select.select([rc._socket, sys.stdin], [], []) for readable in r: if readable is rc._socket: data = rc._socket.recv(4096) if not data: return sys.stdout.write(data.decode()) sys.stdout.flush() else: data = sys.stdin.read() if not data: return rc._socket.send(data.encode()) finally: termios.tcsetattr(sys.stdin, termios.TCSAFLUSH, stdin_tattrs) print_message() @cli.group(help="Commands related to unit-of-work processing") def work(): pass @cli.command(help="Show version information for receptorctl and the receptor node") @click.pass_context def version(ctx): print_message(f"receptorctl {ctx.obj['receptorctlVersion']}") print_message(f"receptor {ctx.obj['receptorVersion']}") @work.command(name="list", help="List known units of work.") 
@click.option("--quiet", "-q", is_flag=True, help="Only list unit IDs with no detail") @click.option( "--node", default=None, type=str, help="Receptor node to list work from. Defaults to the local node.", ) @click.option( "--unit_id", type=str, required=False, default="", help="Only show detail for a specific unit id", ) @click.option( "--tls-client", "tlsclient", type=str, default="", help="TLS client config name used when connecting to remote node", ) @click.pass_context def list_units(ctx, unit_id, node, tlsclient, quiet): rc = get_rc(ctx) if node: rc.connect_to_service(node, "control", tlsclient) rc.handshake() if unit_id: unit_id = " " + unit_id work = rc.simple_command("work list" + unit_id) if quiet: for k in work.keys(): print_message(k) else: print_json(work) @work.command(help="Submit a new unit of work.") @click.pass_context @click.argument("worktype", type=str, required=True) @click.option( "--node", type=str, help="Receptor node to run the work on. Defaults to the local node.", ) @click.option( "--payload", "-p", type=str, help="File containing unit of work data. Use - for stdin.", ) @click.option( "--payload-literal", "-l", type=str, help="Use the command line string as the literal unit of work data.", ) @click.option("--no-payload", "-n", is_flag=True, help="Send an empty payload.") @click.option( "--tls-client", "tlsclient", type=str, default="", help="TLS client used when submitting work to a remote node", ) @click.option( "--ttl", type=str, default="", help="Time to live until remote work must start, e.g. 1h20m30s or 30m10s", ) @click.option("--signwork", help="Digitally sign remote work submissions", is_flag=True) @click.option( "--follow", "-f", help="Remain attached to the job and print its results to stdout", is_flag=True, ) @click.option("--rm", help="Release unit after completion", is_flag=True) @click.option( "--param", "-a", help="Additional Receptor parameter (key=value format)", multiple=True, ) @click.argument("cmdparams", type=str, required=False, nargs=-1) def submit( ctx, worktype, node, payload, no_payload, payload_literal, tlsclient, ttl, signwork, follow, rm, param, cmdparams, ): pcmds = 0 if payload: pcmds += 1 if no_payload: pcmds += 1 if payload_literal: pcmds += 1 if pcmds < 1: print_error("Must provide one of --payload, --no-payload or --payload-literal.") sys.exit(1) if pcmds > 1: print_error( "Cannot provide more than one of --payload, --no-payload and --payload-literal." ) sys.exit(1) if rm and not follow: print_warning("using --rm without --follow. 
Unit results will never be seen.") if payload_literal: payload_data = f"{payload_literal}\n".encode() elif no_payload: payload_data = "".encode() else: if payload == "-": payload_data = sys.stdin.buffer else: try: payload_data = open(payload, "rb") except Exception as e: print_error(f"Failed to load payload file: {e}") sys.exit(1) unitid = None try: params = dict(s.split("=", 1) for s in param) if cmdparams: allparams = [] if "params" in params: allparams.append(params["params"]) allparams.extend(cmdparams) params["params"] = " ".join(allparams) if node == "": node = None rc = get_rc(ctx) work = rc.submit_work( worktype, payload_data, node=node, tlsclient=tlsclient, ttl=ttl, signwork=signwork, params=params, ) result = work.pop("result") unitid = work.pop("unitid") if follow: ctx.invoke(results, unit_id=unitid) else: print_message(f"Result: {result}") print_message(f"Unit ID: {unitid}") except Exception as e: print_error(e) sys.exit(101) finally: if rm and unitid: op_on_unit_ids(ctx, "release", [unitid]) @work.command(help="Get results for a previously or currently running unit of work.") @click.pass_context @click.argument("unit_id", type=str, required=True) def results(ctx, unit_id): rc = get_rc(ctx) resultsfile = rc.get_work_results(unit_id) for text in iter(partial(resultsfile.readline, 256), b""): sys.stdout.buffer.write(text) sys.stdout.buffer.flush() rc = get_rc(ctx) status = rc.simple_command(f"work status {unit_id}") state = status.pop("State", 0) if state == 3: # Failed detail = status.pop("Detail", "Unknown") print_error(f"Remote unit failed: {detail}\n") sys.exit(1) def op_on_unit_ids(ctx, op, unit_ids): rc = get_rc(ctx) for unit_id in unit_ids: try: res = list(rc.simple_command(f"work {op} {unit_id}").items())[0] print_message(f"({res[1]}, {res[0]})") except Exception as e: print_error(f"{unit_id}: ERROR: {e}") sys.exit(1) @work.command(help="Cancel (kill) one or more units of work.") @click.argument("unit_ids", nargs=-1) @click.pass_context def cancel(ctx, unit_ids): if len(unit_ids) == 0: print_warning("No unit IDs supplied: Not doing anything") return print_message("Cancelled:") op_on_unit_ids(ctx, "cancel", unit_ids) @work.command(help="Release (delete) one or more units of work.") @click.option( "--force", help="Delete locally even if we can't reach the remote node", is_flag=True, ) @click.option("--all", help="Delete all work units", is_flag=True) @click.argument("unit_ids", nargs=-1) @click.pass_context def release(ctx, force, all, unit_ids): if len(unit_ids) == 0 and not all: print_warning("No unit IDs supplied: Not doing anything") return op = "release" if not force else "force-release" print_message("Released:") if all: rc = get_rc(ctx) work = rc.simple_command("work list") op_on_unit_ids(ctx, op, work.keys()) else: op_on_unit_ids(ctx, op, unit_ids) def run(): try: cli.main(sys.argv[1:], standalone_mode=False) except click.exceptions.Abort: pass except Exception as e: print_error(e) sys.exit(1) sys.exit(0) receptor-1.5.3/receptorctl/receptorctl/socket_interface.py000066400000000000000000000232151475465740700241270ustar00rootroot00000000000000import os import re import io import socket import shutil import json import ssl import yaml def shutdown_write(sock): if isinstance(sock, ssl.SSLSocket): super(ssl.SSLSocket, sock).shutdown(socket.SHUT_WR) else: sock.shutdown(socket.SHUT_WR) class ReceptorControl: def __init__( self, socketaddress, config=None, tlsclient=None, rootcas=None, key=None, cert=None, insecureskipverify=False, ): if config and any((rootcas, key, 
cert)): raise RuntimeError("Cannot specify both config and rootcas, key, cert") if config and not tlsclient: raise RuntimeError("Must specify both config and tlsclient") self._socket = None self._sockfile = None self._remote_node = None self._socketaddress = socketaddress self._rootcas = rootcas self._key = key self._cert = cert self._insecureskipverify = insecureskipverify if config and tlsclient: self.readconfig(config, tlsclient) def readstr(self): return self._sockfile.readline().decode().strip() def writestr(self, str): self._sockfile.write(str.encode()) self._sockfile.flush() def handshake(self): m = re.compile("Receptor Control, node (.+)").fullmatch(self.readstr()) if not m: raise RuntimeError("Failed to connect to Receptor socket") self._remote_node = m[1] def read_and_parse_json(self): text = self.readstr() if str.startswith(text, "ERROR:"): raise RuntimeError(text[7:]) data = json.loads(text) return data def readconfig(self, config, tlsclient): with open(config, "r") as yamlfid: yamldata = yaml.load(yamlfid, Loader=yaml.FullLoader) yamlfid.close() for i in yamldata: key = i.get("tls-client", None) if key: if key["name"] == tlsclient: self._rootcas = key.get("rootcas", self._rootcas) self._key = key.get("key", self._key) self._cert = key.get("cert", self._cert) self._insecureskipverify = key.get( "insecureskipverify", self._insecureskipverify ) break def simple_command(self, command): self.connect() self.writestr(f"{command}\n") return self.read_and_parse_json() def connect(self): if self._socket is not None: return m = re.compile( "(tcp|tls):(//)?([a-zA-Z0-9-.:]+):([0-9]+)|(unix:(//)?)?([^:]+)" ).fullmatch(self._socketaddress) if m: unixsocket = m[7] host = m[3] port = m[4] protocol = m[1] if unixsocket: path = os.path.expanduser(unixsocket) if not os.path.exists(path): raise ValueError(f"Socket path does not exist: {path}") self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self._socket.connect(path) self._sockfile = self._socket.makefile("rwb") self.handshake() return elif host and port: self._socket = None addrs = socket.getaddrinfo( host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE, ) for addr in addrs: family, type, proto, canonname, sockaddr = addr try: self._socket = socket.socket(family, type, proto) except OSError: self._socket = None continue try: if protocol == "tls": context = ssl.create_default_context( purpose=ssl.Purpose.SERVER_AUTH, cafile=self._rootcas, ) context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 if self._key and self._cert: context.load_cert_chain( certfile=self._cert, keyfile=self._key ) if not self._insecureskipverify: context.check_hostname = True else: context.check_hostname = False self._socket = context.wrap_socket( self._socket, server_hostname=host ) self._socket.connect(sockaddr) except OSError: self._socket.close() self._socket = None continue self._sockfile = self._socket.makefile("rwb") break if self._socket is None: raise ValueError(f"Could not connect to host {host} port {port}") self.handshake() return raise ValueError(f"Invalid socket address {self._socketaddress}") def close(self): if self._sockfile is not None: try: self._sockfile.close() finally: self._sockfile = None if self._socket is not None: try: self._socket.close() finally: self._socket = None def connect_to_service(self, node, service, tlsclient): self.connect() self.writestr(f"connect {node} {service} {tlsclient}\n") text = self.readstr() if not str.startswith(text, "Connecting"): raise RuntimeError(text) def submit_work( self, 
worktype, payload, node=None, tlsclient=None, ttl=None, signwork=False, params=None, ): self.connect() if node is None: node = "localhost" commandMap = { "command": "work", "subcommand": "submit", "node": node, "worktype": worktype, } if tlsclient: commandMap["tlsclient"] = tlsclient if ttl: commandMap["ttl"] = ttl if signwork: commandMap["signwork"] = "true" if params: for k, v in params.items(): if k not in commandMap: if v[0] == "@" and v[:2] != "@@": fname = v[1:] if not os.path.exists(fname): raise FileNotFoundError("{} does not exist".format(fname)) try: with open(fname, "r") as f: v_contents = f.read() except Exception: raise OSError("could not read from file {}".format(fname)) commandMap[k] = v_contents else: commandMap[k] = v else: raise RuntimeError(f"Duplicate or illegal parameter {k}") commandJson = json.dumps(commandMap) command = f"{commandJson}\n" self.writestr(command) text = self.readstr() m = re.compile( "Work unit created with ID (.+). Send stdin data and EOF." ).fullmatch(text) if not m: errmsg = "Failed to start work unit" if str.startswith(text, "ERROR: "): errmsg = errmsg + ": " + text[7:] raise RuntimeError(errmsg) if isinstance(payload, io.IOBase): shutil.copyfileobj(payload, self._sockfile) elif isinstance(payload, str): self.writestr(payload) elif isinstance(payload, bytes): self._sockfile.write(payload) else: raise RuntimeError("Unknown payload type") self._sockfile.flush() shutdown_write(self._socket) text = self.readstr() self.close() if text.startswith("ERROR:"): raise RuntimeError(f"Remote error: {text}") result = json.loads(text) return result def get_work_results( self, unit_id, startpos=0, return_socket=False, return_sockfile=True ): self.connect() self.writestr(f"work results {unit_id} {startpos}\n") text = self.readstr() m = re.compile("Streaming results for work unit (.+)").fullmatch(text) if not m: errmsg = "Failed to get results" if str.startswith(text, "ERROR: "): errmsg = errmsg + ": " + text[7:] raise RuntimeError(errmsg) shutdown_write(self._socket) # We return the filelike object created by makefile() by default, or optionally # the socket itself. Either way, we close the other dup'd handle so the caller's # close will be effective. 
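# Keep local references and detach the instance attributes so this ReceptorControl object does not reuse the streaming connection; note that the local name `socket` shadows the imported socket module for the rest of this method.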
socket = self._socket sockfile = self._sockfile try: if not return_socket: self._socket.close() if not return_sockfile: self._sockfile.close() finally: self._socket = None self._sockfile = None if return_socket and return_sockfile: return socket, sockfile elif return_socket: return socket elif return_sockfile: return sockfile else: return receptor-1.5.3/receptorctl/requirements/000077500000000000000000000000001475465740700204375ustar00rootroot00000000000000receptor-1.5.3/receptorctl/requirements/lint.in000066400000000000000000000002531475465740700217350ustar00rootroot00000000000000# This requirements file is used for receptorctl lint and formatting checks flake8 # <-- used to check Python code style black # <-- used to check Python code formatting receptor-1.5.3/receptorctl/requirements/lint.txt000066400000000000000000000010661475465740700221510ustar00rootroot00000000000000# # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --output-file=requirements/lint.txt --strip-extras requirements/lint.in # black==24.10.0 # via -r requirements/lint.in click==8.1.7 # via black flake8==7.1.1 # via -r requirements/lint.in mccabe==0.7.0 # via flake8 mypy-extensions==1.0.0 # via black packaging==24.0 # via black pathspec==0.12.1 # via black platformdirs==4.2.2 # via black pycodestyle==2.12.0 # via flake8 pyflakes==3.2.0 # via flake8 receptor-1.5.3/receptorctl/requirements/pip-tools.in000066400000000000000000000002021475465740700227070ustar00rootroot00000000000000# This requirements file is used to generate lock files for receptorctl pip-tools >= 7 # .pip-tools.toml was introduced in v6.14 receptor-1.5.3/receptorctl/requirements/pip-tools.txt000066400000000000000000000011241475465740700231240ustar00rootroot00000000000000# # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --output-file=requirements/pip-tools.txt --strip-extras requirements/pip-tools.in # build==1.2.1 # via pip-tools click==8.1.7 # via pip-tools packaging==24.0 # via build pip-tools==7.4.1 # via -r pip-tools.in pyproject-hooks==1.1.0 # via # build # pip-tools wheel==0.43.0 # via pip-tools # The following packages are considered to be unsafe in a requirements file: pip==24.0 # via pip-tools setuptools==70.0.0 # via pip-tools receptor-1.5.3/receptorctl/requirements/tests.in000066400000000000000000000001761475465740700221350ustar00rootroot00000000000000# This requirements file is used for receptorctl tests pytest==8.3.3 # <-- used to run receptorctl tests pytest-cov coverage receptor-1.5.3/receptorctl/requirements/tests.txt000066400000000000000000000007721475465740700223500ustar00rootroot00000000000000# # This file is autogenerated by pip-compile with Python 3.11 # by the following command: # # pip-compile --allow-unsafe --output-file=requirements/tests.txt --strip-extras requirements/tests.in # coverage==7.6.1 # via # -r requirements/tests.in # pytest-cov iniconfig==2.0.0 # via pytest packaging==24.0 # via pytest pluggy==1.5.0 # via pytest pytest==8.3.3 # via # -r requirements/tests.in # pytest-cov pytest-cov==5.0.0 # via -r requirements/tests.in receptor-1.5.3/receptorctl/setup.cfg000066400000000000000000000011741475465740700175400ustar00rootroot00000000000000[metadata] name = receptorctl author = Red Hat author_email = info@ansible.com summary = "Receptorctl is a front-end CLI and importable Python library that interacts with Receptor over its control socket interface." 
home_page = https://ansible.readthedocs.io/projects/receptor/ description_file = README.md description_content_type = text/markdown license = "Apache License 2.0" version = file: .VERSION [options] packages = find: install_requires = python-dateutil click pyyaml [options.entry_points] console_scripts = receptorctl = receptorctl:run [files] packages = receptorctl [flake8] max-line-length = 100 receptor-1.5.3/receptorctl/setup.py000066400000000000000000000003061475465740700174250ustar00rootroot00000000000000# This file is only used by our downstream RPM builds. # Remove this once that tooling has been updated to work with setup.cfg. import setuptools if __name__ == "__main__": setuptools.setup() receptor-1.5.3/receptorctl/tests/000077500000000000000000000000001475465740700170565ustar00rootroot00000000000000receptor-1.5.3/receptorctl/tests/__init__.py000066400000000000000000000001021475465740700211600ustar00rootroot00000000000000import os test_dir = os.path.dirname(os.path.realpath(__file__)) receptor-1.5.3/receptorctl/tests/conftest.py000066400000000000000000000265071475465740700212670ustar00rootroot00000000000000import receptorctl import pytest import subprocess import os import shutil import time import json import yaml from click.testing import CliRunner from .lib import create_certificate @pytest.fixture(scope="session") def base_tmp_dir(): receptor_tmp_dir = "/tmp/receptor" base_tmp_dir = "/tmp/receptorctltest" # Clean up tmp directory and create a new one if os.path.exists(base_tmp_dir): shutil.rmtree(base_tmp_dir) os.mkdir(base_tmp_dir) yield base_tmp_dir # Tear-down # if os.path.exists(base_tmp_dir): # shutil.rmtree(base_tmp_dir) if os.path.exists(receptor_tmp_dir): shutil.rmtree(receptor_tmp_dir) subprocess.call(["killall", "receptor"]) @pytest.fixture(scope="class") def receptor_mesh(base_tmp_dir): class ReceptorMeshSetup: # Relative dir to the receptorctl tests mesh_definitions_dir = "tests/mesh-definitions" def __init__(self): # Default vars self.base_tmp_dir = base_tmp_dir # Required dependencies self.__check_dependencies() def setup(self, mesh_name: str = "mesh1", socket_file_name: str = "node1.sock"): self.mesh_name = mesh_name self.__change_config_files_dir(mesh_name) self.__create_tmp_dir() self.__create_certificates() self.socket_file_name = socket_file_name # HACK this should be a dinamic way to select a node socket self.default_socket_unix = "unix://" + os.path.join( self.get_mesh_tmp_dir(), socket_file_name ) def default_receptor_controller_unix(self): return receptorctl.ReceptorControl(self.default_socket_unix) def __change_config_files_dir(self, mesh_name: str): self.config_files_dir = "{}/{}".format(self.mesh_definitions_dir, mesh_name) self.config_files = [] # Iterate over all the files in the config_files_dir # and create a list of all files that end with .yaml or .yml for f in os.listdir(self.config_files_dir): if f.endswith(".yaml") or f.endswith(".yml"): self.config_files.append(os.path.join(self.config_files_dir, f)) def __create_certificates(self): self.certificate_files = create_certificate( self.get_mesh_tmp_dir(), "node1" ) def get_mesh_name(self): return self.config_files_dir.split("/")[-1] def get_mesh_tmp_dir(self): mesh_tmp_dir = "{}/{}".format(self.base_tmp_dir, self.mesh_name) return mesh_tmp_dir def __check_dependencies(self): """Check if we have the required dependencies raise an exception if we don't """ # Check if openssl binary is on the path try: subprocess.check_output(["openssl", "version"]) except FileNotFoundError: raise Exception( "openssl 
binary not found\n" 'Consider run "sudo dnf install openssl"' ) def __create_tmp_dir(self): mesh_tmp_dir_path = self.get_mesh_tmp_dir() # Clean up tmp directory and create a new one if os.path.exists(mesh_tmp_dir_path): shutil.rmtree(mesh_tmp_dir_path) os.mkdir(mesh_tmp_dir_path) return ReceptorMeshSetup() @pytest.fixture(scope="class") def receptor_bin_path(): """Returns the path to the receptor binary This fixture was created to make possible the use of multiple receptor binaries files. The default priority order is: - ../../tests/artifacts-output - The "receptor" available in the PATH Returns: str: Path to the receptor binary """ # Check if the receptor binary is in '../../tests/artifacts-output' and returns # the path to the binary if it is found. receptor_bin_path_from_test_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../tests/artifacts-output/", "receptor", ) if os.path.exists(receptor_bin_path_from_test_dir): return receptor_bin_path_from_test_dir # Check if the receptor binary is in '../../' and returns # the path to the binary if it is found. receptor_bin_path_from_test_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), "../../", "receptor", ) if os.path.exists(receptor_bin_path_from_test_dir): return receptor_bin_path_from_test_dir # Check if the receptor binary is in the path try: subprocess.check_output(["receptor", "--version"]) return "receptor" except subprocess.CalledProcessError: raise Exception( "Receptor binary not found in $PATH or in '../../tests/artifacts-output'" ) @pytest.fixture(scope="class") def default_socket_tcp(): return "tcp://localhost:11112" @pytest.fixture(scope="class") def default_socket_file(receptor_mesh): return receptor_mesh.get_mesh_tmp_dir() + "/node1.sock" @pytest.fixture(scope="class") def default_receptor_controller_socket_file(default_socket_file): return receptorctl.ReceptorControl(default_socket_file) @pytest.fixture(scope="class") def default_receptor_controller_tcp(default_socket_tcp): return receptorctl.ReceptorControl(default_socket_tcp) @pytest.fixture(scope="class") def default_receptor_controller_tcp_tls(default_socket_tcp, certificate_files): rootcas = certificate_files["caCrtPath"] key = certificate_files["clientKeyPath"] cert = certificate_files["clientCrtPath"] insecureskipverify = True controller = receptorctl.ReceptorControl( default_socket_tcp, rootcas=rootcas, key=key, cert=cert, insecureskipverify=insecureskipverify, ) return controller @pytest.fixture(scope="class") def receptor_nodes(): class ReceptorNodes: nodes = [] log_files = [] return ReceptorNodes() def receptor_nodes_kill(nodes): for node in nodes: node.kill() for node in nodes: node.wait(3) def import_config_from_node(node): """Receive a node and return the config file as a dict""" stream = open(node.args[2], "r") try: config_unflatten = yaml.safe_load(stream) except yaml.YAMLError as e: raise e stream.close() config = {} for c in config_unflatten: config.update(c) return config def receptor_mesh_wait_until_ready(nodes, receptor_controller): time.sleep(0.5) # Try up to 6 times tries = 0 while True: status = receptor_controller.simple_command("status") # Check if it has three known nodes if len(status["KnownConnectionCosts"]) == 3: break tries += 1 if tries > 6: raise Exception("Receptor Mesh did not start up") time.sleep(1) receptor_controller.close() @pytest.fixture(scope="class") def certificate_files(receptor_mesh): return receptor_mesh.certificate_files @pytest.fixture(scope="class") def 
default_receptor_controller_unix(receptor_mesh): return receptor_mesh.default_receptor_controller_unix() def start_nodes(receptor_mesh, receptor_nodes, receptor_bin_path): for i, config_file in enumerate(receptor_mesh.config_files): log_file_name = ( config_file.split("/")[-1].replace(".yaml", ".log").replace(".yml", ".log") ) receptor_nodes.log_files.append( open( os.path.join(receptor_mesh.get_mesh_tmp_dir(), log_file_name), "w", ) ) receptor_nodes.nodes.append( subprocess.Popen( [receptor_bin_path, "-c", config_file], stdout=receptor_nodes.log_files[i], stderr=receptor_nodes.log_files[i], ) ) @pytest.fixture(scope="class") def receptor_mesh_mesh1( receptor_bin_path, receptor_nodes, receptor_mesh, ): # Set custom config files dir receptor_mesh.setup("mesh1") # Start the receptor nodes processes start_nodes(receptor_mesh, receptor_nodes, receptor_bin_path) receptor_mesh_wait_until_ready( receptor_nodes.nodes, receptor_mesh.default_receptor_controller_unix() ) yield receptor_nodes_kill(receptor_nodes.nodes) @pytest.fixture(scope="class") def receptor_mesh_access_control( receptor_bin_path, receptor_nodes, receptor_mesh, ): # Set custom config files dir receptor_mesh.setup("access_control", "node2.sock") # Create PEM key for signed work key_path = os.path.join(receptor_mesh.get_mesh_tmp_dir(), "signwork_key") subprocess.check_output( [ "ssh-keygen", "-b", "2048", "-t", "rsa", "-f", key_path, "-q", "-N", "", ] ) # Start the receptor nodes processes start_nodes(receptor_mesh, receptor_nodes, receptor_bin_path) receptor_mesh_wait_until_ready( receptor_nodes.nodes, receptor_mesh.default_receptor_controller_unix() ) yield receptor_nodes_kill(receptor_nodes.nodes) @pytest.fixture(scope="function") def receptor_control_args(receptor_mesh): args = { "--socket": f"{receptor_mesh.get_mesh_tmp_dir()}/{receptor_mesh.socket_file_name}", "--config": None, "--tls": None, "--rootcas": None, "--key": None, "--cert": None, "--insecureskipverify": None, } return args @pytest.fixture(scope="function") def invoke(receptor_control_args): def f_invoke(command, args: list = []): """Invoke a command and return the original result. Args: command (click command): The command to invoke. args (list): The arguments to pass to the command. Returns: click.testing: The original result. """ def parse_args_to_list(args: dict): """Parse the args (dict) to a list of strings.""" arg_list = [] for k, v in args.items(): if v is not None: arg_list.append(str(k)) arg_list.append(str(v)) return arg_list # Since we may log errors/warnings on stderr we want to split stdout and stderr runner = CliRunner(mix_stderr=False) out = runner.invoke( receptorctl.cli.cli, parse_args_to_list(receptor_control_args) + [command.name] + args, ) return out return f_invoke @pytest.fixture(scope="function") def invoke_as_json(invoke): def f_invoke_as_json(command, args: list = []): """Invoke a command and return the original result and the json output. Args: command (click command): The command to invoke. args (list): The arguments to pass to the command. Returns: tuple: Tuple of the original result and the json output. 
""" result = invoke(command, ["--json"] + args) try: # JSON data should only be on stdout json_output = json.loads(result.stdout) except json.decoder.JSONDecodeError: pytest.fail("The command is not in json format") return result, json_output return f_invoke_as_json receptor-1.5.3/receptorctl/tests/lib.py000066400000000000000000000057351475465740700202100ustar00rootroot00000000000000import os import subprocess __OIDReceptorName = "1.3.6.1.4.1.2312.19.1" __OIDReceptorNameFormat = "UTF8" def __init__(): pass def create_certificate(tmp_dir: str, commonName: str = "localhost"): def generate_cert(name, commonName): keyPath = os.path.join(tmp_dir, name + ".key") crtPath = os.path.join(tmp_dir, name + ".crt") subprocess.check_output(["openssl", "genrsa", "-out", keyPath, "2048"]) subprocess.check_output( [ "openssl", "req", "-x509", "-new", "-nodes", "-key", keyPath, "-subj", "/C=/ST=/L=/O=/OU=ReceptorTesting/CN=ca", "-sha256", "-out", crtPath, ] ) return keyPath, crtPath def generate_cert_with_ca(name, caKeyPath, caCrtPath, commonName): keyPath = os.path.join(tmp_dir, name + ".key") crtPath = os.path.join(tmp_dir, name + ".crt") csrPath = os.path.join(tmp_dir, name + ".csa") extPath = os.path.join(tmp_dir, name + ".ext") # create x509 extension with open(extPath, "w") as ext: # DNSName to SAN ext.write("subjectAltName=DNS:" + commonName) # Receptor NodeID (otherName) to SAN ext.write( ",otherName:" + __OIDReceptorName + ";" + __OIDReceptorNameFormat + ":" + commonName ) ext.close() subprocess.check_output(["openssl", "genrsa", "-out", keyPath, "2048"]) # create cert request subprocess.check_output( [ "openssl", "req", "-new", "-sha256", "-key", keyPath, "-subj", "/C=/ST=/L=/O=/OU=ReceptorTesting/CN=" + commonName, "-out", csrPath, ] ) # sign cert request subprocess.check_output( [ "openssl", "x509", "-req", "-extfile", extPath, "-in", csrPath, "-CA", caCrtPath, "-CAkey", caKeyPath, "-CAcreateserial", "-out", crtPath, "-sha256", ] ) return keyPath, crtPath # Create a new CA caKeyPath, caCrtPath = generate_cert("ca", "ca") clientKeyPath, clientCrtPath = generate_cert_with_ca( "client", caKeyPath, caCrtPath, commonName ) generate_cert_with_ca("server", caKeyPath, caCrtPath, commonName) return { "caKeyPath": caKeyPath, "caCrtPath": caCrtPath, "clientKeyPath": clientKeyPath, "clientCrtPath": clientCrtPath, } receptor-1.5.3/receptorctl/tests/mesh-definitions/000077500000000000000000000000001475465740700223235ustar00rootroot00000000000000receptor-1.5.3/receptorctl/tests/mesh-definitions/access_control/000077500000000000000000000000001475465740700253245ustar00rootroot00000000000000receptor-1.5.3/receptorctl/tests/mesh-definitions/access_control/node1.yaml000066400000000000000000000012201475465740700272110ustar00rootroot00000000000000--- - node: id: node1 - log-level: debug - tcp-listener: port: 12111 - control-service: filename: /tmp/receptorctltest/access_control/node1.sock - work-signing: privatekey: /tmp/receptorctltest/access_control/signwork_key tokenexpiration: 10h30m - work-verification: publickey: /tmp/receptorctltest/access_control/signwork_key.pub - work-command: worktype: signed-echo command: bash params: "-c \"for w in {1..4}; do echo ${line^^}; sleep 1; done\"" verifysignature: true - work-command: workType: unsigned-echo command: bash params: "-c \"for w in {1..4}; do echo ${line^^}; sleep 1; done\"" ... 
receptor-1.5.3/receptorctl/tests/mesh-definitions/access_control/node2.yaml000066400000000000000000000003161475465740700272170ustar00rootroot00000000000000--- - node: id: node2 - log-level: debug - tcp-peer: address: localhost:12111 - tcp-listener: port: 12121 - control-service: filename: /tmp/receptorctltest/access_control/node2.sock ... receptor-1.5.3/receptorctl/tests/mesh-definitions/access_control/node3.yaml000066400000000000000000000002551475465740700272220ustar00rootroot00000000000000--- - node: id: node3 - log-level: debug - tcp-peer: address: localhost:12121 - control-service: filename: /tmp/receptorctltest/access_control/node3.sock ... receptor-1.5.3/receptorctl/tests/mesh-definitions/mesh1/000077500000000000000000000000001475465740700233405ustar00rootroot00000000000000receptor-1.5.3/receptorctl/tests/mesh-definitions/mesh1/node1.yaml000066400000000000000000000007751475465740700252430ustar00rootroot00000000000000--- - node: id: node1 - tcp-listener: port: 11111 - control-service: filename: /tmp/receptorctltest/mesh1/node1.sock - tcp-server: port: 11112 remotenode: localhost remoteservice: control - tls-server: name: tlsserver key: /tmp/receptorctltest/mesh1/server.key cert: /tmp/receptorctltest/mesh1/server.crt requireclientcert: true clientcas: /tmp/receptorctltest/mesh1/ca.crt - control-service: service: ctltls tcplisten: 11113 tcptls: tlsserver ... receptor-1.5.3/receptorctl/tests/mesh-definitions/mesh1/node2.yaml000066400000000000000000000002611475465740700252320ustar00rootroot00000000000000--- - node: id: node2 - tcp-peer: address: localhost:11111 - tcp-listener: port: 11121 - control-service: filename: /tmp/receptorctltest/mesh1/node2.sock ... receptor-1.5.3/receptorctl/tests/mesh-definitions/mesh1/node3.yaml000066400000000000000000000007161475465740700252400ustar00rootroot00000000000000--- - node: id: node3 - tcp-peer: address: localhost:11121 - control-service: filename: /tmp/receptorctltest/mesh1/node3.sock - work-command: worktype: sleep command: bash params: "-c \"read N_ITER; for i in `seq 1 $N_ITER`; do echo $((${N_ITER}-${i}+1)) 'remaining'; sleep 1; done\"" allowruntimeparams: true - work-command: workType: echo-uppercase command: bash params: "-c \"read PAYLOAD; echo ${PAYLOAD^^}\"" ... 
receptor-1.5.3/receptorctl/tests/test_cli.py000066400000000000000000000014601475465740700212370ustar00rootroot00000000000000from receptorctl import cli as commands # The goal is to write tests following the click documentation: # https://click.palletsprojects.com/en/8.0.x/testing/ import pytest @pytest.mark.usefixtures("receptor_mesh_mesh1") class TestCLI: def test_cli_cmd_status(self, invoke_as_json): result, json_output = invoke_as_json(commands.status, []) assert result.exit_code == 0 assert set( [ "Advertisements", "Connections", "KnownConnectionCosts", "NodeID", "RoutingTable", "SystemCPUCount", "SystemMemoryMiB", "Version", ] ) == set( json_output.keys() ), "The command returned unexpected keys from json output" receptor-1.5.3/receptorctl/tests/test_connection.py000066400000000000000000000044571475465740700226400ustar00rootroot00000000000000import pytest @pytest.mark.usefixtures("receptor_mesh_mesh1") class TestReceptorCtlConnection: def test_connect_to_service(self, default_receptor_controller_unix): node1_controller = default_receptor_controller_unix node1_controller.connect_to_service("node2", "control", "") node1_controller.handshake() status = node1_controller.simple_command("status") node1_controller.close() assert status["NodeID"] == "node2" def test_simple_command(self, default_receptor_controller_unix): node1_controller = default_receptor_controller_unix status = node1_controller.simple_command("status") node1_controller.close() assert not ( set( [ "Advertisements", "Connections", "KnownConnectionCosts", "NodeID", "RoutingTable", ] ) - status.keys() ) def test_simple_command_fail(self, default_receptor_controller_unix): node1_controller = default_receptor_controller_unix with pytest.raises(RuntimeError): node1_controller.simple_command("doesnotexist") node1_controller.close() def test_tcp_control_service(self, default_receptor_controller_tcp): node1_controller = default_receptor_controller_tcp status = node1_controller.simple_command("status") node1_controller.close() assert not ( set( [ "Advertisements", "Connections", "KnownConnectionCosts", "NodeID", "RoutingTable", ] ) - status.keys() ) def test_tcp_control_service_tls(self, default_receptor_controller_tcp_tls): node1_controller = default_receptor_controller_tcp_tls status = node1_controller.simple_command("status") node1_controller.close() assert not ( set( [ "Advertisements", "Connections", "KnownConnectionCosts", "NodeID", "RoutingTable", ] ) - status.keys() ) receptor-1.5.3/receptorctl/tests/test_mesh.py000066400000000000000000000040001475465740700214150ustar00rootroot00000000000000from receptorctl import cli as commands # The goal is to write tests following the click documentation: # https://click.palletsprojects.com/en/8.0.x/testing/ import pytest import time @pytest.mark.usefixtures("receptor_mesh_access_control") class TestMeshFirewall: def test_work_unsigned(self, invoke, receptor_nodes): """Run a unsigned work-command Steps: 1. Create node1 with a unsigned work-command 2. Create node2 3. Run from node2 a unsigned work-command to node1 4. 
Expect to be accepted """ # Run an unsigned command result = invoke( commands.work, "submit unsigned-echo --node node1 --no-payload".split(), ) work_unit_id = result.stdout.split("Unit ID: ")[-1].replace("\n", "") time.sleep(5) assert result.exit_code == 0 # Release unsigned work result = invoke(commands.work, f"release {work_unit_id}".split()) time.sleep(5) assert result.exit_code == 0 # DISABLE UNTIL THE FIX BEING IMPLEMENTED # # def test_work_signed_expect_block(self, invoke, receptor_nodes): # """Run a signed work-command without the right key # and expect to be blocked. # Steps: # 1. Create node1 with a signed work-command # 2. Create node2 # 3. Run from node2 a signed work-command to node1 # 4. Expect to be blocked # """ # # Run an unsigned command # result = invoke( # commands.work, "submit signed-echo --node node1 --no-payload".split() # ) # work_unit_id = result.stdout.split("Unit ID: ")[-1].replace("\n", "") # time.sleep(5) # assert work_unit_id, "Work unit ID should not be empty" # assert result.exit_code != 0, "Work signed run should fail, but it worked" # # Release unsigned work # result = invoke(commands.work, f"release {work_unit_id}".split()) # assert result.exit_code == 0, "Work release failed" receptor-1.5.3/receptorctl/tests/test_workunit.py000066400000000000000000000125671475465740700223640ustar00rootroot00000000000000# The goal is to write tests following the click documentation: # https://click.palletsprojects.com/en/8.0.x/testing/ import pytest import time @pytest.fixture(scope="function") def wait_for_workunit_state(): def _wait_for_workunit_state( node_controller, unitid: str, expected_detail: str = None, expected_state_name: str = None, timeout_seconds: int = 30, ) -> bool: """Wait for a workunit to finish At least 'expected_detail' or 'expected_state_name' must be specified. 
Args: node_controller: The node controller used to create the workunit unitid: The unitid of the workunit to wait for expected_detail: The expected detail of the workunit expected_state_name: The expected state name of the workunit timeout_seconds: The number of seconds to wait before timing out Returns: True if the workunit finished, False if it timed out """ if expected_detail is None and expected_state_name is None: raise ValueError( "At least 'expected_detail' or 'expected_state_name' must be specified" ) remaining_time = timeout_seconds if expected_detail is not None: for _ in range(remaining_time): status = node_controller.simple_command("work status {}".format(unitid)) if status["Detail"] == expected_detail: return True else: time.sleep(1) remaining_time -= 1 if remaining_time <= 0: return False if expected_state_name is not None: for _ in range(remaining_time): status = node_controller.simple_command("work status {}".format(unitid)) if status["StateName"] == expected_state_name: return True else: time.sleep(1) remaining_time -= 1 return False return _wait_for_workunit_state @pytest.fixture(scope="function") def wait_for_work_finished(wait_for_workunit_state): def _wait_for_work_finished( node_controller, unitid: str, timeout_seconds: int = 30 ) -> bool: """Wait for a workunit to finish Args: node_controller: The node controller used to create the workunit unitid: The unitid of the workunit to wait for timeout_seconds: The number of seconds to wait before timing out Returns: True if the workunit finished, False if it timed out """ return wait_for_workunit_state( node_controller, unitid, expected_detail="exit status 0", expected_state_name="Succeeded", timeout_seconds=timeout_seconds, ) return _wait_for_work_finished @pytest.mark.usefixtures("receptor_mesh_mesh1") class TestWorkUnit: def test_workunit_simple( self, invoke_as_json, default_receptor_controller_socket_file, wait_for_work_finished, ): # Spawn a long running command node1_controller = default_receptor_controller_socket_file wait_for = 5 # in seconds payload = "That's a long string example! And there's emoji too! 
👾" work = node1_controller.submit_work("echo-uppercase", payload, node="node3") state_result = work.pop("result") state_unitid = work.pop("unitid") assert state_result == "Job Started" assert wait_for_work_finished( node1_controller, state_unitid, wait_for ), "Workunit timed out and never finished" work_result = ( node1_controller.get_work_results(state_unitid) .read() .decode("utf-8") .strip() ) assert payload.upper() == work_result, ( f"Workunit did not report the expected result:\n - payload: {payload}" f"\n - work_result: {work_result}" ) node1_controller.close() def test_workunit_cmd_cancel( self, invoke_as_json, default_receptor_controller_socket_file, wait_for_workunit_state, ): # Spawn a long running command node1_controller = default_receptor_controller_socket_file sleep_for = 9999 # in seconds wait_for = 15 # in seconds work = node1_controller.submit_work("sleep", str(sleep_for), node="node3") state_result = work.pop("result") state_unitid = work.pop("unitid") assert state_result == "Job Started" # HACK: Wait for the workunit to start # receptor should be able to cancel the workunit with this time.sleep(5) # Run and check cancel command cancel_output = node1_controller.simple_command(f"work cancel {state_unitid}") assert cancel_output["cancelled"] == state_unitid # Wait workunit detail == 'Cancelled' assert wait_for_workunit_state( node1_controller, state_unitid, expected_detail="Canceled", timeout_seconds=wait_for, ), "Workunit timed out and never finished" # Get work list and check for the workunit detail state work_list = node1_controller.simple_command("work list") assert work_list[state_unitid]["Detail"] == "Canceled" node1_controller.close() receptor-1.5.3/setup.cfg000077700000000000000000000000001475465740700213512receptorctl/setup.cfgustar00rootroot00000000000000receptor-1.5.3/tests/000077500000000000000000000000001475465740700145305ustar00rootroot00000000000000receptor-1.5.3/tests/.gitignore000066400000000000000000000000221475465740700165120ustar00rootroot00000000000000artifacts-output/ receptor-1.5.3/tests/Makefile000066400000000000000000000035451475465740700161770ustar00rootroot00000000000000CONTAINER_IMAGE_TAG_builder=ansible/receptor:builder CONTAINER_IMAGE_TAG_dev=ansible/receptor:dev CONTAINER_RUN:=$$(./receptor-tester.sh container-runtime) # Tests GO_FUNCTIONAL_TESTS_DIRS:=$$(find ./functional -type d) # Generate artifacts artifacts: container-image-builder mkdir -p $(PWD)/artifacts-output $(CONTAINER_RUN) run --rm \ -v $(PWD)/../:/source/ \ -v $(PWD)/artifacts-output/:/artifacts/:rw \ -v receptor_go_root_cache:/root/go:rw \ -e OUTPUT_UID=$$(id -u) \ $(CONTAINER_IMAGE_TAG_builder) \ /build-artifacts.sh artifacts-no-cache: container-image-builder mkdir -p $(PWD)/artifacts $(CONTAINER_RUN) run --rm \ -v $(PWD)/../:/source/:ro \ -v $(PWD)/artifacts/:/artifacts/:rw \ $(CONTAINER_IMAGE_TAG_builder) \ /build-artifacts.sh # Container environment container-image-dev: $(CONTAINER_RUN) build \ -t $(CONTAINER_IMAGE_TAG_dev) \ -f ./environments/container-dev/Containerfile \ ./../ container-image-builder: $(CONTAINER_RUN) build \ -t $(CONTAINER_IMAGE_TAG_builder) \ -f ./environments/container-builder/Containerfile \ ./../ container-image-dev-shell: $(CONTAINER_RUN) run -it --rm \ $(CONTAINER_IMAGE_TAG_dev) bash container-image-builder-shell: $(CONTAINER_RUN) run -it --rm \ -v $(PWD)/../:/source/:ro \ -v receptor_go_root_cache:/root/go:rw \ $(CONTAINER_IMAGE_TAG_builder) bash -c \ 'cp -r /source /build; cd /build; bash' # VM (vagrant) vm: vm-create vm-create: cd 
environments/vagrant && \ vagrant up shell: vm-shell vm-shell: @cd environments/vagrant && \ vagrant ssh -c "cd /vagrant/tests && /bin/bash" vm-destroy: cd environments/vagrant && \ vagrant destroy vm-provision: cd environments/vagrant && \ vagrant provision # Colored output ccblue=$(echo -e "\033[0;31m") ccred=$(echo -e "\033[0;31m") ccyellow=$(echo -e "\033[0;33m") ccend=$(echo -e "\033[0m") # Other configs SHELL:=/bin/bash receptor-1.5.3/tests/README.md000066400000000000000000000046231475465740700160140ustar00rootroot00000000000000# Receptor tests All test files and test tools can be found on this directory. ```java . |-- artifacts : "receptor-tester.sh builds output" |-- environments : "Controlled environments for testing" | |-- container : "Containerfile recipe" | `-- vagrant : "VM recipe" `-- functional : "Functional test files" |-- cli |-- lib `-- mesh ``` ## Tests 1. Functional tests and correspondent docs can be found here: [./functional/README.md](./functional/README.md) ## Tools There are two parts of Receptor tools. Each one is used for different scenarios. Requirements: - podman - make ### receptor-tester All features will be showed on `help` argument: ```bash ./receptor-tester.sh help # Command list: # list-dirs - list all available tests directories # list-files - list all available tests files # run - run a specific test # run-all - run all tests. Returns 0 if pass # help - Displays this help text ``` List all available tests: ```bash # list all available tests directories ./receptor-tester.sh list-dirs # ./functional/mesh # ./functional/cli # ./functional/lib/utils # List all available tests files ./receptor-tester.sh list-files # ./functional/mesh/mesh_test.go # ./functional/mesh/work_test.go # ./functional/mesh/tls_test.go # ./functional/cli/cli_test.go # ./functional/lib/utils/utils_test.go ``` Run tests: ```bash # run a specific test ./receptor-tester.sh run ./functional/cli/cli_test.go # run all tests ./receptor-tester.sh run-all ``` ### Makefile Build artifacts (receptor and receptorctl) based on latest source code. The container recipe used can be found at `environments/container`. ```bash # Build artifacts make artifacts ``` Container commands can be used in conjunction with `make artifacts` or isolated to run ad-hoc commands inside a controlled environment. ```bash # Rebuild container image used # by `make artifacts` make container-image # OR make container-image-base # Jump into a container created from # the same container image used by # `make artifacts` make container-shell-base ``` If you're using Windows or macOS, the following commands can be useful to create a virtual machine and play aroung with containers inside there. 
Requirements: - vagrant ```bash # Creates a Vagrant VM make vm # OR make vm-create # SSH into VM make vm-shell # Destroy VM make vm-destroy # Reapply Ansible playbook into VM make vm-provision ``` receptor-1.5.3/tests/environments/000077500000000000000000000000001475465740700172575ustar00rootroot00000000000000receptor-1.5.3/tests/environments/container-builder/000077500000000000000000000000001475465740700226655ustar00rootroot00000000000000receptor-1.5.3/tests/environments/container-builder/Containerfile000066400000000000000000000024031475465740700253710ustar00rootroot00000000000000FROM quay.io/centos/centos:stream9 # Python options = [3.9, 3.11, 3.12] ARG PYTHON_VERSION=3.12 ENV PATH=${PATH}:/usr/local/go/bin RUN set -x \ && echo 'fastestmirror=True' >> /etc/dnf/dnf.conf \ && dnf update -y \ # Receptor build tools && dnf install -y \ findutils \ git \ iproute \ make \ openssl \ wget \ # Install specific python version && dnf install -y \ python${PYTHON_VERSION} \ python${PYTHON_VERSION}-pip \ && pip${PYTHON_VERSION} install virtualenv \ # Install specific golang version && dnf install -y \ golang \ && dnf clean all # --- ALL IMAGE MUST BE THE SAME UNTIL NOW --- # Caching dependencies WORKDIR /dependencies ADD ./go.mod \ ./go.sum \ ../receptorctl/requirements/tests.txt \ ../receptorctl/requirements/tests.in ./ RUN set -x \ # Go && go get -u golang.org/x/lint/golint \ && go get -d -v ./... \ # Python && virtualenv -p python${PYTHON_VERSION} /opt/venv \ && source /opt/venv/bin/activate \ && pip${PYTHON_VERSION} install \ --upgrade \ -r tests.in \ -c tests.txt ADD ./tests/environments/container-builder/build-artifacts.sh / RUN chmod +x /build-artifacts.sh WORKDIR / receptor-1.5.3/tests/environments/container-builder/README.md000066400000000000000000000002171475465740700241440ustar00rootroot00000000000000# Receptor base container image This container image is used as a controlled environment to build and run Receptor for CI and automate tests. 
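The `tests/Makefile` shown earlier drives this image through `make artifacts`. As a rough sketch of the equivalent manual invocation (assuming `podman` as the container runtime and a shell at the repository root; the Go module cache volume from the Makefile is omitted here):

```bash
# Build the builder image (tag and Containerfile path taken from tests/Makefile)
podman build -t ansible/receptor:builder \
    -f tests/environments/container-builder/Containerfile .

# Run /build-artifacts.sh inside it, mounting the source tree and an output directory
mkdir -p tests/artifacts-output
podman run --rm \
    -v "$(pwd)":/source/ \
    -v "$(pwd)/tests/artifacts-output":/artifacts/:rw \
    -e OUTPUT_UID="$(id -u)" \
    ansible/receptor:builder /build-artifacts.sh
```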
receptor-1.5.3/tests/environments/container-builder/build-artifacts.sh000066400000000000000000000013201475465740700262720ustar00rootroot00000000000000#!/bin/bash set -ex SOURCE_DIR=/source BUILD_DIR=/build ARTIFACTS_DIR=/artifacts OUTPUT_UID=${OUTPUT_UID:-1000} OUTPUT_GID=${OUTPUT_GID:-$OUTPUT_UID} # Copy all content cp -r ${SOURCE_DIR}/ ${BUILD_DIR} # Build receptor cd ${BUILD_DIR} make clean # prevent fail on dev environment make receptor # Build receptorctl cd ${BUILD_DIR}/receptorctl source /opt/venv/bin/activate # uses the currect Python version python -m build # Move packages mkdir -p ${ARTIFACTS_DIR}/dist rm -f ${ARTIFACTS_DIR}/receptor cp ${BUILD_DIR}/receptor ${ARTIFACTS_DIR}/receptor rm -rf ${ARTIFACTS_DIR}/dist cp -r ${BUILD_DIR}/receptorctl/dist/ ${ARTIFACTS_DIR}/dist # Fix permissions chown -R ${OUTPUT_UID}:${OUTPUT_GID} ${ARTIFACTS_DIR} receptor-1.5.3/tests/environments/container-dev/000077500000000000000000000000001475465740700220155ustar00rootroot00000000000000receptor-1.5.3/tests/environments/container-dev/Containerfile000066400000000000000000000044141475465740700245250ustar00rootroot00000000000000FROM centos:8 # Python options = [3.8, 3.9] ARG PYTHON_VERSION=3.8 ENV PATH=${PATH}:/usr/local/go/bin RUN set -x \ && echo 'fastestmirror=True' >> /etc/dnf/dnf.conf \ && dnf update -y \ # Receptor build tools && dnf install -y \ git wget make iproute openssl findutils virtualenv \ # Install specific python version && export PYTHON_PKG_NAME=python$(echo ${PYTHON_VERSION} | sed 's/\.//g') \ && dnf module install -y ${PYTHON_PKG_NAME} \ && alternatives --set python /usr/bin/python${PYTHON_VERSION} \ # Install specific golang version && dnf install -y golang \ && dnf clean all # --- ALL IMAGE MUST BE THE SAME UNTIL NOW --- # Build steps WORKDIR /dependencies ADD . . RUN set -x \ # Go dependencies && go get -u golang.org/x/lint/golint \ && go get -d -v ./... 
\ # Python dependencies && virtualenv -p python${PYTHON_VERSION} /opt/venv \ && source /opt/venv/bin/activate \ && cd receptorctl \ && pip3 install -r requirements.txt \ && pip3 install --upgrade -r build-requirements.txt \ && cd - \ # Build receptor && make receptor \ # Build receptorctl && cd receptorctl \ && python -m build # Final image FROM centos:8 ARG PYTHON_VERSION=3.8 ENV PATH=${PATH}:/opt/venv/bin/ ENV RECEPTORCTL_SOCKET=/tmp/receptor.sock RUN set -x \ && echo 'fastestmirror=True' >> /etc/dnf/dnf.conf \ && dnf update -y \ # OS dependencies && dnf install -y \ virtualenv podman \ # Install specific python version && export PYTHON_PKG_NAME=python$(echo ${PYTHON_VERSION} | sed 's/\.//g') \ && dnf module install -y ${PYTHON_PKG_NAME} \ && alternatives --set python /usr/bin/python${PYTHON_VERSION} \ && dnf clean all \ # Python virtualenv && virtualenv -p python${PYTHON_VERSION} /opt/venv \ && source /opt/venv/bin/activate \ && pip install ansible-runner COPY --from=0 /dependencies/receptor /usr/local/bin/receptor COPY --from=0 /dependencies/receptorctl/dist/* /tmp/receptorctl_dist/ ADD ./tests/environments/container-dev/receptor.conf /etc/receptor.conf # Install RUN set -x \ && source /opt/venv/bin/activate \ && pip install /tmp/receptorctl_dist/receptorctl-*.whl CMD ["/usr/local/bin/receptor", "--config", "/etc/receptor.conf"] receptor-1.5.3/tests/environments/container-dev/receptor.conf000066400000000000000000000004611475465740700245100ustar00rootroot00000000000000# Default Receptor config file - log-level: debug # Receptor node name - node: id: worker # File socket - control-service: service: control filename: /tmp/receptor.sock - tcp-listener: port: 2345 - work-command: workType: echocafe command: bash params: "-c \"echo cafe\"" receptor-1.5.3/tests/functional/000077500000000000000000000000001475465740700166725ustar00rootroot00000000000000receptor-1.5.3/tests/functional/README.md000066400000000000000000000115221475465740700201520ustar00rootroot00000000000000# Receptor Functional Test Docs This document will serve as a reference for how the upstream receptor tests work. This will cover both high level and low level details of how the tests are setup and run including the assumptions being made and the best way to add tests going forward. ## Test Dependencies The following are needed to run the integration tests: * `receptor` must be in your path, I have symlinked `~/bin/receptor` to the `receptor` binary in my repo so my current dev build is always in my path. * `lsof` is used to check if processes are bound to the right ports * In order to run the kubernetes integration tests you must have a running kubernetes and a `~/.kube/config` that has access to the default namespace as the kubernetes tests will expect they can run there. To skip kubernetes tests set `SKIP_KUBE=1` in your environment. * In order to run the receptorctl tests you must have python3, virtualenv, and pip installed. The `receptorctl-tests` make target will setup a virtualenv for the tests. * `openssl` must be installed on the system in order for the cli tests to run properly. ## CLI Tests The cli tests are found in `functional/cli/cli_test.go`. The intention for these tests is to validate that the cli handles flags correctly. For example the flags that take bools correctly interpret their input as bools and those that take maps are able to take a string of json and convert it to a map. These tests are fairly simple because of this and wont be a large focus of this document. 
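The snippet below is a condensed, illustrative sketch of that pattern; it mirrors the existing `TestNegativeCost` case in `functional/cli/cli_test.go` rather than introducing a new check, and assumes only that a `receptor` binary is in `$PATH`, as the real tests do.

```go
package main

import (
	"bytes"
	"os/exec"
	"strings"
	"testing"
	"time"
)

// Sketch of the CLI-flag test pattern: start receptor with the flag value under
// test, then assert on how it was interpreted. A negative connection cost should
// be rejected with a validation error.
func TestNegativeCostSketch(t *testing.T) {
	out := bytes.Buffer{}
	cmd := exec.Command("receptor", "--node", "id=test", "--tcp-listener", "port=0", "cost=-1")
	cmd.Stdout = &out
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	time.Sleep(time.Second) // give receptor a moment to parse flags and exit
	cmd.Process.Kill()
	cmd.Wait()
	if !strings.Contains(out.String(), "connection cost must be positive") {
		t.Fatalf("expected cost validation error, got: %s", out.String())
	}
}
```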
## Mesh Tests

The mesh tests are found in `functional/mesh/`. These tests are the most complicated and provide the most value. They test that the receptor mesh works as expected: nodes can connect to each other, route packets, start work, etc. In many cases this is where tests should be added.

Each test launches its own mesh by using YAML or the in-language specification and calling a helper function to build a mesh based on that mesh definition (see the sketch at the end of this document). These meshes can be built in 3 ways:

* As part of the test process (LibMesh and LibNode)
* As new processes using the cli (CliMesh and CliNode)
* As containers using docker or podman (ContainerMesh and ContainerNode)

In most cases the CliMesh is appropriate, and almost all existing tests use it. The LibMesh was made to allow testing receptor as a library imported into other golang projects and was used to find some bugs; however, at this point it is unclear when/if receptor will be used that way. The LibNodes also don't support all receptor features, since the mesh building code must manually import the right functions and call them to set up the receptor node. For these 2 reasons the LibMesh should probably be avoided unless you specifically want to test using receptor as an importable library. Finally, the ContainerMeshes are essentially CliMeshes wrapped in a container. The initial intention was to use these to simulate various network conditions using tc (which is supported in the mesh definition and building for ContainerMeshes), and I have used them for that purpose in somewhat manual ways, but no existing automation uses them.

One thing to note is that all nodes in a mesh in our test suite are always of the same type; you cannot mix CLINodes with LibNodes. I decided to keep that separation for simplicity, though in practice there is no reason it can't be done.

Another thing to consider is that when running tests in parallel we make some assumptions to keep port usage easy to reason about, so nodes don't try to use overlapping ports. LibNodes select their own ports by asking the OS for an unused port. ContainerNodes and CLINodes use a shared port pool and reserve ports to use. However, between the time a port is reserved and the time the process starts, another process could grab that port; we are unable to prevent this. The most likely scenario that could cause this is running LibNodes and CLI/ContainerNodes at the same time. To reduce the chance of this we only run 1 test binary at once, although the various tests in that binary can run in parallel.

Mesh logs are saved to a unique directory in `/tmp/receptor-tests/` based on the name of the test. All the config files required to start the nodes are also stored in this directory, which makes it easy to navigate to and debug nodes when a test fails.

## Automation

The receptor tests run in GitHub Actions and must pass before a PR is merged. There is a make `ci` target that does linting and building and runs the receptor tests as well as the receptorctl tests; this is what GitHub Actions runs. The GitHub Actions jobs run in a docker container that is built nightly and uploaded to the GitHub image registry. The tests also start a minikube k8s cluster for testing the k8s integration. If the tests fail, we collect all the node logs for each test mesh and archive them; however, we do not have a way to generate a junit test report and archive that.
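For reference, here is a minimal sketch of what an in-process (LibMesh) test looks like, written against the helpers defined in `functional/mesh/lib.go` and `functional/mesh/factories.go` (`NewLibMesh`, `NewLibNode`, `newListenerCfg`, `WaitForReady`, `ReceptorControl`). The node names, timeout, and test name below are illustrative and are not taken from an existing test.

```go
package mesh

import (
	"context"
	"testing"
	"time"
)

// Sketch of a LibMesh-based test: build a two-node mesh over TCP, wait for the
// mesh to converge, then ping across a control socket.
func TestTwoNodeMeshSketch(t *testing.T) {
	m := NewLibMesh()
	defer m.WaitForShutdown()
	defer m.Destroy()

	// node1 listens on a random TCP port; node2 dials out to it.
	node1 := m.NewLibNode("node1")
	node1.ListenerCfgs = map[listenerName]ListenerCfg{
		"tcp": newListenerCfg("tcp", "", 1, nil),
	}
	node2 := m.NewLibNode("node2")
	node2.Connections = []Connection{
		{RemoteNode: node1, Protocol: "tcp"},
	}

	if err := m.Start(t.Name()); err != nil {
		t.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()
	if err := m.WaitForReady(ctx); err != nil {
		t.Fatal(err)
	}

	// Connect to node2's control socket and ping node1 across the mesh.
	controller := NewReceptorControl()
	if err := controller.Connect(node2.GetControlSocket()); err != nil {
		t.Fatal(err)
	}
	defer controller.Close()
	if _, err := controller.Ping("node1"); err != nil {
		t.Error(err)
	}
}
```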
receptor-1.5.3/tests/functional/cli/000077500000000000000000000000001475465740700174415ustar00rootroot00000000000000receptor-1.5.3/tests/functional/cli/cli_test.go000066400000000000000000000154141475465740700216030ustar00rootroot00000000000000package main import ( "bytes" "context" "fmt" "os/exec" "strconv" "strings" "testing" "time" "github.com/ansible/receptor/tests/utils" ) func ConfirmListening(pid int, proto string) (bool, error) { out := bytes.Buffer{} cmd := exec.Command("lsof", "-tap", fmt.Sprint(pid), "-i", proto) cmd.Stdout = &out cmd.Run() if strings.Contains(out.String(), fmt.Sprint(pid)) { return true, nil } return false, nil } func TestHelp(t *testing.T) { t.Parallel() cmd := exec.Command("receptor", "--help") if err := cmd.Run(); err != nil { t.Fatal(err) } } func TestListeners(t *testing.T) { testTable := []struct { listener string listenProto string }{ {"--tcp-listener", "TCP"}, {"--ws-listener", "TCP"}, {"--udp-listener", "UDP"}, } for _, data := range testTable { listener := data.listener listenProto := data.listenProto t.Run(listener, func(t *testing.T) { receptorStdOut := bytes.Buffer{} cmd := exec.Command("receptor", "--node", "id=test", listener, "port=0") cmd.Stdout = &receptorStdOut err := cmd.Start() if err != nil { t.Fatal(err) } defer cmd.Process.Wait() defer cmd.Process.Kill() ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) defer cancel1() success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { t.Fatal(err) } if !success { t.Fatalf("Timed out while waiting for backend to start:\n%s", receptorStdOut.String()) } }) } } func TestSSLListeners(t *testing.T) { testTable := []struct { listener string }{ {"--tcp-listener"}, {"--ws-listener"}, } for _, data := range testTable { listener := data.listener t.Run(listener, func(t *testing.T) { key, crt, err := utils.GenerateCert("test", "test", []string{"test"}, []string{"test"}) if err != nil { t.Fatal(err) } receptorStdOut := bytes.Buffer{} port, err := utils.GetFreeTCPPort() if err != nil { t.Fatal(err) } cmd := exec.Command("receptor", "--node", "id=test", "--tls-server", "name=server-tls", fmt.Sprintf("cert=%s", crt), fmt.Sprintf("key=%s", key), listener, fmt.Sprintf("port=%d", port), "tls=server-tls") cmd.Stdout = &receptorStdOut err = cmd.Start() if err != nil { t.Fatal(err) } defer cmd.Process.Wait() defer cmd.Process.Kill() checkFunc := func() bool { opensslStdOut := bytes.Buffer{} opensslStdIn := bytes.Buffer{} opensslCmd := exec.Command("openssl", "s_client", "-connect", "localhost:"+strconv.Itoa(port)) opensslCmd.Stdin = &opensslStdIn opensslCmd.Stdout = &opensslStdOut err = opensslCmd.Run() return err == nil } ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) defer cancel1() success := utils.CheckUntilTimeout(ctx1, 10*time.Millisecond, checkFunc) if !success { t.Fatalf("Timed out while waiting for tls backend to start:\n%s", receptorStdOut.String()) } }) } } func TestNegativeCost(t *testing.T) { testTable := []struct { listener string }{ {"--tcp-listener"}, {"--ws-listener"}, {"--udp-listener"}, } for _, data := range testTable { listener := data.listener t.Run(listener, func(t *testing.T) { receptorStdOut := bytes.Buffer{} cmd := exec.Command("receptor", "--node", "id=test", listener, "port=0", "cost=-1") cmd.Stdout = &receptorStdOut err := cmd.Start() if err != nil { t.Fatal(err) } // Wait for our process to hopefully run and quit time.Sleep(1500 * 
time.Millisecond) cmd.Process.Kill() cmd.Wait() if !strings.Contains(receptorStdOut.String(), "Error: connection cost must be positive") { t.Fatalf("Expected stdout: Error: connection cost must be positive, actual stdout: %v", receptorStdOut.String()) } }) } } func TestCostMap(t *testing.T) { testTable := []struct { listener string listenProto string costMaps []string }{ {"--tcp-listener", "TCP", []string{"{}", "{\"a\": 1}", "{\"a\": 1.1}", "{\"a\": 1.3, \"b\": 5.6, \"c\": 0.2}"}}, {"--ws-listener", "TCP", []string{"{}", "{\"a\": 1}", "{\"a\": 1.1}", "{\"a\": 1.3, \"b\": 5.6, \"c\": 0.2}"}}, {"--udp-listener", "UDP", []string{"{}", "{\"a\": 1}", "{\"a\": 1.1}", "{\"a\": 1.3, \"b\": 5.6, \"c\": 0.2}"}}, } for _, data := range testTable { listener := data.listener listenProto := data.listenProto costMaps := make([]string, len(data.costMaps)) copy(costMaps, data.costMaps) t.Run(listener, func(t *testing.T) { for _, costMap := range costMaps { costMapCopy := costMap t.Run(costMapCopy, func(t *testing.T) { receptorStdOut := bytes.Buffer{} cmd := exec.Command("receptor", "--node", "id=test", listener, "port=0", fmt.Sprintf("nodecost=%s", costMapCopy)) cmd.Stdout = &receptorStdOut err := cmd.Start() if err != nil { t.Fatal(err) } defer cmd.Process.Wait() defer cmd.Process.Kill() ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) defer cancel1() success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { t.Fatal(err) } if !success { t.Fatalf("Timed out while waiting for backend to start:\n%s", receptorStdOut.String()) } }) } }) } } func TestCosts(t *testing.T) { testTable := []struct { listener string listenProto string costs []string }{ {"--tcp-listener", "TCP", []string{"1", "1.5", "1.0", "0.2", "52", "23"}}, {"--ws-listener", "TCP", []string{"1", "1.5", "1.0", "0.2", "52", "23"}}, {"--udp-listener", "UDP", []string{"1", "1.5", "1.0", "0.2", "52", "23"}}, } for _, data := range testTable { listener := data.listener listenProto := data.listenProto costs := make([]string, len(data.costs)) copy(costs, data.costs) t.Run(listener, func(t *testing.T) { for _, cost := range costs { costCopy := cost t.Run(costCopy, func(t *testing.T) { t.Parallel() receptorStdOut := bytes.Buffer{} cmd := exec.Command("receptor", "--node", "id=test", listener, "port=0", fmt.Sprintf("cost=%s", costCopy)) cmd.Stdout = &receptorStdOut err := cmd.Start() if err != nil { t.Fatal(err) } defer cmd.Process.Wait() defer cmd.Process.Kill() ctx1, cancel1 := context.WithTimeout(context.Background(), 2*time.Second) defer cancel1() success, err := utils.CheckUntilTimeoutWithErr(ctx1, 10*time.Millisecond, func() (bool, error) { return ConfirmListening(cmd.Process.Pid, listenProto) }) if err != nil { t.Fatal(err) } if !success { t.Fatalf("Timed out while waiting for backend to start:\n%s", receptorStdOut.String()) } }) } }) } } receptor-1.5.3/tests/functional/mesh/000077500000000000000000000000001475465740700176265ustar00rootroot00000000000000receptor-1.5.3/tests/functional/mesh/conn_test.go000066400000000000000000000104671475465740700221610ustar00rootroot00000000000000package mesh import ( "context" "io" "net" "strings" "testing" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/netceptor" ) func TestQuicConnectTimeout(t *testing.T) { // // Note! // It is important that this test do not run in parellel since it modifies package-level variables. 
// defaultIdleTimeout := netceptor.MaxIdleTimeoutForQuicConnections defaultQuicKeepAlive := netceptor.KeepAliveForQuicConnections defer func() { netceptor.MaxIdleTimeoutForQuicConnections = defaultIdleTimeout netceptor.KeepAliveForQuicConnections = defaultQuicKeepAlive }() // Change MaxIdleTimeoutForQuicConnections to 1 seconds (default in lib is 30, our code is 60) netceptor.MaxIdleTimeoutForQuicConnections = 1 * time.Second // We also have to disable heart beats or the connection will not properly timeout netceptor.KeepAliveForQuicConnections = false // Create two nodes of the Receptor network-layer protocol (Netceptors). n1 := netceptor.New(context.Background(), "node1") n2 := netceptor.New(context.Background(), "node2") // Start a TCP listener on the first node b1, err := backends.NewTCPListener("localhost:3333", nil, n1.Logger) if err != nil { t.Fatalf("Error listening on TCP: %s\n", err) } err = n1.AddBackend(b1) if err != nil { t.Fatalf("Error starting backend: %s\n", err) } // Start a TCP dialer on the second node - this will connect to the listener we just started b2, err := backends.NewTCPDialer("localhost:3333", false, nil, n2.Logger) if err != nil { t.Fatalf("Error dialing on TCP: %s\n", err) } err = n2.AddBackend(b2) if err != nil { t.Fatalf("Error starting backend: %s\n", err) } // Start an echo server on node 1 l1, err := n1.Listen("echo", nil) if err != nil { t.Fatalf("Error listening on Receptor network: %s\n", err) } go func() { // Accept an incoming connection - note that conn is just a regular net.Conn conn, err := l1.Accept() if err != nil { t.Fatalf("Error accepting connection: %s\n", err) return } go func() { defer conn.Close() buf := make([]byte, 1024) done := false for !done { n, err := conn.Read(buf) if err == io.EOF { done = true } else if err != nil { // Is ok if we got a 'NO_ERROR: No recent network activity' error but anything else is a test failure. if strings.Contains(err.Error(), "no recent network activity") { t.Log("Successfully got the desired timeout error") } else { t.Fatalf("Read error in Receptor listener: %s\n", err) } return } if n > 0 { _, err := conn.Write(buf[:n]) if err != nil { t.Fatalf("Write error in Receptor listener: %s\n", err) return } } } }() }() // Connect to the echo server from node 2. We expect this to error out at first with // "no route to node" because it takes a second or two for node1 and node2 to exchange // routing information and form a mesh var c2 net.Conn for { c2, err = n2.Dial("node1", "echo", nil) if err != nil { time.Sleep(1 * time.Second) continue } break } // Sleep longer than MaxIdleTimeout (see pkg/netceptor/conn.go for current setting) sleepDuration := 6 * time.Second time.Sleep(sleepDuration) // Start a listener function that prints received data to the screen // Note that because net.Conn is a stream connection, it is not guaranteed // that received messages will be the same size as the messages that are sent. // For datagram use, Receptor also provides a net.PacketConn. go func() { rbuf := make([]byte, 1024) for { n, err := c2.Read(rbuf) if n > 0 { n2.Shutdown() t.Fatal("Should not have gotten data back") return } if err == io.EOF { // Shut down the whole Netceptor when any connection closes, because this is just a demo n2.Shutdown() t.Fatal("Should not have gotten an EOF") return } if err != nil { n2.Shutdown() return } } }() // Send some data, which should be processed through the echo server back to our // receive function and printed to the screen. 
_, err = c2.Write([]byte("Hello, world!")) if !(err != nil && err != io.EOF) { t.Fatal("We should have gotten an error here") } // Close our end of the connection _ = c2.Close() // Wait for n2 to shut down n2.BackendWait() // Gracefully shut down n1 n1.Shutdown() n1.BackendWait() } receptor-1.5.3/tests/functional/mesh/factories.go000066400000000000000000000210271475465740700221360ustar00rootroot00000000000000package mesh import ( "fmt" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/tests/utils" ) func flatMesh(proto string) *LibMesh { m := NewLibMesh() // Controller has no peers, only a listener controllerNodeID := "controller" controller := m.NewLibNode(controllerNodeID) controller.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } i := 1 // All nodes peer out to "controller" for i <= 15 { nodeID := fmt.Sprintf("node%d", i) node := m.NewLibNode(nodeID) node.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, } i++ } return &m } func randomMesh(proto string) *LibMesh { m := NewLibMesh() // Controller only has a listener controller := m.NewLibNode("controller") controller.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node1 := m.NewLibNode("node1") node2 := m.NewLibNode("node2") node3 := m.NewLibNode("node3") node4 := m.NewLibNode("node4") node5 := m.NewLibNode("node5") node6 := m.NewLibNode("node6") node7 := m.NewLibNode("node7") node8 := m.NewLibNode("node8") node9 := m.NewLibNode("node9") node10 := m.NewLibNode("node10") node11 := m.NewLibNode("node11") node12 := m.NewLibNode("node12") // node1 connects to controller node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node1.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, } // node2 connects to node1 node2.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto}, } // node3 connects to node4 and node6 node3.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node3.Connections = []Connection{ {RemoteNode: node4, Protocol: proto}, {RemoteNode: node6, Protocol: proto}, } // node4 connects to node2 and node7 node4.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node4.Connections = []Connection{ {RemoteNode: node2, Protocol: proto}, {RemoteNode: node7, Protocol: proto}, } // node5 connects to node8 and node12 node5.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node5.Connections = []Connection{ {RemoteNode: node8, Protocol: proto}, {RemoteNode: node12, Protocol: proto}, } // node6 connects to node10 node6.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node6.Connections = []Connection{ {RemoteNode: node10, Protocol: proto}, } // node7 connects to node1 and node3 node7.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node7.Connections = []Connection{ {RemoteNode: node1, Protocol: proto}, {RemoteNode: node3, Protocol: proto}, } // node8 connects to node1 node8.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node8.Connections = 
[]Connection{ {RemoteNode: node1, Protocol: proto}, } // node9 connects to node5 and node10 node9.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node9.Connections = []Connection{ {RemoteNode: node5, Protocol: proto}, {RemoteNode: node10, Protocol: proto}, } // node10 connects to node4 and node12 node10.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node10.Connections = []Connection{ {RemoteNode: node4, Protocol: proto}, {RemoteNode: node12, Protocol: proto}, } // node11 connects to node1 node11.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node11.Connections = []Connection{ {RemoteNode: node1, Protocol: proto}, } // node12 connects to controller and node11 node12.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node12.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, {RemoteNode: node11, Protocol: proto}, } return &m } func treeMesh(proto string) *LibMesh { m := NewLibMesh() // Controller only has a listener controller := m.NewLibNode("controller") controller.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } // node1 connects to controller node1 := m.NewLibNode("node1") node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node1.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, } // nodes2-4 connects to node1 for _, id := range []int{2, 3, 4} { node := m.NewLibNode(fmt.Sprintf("node%d", id)) node.Connections = []Connection{ {RemoteNode: node1, Protocol: proto}, } } // node5 connects to controller node5 := m.NewLibNode("node5") node5.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node5.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, } // node6-8 connects to node5 for _, id := range []int{6, 7, 8} { node := m.NewLibNode(fmt.Sprintf("node%d", id)) node.Connections = []Connection{ {RemoteNode: node5, Protocol: proto}, } } // node9 connects to controller node9 := m.NewLibNode("node9") node9.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } node9.Connections = []Connection{ {RemoteNode: controller, Protocol: proto}, } // node10-12 connects to node9 for _, id := range []int{10, 11, 12} { node := m.NewLibNode(fmt.Sprintf("node%d", id)) node.Connections = []Connection{ {RemoteNode: node9, Protocol: proto}, } } return &m } // used in work_test.go. 
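// workTestMesh builds a three-node mesh (node1 -> node2 <- node3) with mutual TLS
// signed by a freshly generated CA, and registers the echosleep* workers from
// fixtures.go on node2 and node3 for the given work plugin ("command" or "kube").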
func workTestMesh(workPluginName workPlugin) *LibMesh { caKey, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { panic(err) } key1, crt1, err := utils.GenerateCertWithCA("node1", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node1"}) if err != nil { panic(err) } key2, crt2, err := utils.GenerateCertWithCA("node2", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node2"}) if err != nil { panic(err) } key3, crt3, err := utils.GenerateCertWithCA("node3", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node3"}) if err != nil { panic(err) } key4, crt4, err := utils.GenerateCertWithCA("node1wrongCN", caKey, caCrt, "node1wrongCN", nil, []string{"node1wrongCN"}) if err != nil { panic(err) } m := NewLibMesh() // node1 -> node2 <- node3 node1 := m.NewLibNode("node1") node2 := m.NewLibNode("node2") node3 := m.NewLibNode("node3") // node1 dials out to node2 node1.Connections = []Connection{ {RemoteNode: node2, Protocol: "tcp", TLS: "client"}, } node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName("tcp"): newListenerCfg("tcp", "", 1, nil), } node1.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key1, Cert: crt1, RootCAs: caCrt, }, { Name: "tlsclientwrongCN", Key: key4, Cert: crt4, RootCAs: caCrt, SkipReceptorNamesCheck: true, }, } // node2 has a listener node2.workerConfigs = []workceptor.WorkerConfig{workTestConfigs[workPluginName]["echosleepshort"]} node2.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName("tcp"): newListenerCfg("tcp", "server", 1, nil), } node2.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "server", Key: key2, Cert: crt2, RequireClientCert: true, ClientCAs: caCrt, }, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key2, Cert: crt2, RootCAs: caCrt, }, } node2.controlServerTLS = "server" // node3 dials out to node2 node3.Connections = []Connection{ {RemoteNode: node2, Protocol: "tcp", TLS: "client"}, } node3.workerConfigs = []workceptor.WorkerConfig{ workTestConfigs[workPluginName]["echosleepshort"], workTestConfigs[workPluginName]["echosleeplong"], workTestConfigs[workPluginName]["echosleeplong50"], } node3.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key3, Cert: crt3, RootCAs: caCrt, }, } return &m } receptor-1.5.3/tests/functional/mesh/firewall_test.go000066400000000000000000000045341475465740700230270ustar00rootroot00000000000000package mesh import ( "context" "testing" "time" "github.com/ansible/receptor/pkg/netceptor" _ "github.com/fortytw2/leaktest" ) func TestFirewall(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws", "udp"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node2 := m.NewLibNode("node2") node1.Connections = []Connection{ {RemoteNode: node2, Protocol: proto}, } m.GetNodes()[node1.GetID()] = node1 node2.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } m.GetNodes()[node2.GetID()] = node2 node2.Config.FirewallRules = []netceptor.FirewallRuleData{ {"Action": "accept", "FromNode": "node2"}, {"Action": "reject", "ToNode": "node3"}, {"Action": "reject", "ToNode": "node1"}, } node3 := m.NewLibNode("node3") node3.Connections = []Connection{ {RemoteNode: node2, Protocol: proto}, } m.GetNodes()[node3.GetID()] = node3 defer m.WaitForShutdown() defer m.Destroy() err := m.Start(t.Name()) if err != nil { t.Fatal(err) } ctx1, cancel1 
:= context.WithTimeout(context.Background(), 20*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } // Test that node1 and node3 can ping node2 for _, nodeSender := range []*LibNode{m.GetNodes()["node1"], m.GetNodes()["node3"]} { controller := NewReceptorControl() err = controller.Connect(nodeSender.GetControlSocket()) if err != nil { t.Fatal(err) } response, err := controller.Ping("node2") if err != nil { t.Error(err) } t.Logf("%v", response) controller.Close() } // Test that node1 and node3 cannot ping each other for strSender, strReceiver := range map[string]string{"node1": "node3", "node3": "node1"} { controller := NewReceptorControl() err = controller.Connect(m.GetNodes()[strSender].GetControlSocket()) if err != nil { t.Fatal(err) } _, err := controller.Ping(strReceiver) if err == nil { t.Error("firewall failed to block ping") } else if err.Error() != "blocked by firewall" { t.Errorf("got wrong error: %s", err) } t.Logf("%v", err) controller.Close() } }) } } receptor-1.5.3/tests/functional/mesh/fixtures.go000066400000000000000000000026201475465740700220260ustar00rootroot00000000000000package mesh import "github.com/ansible/receptor/pkg/workceptor" var workPlugins = []workPlugin{"command", "kube"} var workTestConfigs = map[workPlugin]map[workType]workceptor.WorkerConfig{ "kube": { "echosleepshort": workceptor.KubeWorkerCfg{ WorkType: "echosleepshort", AuthMethod: "kubeconfig", Namespace: "default", Image: "alpine", Command: "sh -c 'for i in `seq 1 5`; do echo $i;done'", }, "echosleeplong": workceptor.KubeWorkerCfg{ WorkType: "echosleeplong", AuthMethod: "kubeconfig", Namespace: "default", Image: "alpine", Command: "sh -c 'for i in `seq 1 5`; do echo $i; sleep 3;done'", }, "echosleeplong50": workceptor.KubeWorkerCfg{ WorkType: "echosleeplong50", AuthMethod: "kubeconfig", Namespace: "default", Image: "alpine", Command: "sh -c 'for i in `seq 1 50`; do echo $i; sleep 4;done'", }, }, "command": { "echosleepshort": workceptor.CommandWorkerCfg{ WorkType: "echosleepshort", Command: "bash", Params: "-c 'for i in {1..5}; do echo $i;done'", }, "echosleeplong": workceptor.CommandWorkerCfg{ WorkType: "echosleeplong", Command: "bash", Params: "-c 'for i in {1..5}; do echo $i; sleep 3;done'", }, "echosleeplong50": workceptor.CommandWorkerCfg{ WorkType: "echosleeplong50", Command: "base", Params: "-c 'for i in {1..50}; do echo $i; sleep 4;done'", }, }, } receptor-1.5.3/tests/functional/mesh/lib.go000066400000000000000000000455631475465740700207400ustar00rootroot00000000000000package mesh import ( "context" "crypto/tls" "errors" "fmt" "net" "os" "path/filepath" "reflect" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/controlsvc" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/types" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/tests/utils" ) // LibMesh represents a single Receptor mesh network, used for test simulations. type LibMesh struct { Name string // Only used for generating test names nodes map[string]*LibNode DataDir string LogWriter *utils.TestLogWriter Context context.Context } // NewLibMesh constructs a new LibMesh. 
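// It creates a shared data directory for the mesh under $TMPDIR/receptor-testing/mesh-*
// (panicking if it cannot be created) and initializes an empty node map plus a shared
// log writer that the individual nodes' loggers write to.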
func NewLibMesh() LibMesh { baseDir := filepath.Join(os.TempDir(), "receptor-testing") os.Mkdir(baseDir, 0o700) err := os.MkdirAll(baseDir, 0o755) if err != nil { panic(err) } tempdir, err := os.MkdirTemp(baseDir, "mesh-") if err != nil { panic(err) } return LibMesh{ nodes: make(map[string]*LibNode), LogWriter: utils.NewTestLogWriter(), DataDir: tempdir, Context: context.Background(), } } // m.NewLibNode constructs a node with the name passed as the argument. func (m *LibMesh) NewLibNode(nodeID string) *LibNode { node := LibNode{ Config: types.NodeCfg{ ID: nodeID, DataDir: m.DataDir, }, ControlSocket: filepath.Join(m.DataDir, nodeID+".sock"), backends: make(map[string]BackendInfo), } m.nodes[nodeID] = &node return &node } func (m *LibMesh) Start(_ string) error { var err error // Bootstrap nodes for _, node := range m.GetNodes() { err = node.StartLocalServices() if err != nil { return err } // Comment out the line below to print test logs to stdout. // Note that some assertions will fail by doing this. node.netceptorInstance.Logger.SetOutput(m.LogWriter) } // Start listeners first, we connect below for _, node := range m.GetNodes() { err = node.StartListeners() if err != nil { return err } } // Establish outbound connections for _, node := range m.GetNodes() { err = node.EstablishRemoteConnections() if err != nil { return err } } return nil } // GetNodes returns a list of nodes. func (m *LibMesh) GetNodes() map[string]*LibNode { return m.nodes } // GetDataDir returns the path to the data directory for this mesh. func (m *LibMesh) GetDataDir() string { return m.DataDir } // Shutdown stops all running Netceptors and their backends. func (m *LibMesh) Destroy() { for _, node := range m.GetNodes() { node.Destroy() } } // WaitForShutdown Waits for all running Netceptors and their backends to stop. func (m LibMesh) WaitForShutdown() { for _, node := range m.GetNodes() { node.WaitForShutdown() } } // CheckConnections returns true if the connections defined in our mesh definition are // consistent with the connections made by the nodes. func (m LibMesh) CheckConnections() bool { statusList, err := m.Status() if err != nil { return false } expectedConnections := make(map[string]map[string]float64) actualConnections := make(map[string]map[string]float64) for nodeID := range m.GetNodes() { expectedConnections[nodeID] = map[string]float64{} actualConnections[nodeID] = map[string]float64{} } for nodeID, node := range m.GetNodes() { for _, connection := range node.Connections { backend := connection.RemoteNode.backends[connection.Protocol] cost := backend.connectionCost nodeCost, ok := backend.nodeCost[nodeID] if ok { cost = nodeCost } expectedConnections[nodeID][connection.RemoteNode.GetID()] = cost expectedConnections[connection.RemoteNode.GetID()][nodeID] = cost } } for _, nodeStatus := range statusList { for _, connection := range nodeStatus.Connections { actualConnections[nodeStatus.NodeID][connection.NodeID] = connection.Cost } } return reflect.DeepEqual(actualConnections, expectedConnections) } // CheckKnownConnectionCosts returns true if every node has the same view of the connections in the mesh. 
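// It fetches every node's status and compares each node's KnownConnectionCosts map
// against the first node's view; an empty mesh is trivially consistent.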
func (m *LibMesh) CheckKnownConnectionCosts() bool { meshStatus, err := m.Status() if err != nil { return false } // If the mesh is empty we are done if len(meshStatus) == 0 { return true } knownConnectionCosts := meshStatus[0].KnownConnectionCosts for _, status := range meshStatus { if !reflect.DeepEqual(status.KnownConnectionCosts, knownConnectionCosts) { return false } } return true } // CheckRoutes returns true if every node has a route to every other node. func (m *LibMesh) CheckRoutes() bool { meshStatus, err := m.Status() if err != nil { return false } for _, status := range meshStatus { // loop over m.MeshDefinition.Nodes instead... check for NodeConfig.ID, fall back to key for _, node := range m.GetNodes() { // Dont check a route to ourselves if status.NodeID == node.GetID() { continue } _, ok := status.RoutingTable[node.GetID()] if !ok { return false } } } return true } // CheckControlSockets Checks if the Control sockets in the mesh are all running and accepting // connections. func (m *LibMesh) CheckControlSockets() bool { for _, node := range m.GetNodes() { controller := NewReceptorControl() if controller.Connect(node.GetControlSocket()) != nil { node.netceptorInstance.Logger.Warning("%s: failed to connect to control socket", node.GetID()) return false } controller.Close() } return true } // WaitForReady Waits for connections and routes to converge. func (m *LibMesh) WaitForReady(ctx context.Context) error { sleepInterval := 1 * time.Second if !utils.CheckUntilTimeout(ctx, sleepInterval, m.CheckControlSockets) { return errors.New("timed out while waiting for control sockets") } if !utils.CheckUntilTimeout(ctx, sleepInterval, m.CheckConnections) { return errors.New("timed out while waiting for Connections") } if !utils.CheckUntilTimeout(ctx, sleepInterval, m.CheckKnownConnectionCosts) { return errors.New("timed out while checking Connection Costs") } if !utils.CheckUntilTimeout(ctx, sleepInterval, m.CheckRoutes) { return errors.New("timed out while waiting for routes to converge") } return nil } // Status returns a list of statuses from the contained netceptors. func (m *LibMesh) Status() ([]*netceptor.Status, error) { out := []*netceptor.Status{} for _, node := range m.GetNodes() { status, err := node.Status() if err != nil { return nil, err } out = append(out, status) } return out, nil } // LibNode represents a node (it's configuration and running services). type LibNode struct { Config types.NodeCfg Connections []Connection ListenerCfgs map[listenerName]ListenerCfg netceptorInstance *netceptor.Netceptor workceptorInstance *workceptor.Workceptor backends map[string]BackendInfo controlServer *controlsvc.Server ControlSocket string controlServerCanceller context.CancelFunc controlerServerContext context.Context controlServerTLS string workerConfigs []workceptor.WorkerConfig TLSServerConfigs []*netceptor.TLSServerConfig TLSClientConfigs []*netceptor.TLSClientConfig WorkSigningKey *workceptor.SigningKeyPrivateCfg WorkVerificationKey *workceptor.VerifyingKeyPublicCfg } type listenerName string type ( workPlugin string // "kube" or "command" workType string // identifier for an instance of work-kubernetes or work-command ) // Status returns the status of the node. func (n *LibNode) Status() (*netceptor.Status, error) { status := n.netceptorInstance.Status() return &status, nil } // GetControlSocket returns the path to the controlsocket. 
func (n *LibNode) GetControlSocket() string { return n.ControlSocket } // GetDataDir returns the path to the directory where data is stored for this node. func (n *LibNode) GetDataDir() string { return n.Config.DataDir } // GetID returns the ID (name) of this node. func (n *LibNode) GetID() string { return n.Config.ID } // Start will start local services (netceptor, workceptor, controlsvc), // then start any listeners, finally establishing any remote connections. // Note that this requires remote nodes to be running since we need to detect which // random port was assigned to the backend. This is typically only used when calling Shutdown // in the tests. When starting the mesh for the first time we loop over nodes in 2 phases, // first calling StartListeners and then EstablishRemoteConnections. func (n *LibNode) Start() error { var err error err = n.StartLocalServices() if err != nil { return err } err = n.StartListeners() if err != nil { return err } err = n.EstablishRemoteConnections() if err != nil { return err } return nil } // StartListeners loops over n.ListenerCfgs, which is an interface that wraps // TCPListenerCfg, UDPListenerCfg, and WebsocketListenerCfg and starts listening // on the appropriate protocol. func (n *LibNode) StartListeners() error { var bi *BackendInfo var err error for _, listenerCfg := range n.ListenerCfgs { switch lcfg := listenerCfg.(type) { case *backends.TCPListenerCfg: bi, err = n.TCPListen(listenerCfg) // Record what address we are listening on so we can reuse it if we restart this node lcfg.BindAddr = bi.listener.GetAddr() case *backends.UDPListenerCfg: bi, err = n.UDPListen(listenerCfg) // Record what address we are listening on so we can reuse it if we restart this node lcfg.BindAddr = bi.listener.GetAddr() case *backends.WebsocketListenerCfg: bi, err = n.WebsocketListen(listenerCfg) // Record what address we are listening on so we can reuse it if we restart this node lcfg.BindAddr = bi.listener.GetAddr() default: err = fmt.Errorf("unknown listener type: %s", reflect.TypeOf(lcfg)) } if err != nil { return err } } return nil } // EstablishRemoteConnections discovers which address a remote backend is listening on // and then dials out to it. func (n *LibNode) EstablishRemoteConnections() error { for _, connection := range n.Connections { backend := connection.RemoteNode.backends[connection.Protocol] host, _, err := net.SplitHostPort(backend.bindAddr) dialAddr := backend.listener.GetAddr() if err != nil { return err } tlscfg, err := n.netceptorInstance.GetClientTLSConfig(connection.TLS, host, netceptor.ExpectedHostnameTypeDNS) if err != nil { return err } connectionCost := backend.connectionCost nodeCost, ok := backend.nodeCost[n.GetID()] if ok { connectionCost = nodeCost } switch connection.Protocol { case "tcp": err = n.TCPDial(dialAddr, connectionCost, tlscfg) if err != nil { return err } case "udp": err = n.UDPDial(dialAddr, connectionCost) if err != nil { return err } case "ws": proto := "wss://" if tlscfg == nil { proto = "ws://" } err = n.WebSocketDial(proto+dialAddr, connectionCost, tlscfg) if err != nil { return err } } } return nil } // Shutdown stops the node and waits for it to exit. func (n *LibNode) Shutdown() { n.Destroy() n.WaitForShutdown() // Forces a new instance of netceptor to get created when we restart. // This is necessary because we allow for pre-assigning the netceptor instance // so we can simulate duplicate nodes in TestDuplicateNodes. n.netceptorInstance = nil } // Destroy instructs the node to stop its services. 
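// It cancels the control service context and shuts down the Netceptor instance;
// callers should use WaitForShutdown to block until the node's backends have exited.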
func (n *LibNode) Destroy() { n.controlServerCanceller() n.netceptorInstance.Shutdown() } // WaitForShutdown Waits for the node to shutdown completely. func (n *LibNode) WaitForShutdown() { n.netceptorInstance.BackendWait() } // TCPListen takes a ListenerCfg (backends.TCPListenerCfg) and listens for TCP traffic. func (n *LibNode) TCPListen(listenerCfg ListenerCfg) (*BackendInfo, error) { tlsCfg, err := n.netceptorInstance.GetServerTLSConfig(listenerCfg.GetTLS()) if err != nil { return nil, err } backend, err := backends.NewTCPListener(listenerCfg.GetAddr(), tlsCfg, n.netceptorInstance.Logger) if err != nil { return nil, err } cost := listenerCfg.GetCost() nodeCost := listenerCfg.GetNodeCost() err = n.netceptorInstance.AddBackend( backend, netceptor.BackendConnectionCost(cost), netceptor.BackendNodeCost(nodeCost), ) if err != nil { return nil, err } bi := BackendInfo{ protocol: "tcp", bindAddr: listenerCfg.GetAddr(), connectionCost: cost, nodeCost: nodeCost, listener: backend, } n.backends[bi.protocol] = bi return &bi, nil } // TCPDial registers a new netceptor.Backend that will dial a remote node via TCP. func (n *LibNode) TCPDial(address string, cost float64, tlsCfg *tls.Config) error { b1, err := backends.NewTCPDialer(address, true, tlsCfg, n.netceptorInstance.Logger) if err != nil { return err } err = n.netceptorInstance.AddBackend(b1, netceptor.BackendConnectionCost(cost)) return err } // UDPListen takes a ListenerCfg (backends.UDPListenerCfg) and listens for UDP traffic. func (n *LibNode) UDPListen(listenerCfg ListenerCfg) (*BackendInfo, error) { backend, err := backends.NewUDPListener(listenerCfg.GetAddr(), n.netceptorInstance.Logger) if err != nil { return nil, err } cost := listenerCfg.GetCost() nodeCost := listenerCfg.GetNodeCost() err = n.netceptorInstance.AddBackend( backend, netceptor.BackendConnectionCost(cost), netceptor.BackendNodeCost(nodeCost), ) if err != nil { return nil, err } bi := BackendInfo{ protocol: "udp", bindAddr: listenerCfg.GetAddr(), connectionCost: cost, nodeCost: nodeCost, listener: backend, } n.backends[bi.protocol] = bi return &bi, nil } // UDPDial registers a new netceptor.Backend that will dial a remote node via UDP. func (n *LibNode) UDPDial(address string, cost float64) error { b1, err := backends.NewUDPDialer(address, true, n.netceptorInstance.Logger) if err != nil { return err } err = n.netceptorInstance.AddBackend(b1, netceptor.BackendConnectionCost(cost)) return err } // WebsocketListen takes a ListenerCfg (backends.WebsocketListenerCfg) and listens for Websocket traffic. func (n *LibNode) WebsocketListen(listenerCfg ListenerCfg) (*BackendInfo, error) { tlsCfg, err := n.netceptorInstance.GetServerTLSConfig(listenerCfg.GetTLS()) if err != nil { return nil, err } backend, err := backends.NewWebsocketListener(listenerCfg.GetAddr(), tlsCfg, n.netceptorInstance.Logger, nil, nil) if err != nil { return nil, err } cost := listenerCfg.GetCost() nodeCost := listenerCfg.GetNodeCost() err = n.netceptorInstance.AddBackend( backend, netceptor.BackendConnectionCost(cost), netceptor.BackendNodeCost(nodeCost), ) if err != nil { return nil, err } bi := BackendInfo{ protocol: "ws", bindAddr: listenerCfg.GetAddr(), connectionCost: cost, nodeCost: nodeCost, listener: backend, } n.backends[bi.protocol] = bi return &bi, nil } // WebSocketDial registers a new netceptor.Backend that will dial a remote node via a WebSocket. 
func (n *LibNode) WebSocketDial(address string, cost float64, tlsCfg *tls.Config) error { b1, err := backends.NewWebsocketDialer(address, tlsCfg, "", true, n.netceptorInstance.Logger, nil) if err != nil { return err } err = n.netceptorInstance.AddBackend(b1, netceptor.BackendConnectionCost(cost)) return err } func (n *LibNode) StartLocalServices() error { // This conditional only exists to give TestDuplicateNodes a way // to simulate a duplicate node on the mesh. if n.netceptorInstance == nil { n.netceptorInstance = netceptor.New(context.Background(), n.GetID()) } ctx, canceller := context.WithCancel(context.Background()) n.controlerServerContext = ctx n.controlServerCanceller = canceller n.controlServer = controlsvc.New(true, n.netceptorInstance) err := n.configureFirewallRules() if err != nil { return err } err = n.configureTLS() if err != nil { return err } tlsCfg, err := n.netceptorInstance.GetServerTLSConfig(n.controlServerTLS) if err != nil { return err } n.workceptorInstance, err = workceptor.New(n.netceptorInstance.Context(), n.netceptorInstance, n.GetDataDir()) if err != nil { return err } err = n.configureWorkSigning() if err != nil { return err } err = n.workceptorInstance.RegisterWithControlService(n.controlServer) if err != nil { return err } err = n.configureWorkers() if err != nil { return err } err = n.controlServer.RunControlSvc(n.controlerServerContext, "control", tlsCfg, n.ControlSocket, os.FileMode(0o600), "", nil) if err != nil { return err } return nil } func (n *LibNode) configureFirewallRules() error { rules, err := netceptor.ParseFirewallRules(n.Config.FirewallRules) if err != nil { return err } err = n.netceptorInstance.AddFirewallRules(rules, true) if err != nil { return err } return nil } func (n *LibNode) configureTLS() error { for _, c := range n.TLSServerConfigs { tlscfg, err := c.PrepareTLSServerConfig(n.netceptorInstance) if err != nil { return err } err = n.netceptorInstance.SetServerTLSConfig(c.Name, tlscfg) if err != nil { return err } } for _, c := range n.TLSClientConfigs { tlscfg, pinnedFingerprints, err := c.PrepareTLSClientConfig(n.netceptorInstance) if err != nil { return err } err = n.netceptorInstance.SetClientTLSConfig(c.Name, tlscfg, pinnedFingerprints) if err != nil { return err } } return nil } func (n *LibNode) configureWorkers() error { for _, cfg := range n.workerConfigs { err := n.workceptorInstance.RegisterWorker(cfg.GetWorkType(), cfg.NewWorker, cfg.GetVerifySignature()) if err != nil { return err } } return nil } func (n *LibNode) configureWorkSigning() error { if n.WorkSigningKey != nil { duration, err := n.WorkSigningKey.PrepareSigningKeyPrivateCfg() if err != nil { return err } if duration != nil { n.workceptorInstance.SigningExpiration = *duration } n.workceptorInstance.SigningKey = n.WorkSigningKey.PrivateKey } if n.WorkVerificationKey != nil { err := n.WorkVerificationKey.PrepareVerifyingKeyPublicCfg() if err != nil { return err } n.workceptorInstance.VerifyingKey = n.WorkVerificationKey.PublicKey } return nil } // Connection is an abstraction that ultimately results in a new running netceptor.Backend. 
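// RemoteNode is the node whose listener this connection dials, Protocol is one of
// "tcp", "udp", or "ws", and TLS optionally names a client TLS config registered on
// the dialing node (empty for plaintext).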
type Connection struct { RemoteNode *LibNode Protocol string TLS string } type ListenerCfg interface { GetCost() float64 GetNodeCost() map[string]float64 GetAddr() string GetTLS() string } type NativeBackend interface { netceptor.Backend GetAddr() string GetTLS() *tls.Config } type BackendInfo struct { protocol string bindAddr string connectionCost float64 nodeCost map[string]float64 listener NativeBackend } func newListenerCfg(proto string, tls string, cost float64, nodeCost map[string]float64) ListenerCfg { switch proto { case "tcp": return &backends.TCPListenerCfg{BindAddr: "localhost:0", TLS: tls, Cost: cost, NodeCost: nodeCost} case "udp": return &backends.UDPListenerCfg{BindAddr: "localhost:0", Cost: cost, NodeCost: nodeCost} case "ws": return &backends.WebsocketListenerCfg{BindAddr: "localhost:0", TLS: tls, Cost: cost, NodeCost: nodeCost} } return nil } receptor-1.5.3/tests/functional/mesh/mesh_test.go000066400000000000000000000201051475465740700221460ustar00rootroot00000000000000package mesh import ( "bytes" "context" "fmt" "os" "os/exec" "strings" "testing" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/tests/utils" _ "github.com/fortytw2/leaktest" ) // Test that a mesh starts and that connections are what we expect and that // each node's view of the mesh converges. func TestMeshStartup(t *testing.T) { meshDefinitions := map[string]*LibMesh{ "tcp": flatMesh("tcp"), "udp": flatMesh("udp"), "ws": flatMesh("ws"), } t.Parallel() for protocol, m := range meshDefinitions { m := m protocol := protocol testName := protocol + "/" + m.Name t.Run(testName, func(t *testing.T) { t.Parallel() err := m.Start(t.Name()) defer func() { m.Destroy() m.WaitForShutdown() t.Log(m.LogWriter.String()) }() if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } // Test that each Node can ping each Node for _, node := range m.GetNodes() { controller := NewReceptorControl() err = controller.Connect(node.GetControlSocket()) if err != nil { t.Fatalf("Error connecting to controller: %s", err) } for _, remoteNode := range m.GetNodes() { retryloop: for i := 30; i > 0; i-- { _, err := controller.Ping(remoteNode.GetID()) switch { case err == nil: break retryloop case i != 1: t.Logf("Error pinging %s: %s. Retrying", remoteNode.GetID(), err) continue default: t.Fatalf("Error pinging %s: %s", remoteNode.GetID(), err) } } } } }) } } // Test that traceroute works. 
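// It builds a tree mesh for each protocol, issues "traceroute node7" on the
// controller's control socket, and verifies the hops are controller -> node5 -> node7.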
func TestTraceroute(t *testing.T) { meshDefinitions := map[string]*LibMesh{ "tcp": treeMesh("tcp"), "udp": treeMesh("udp"), "ws": treeMesh("ws"), } t.Parallel() for protocol, m := range meshDefinitions { m := m protocol := protocol testName := protocol + "/" + m.Name t.Run(testName, func(t *testing.T) { t.Parallel() defer func() { t.Log(m.LogWriter.String()) }() defer m.WaitForShutdown() defer m.Destroy() err := m.Start(t.Name()) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } controlNode := m.GetNodes()["controller"] controller := NewReceptorControl() err = controller.Connect(controlNode.GetControlSocket()) if err != nil { t.Fatal(err) } _, err = controller.WriteStr("traceroute node7\n") if err != nil { t.Fatal(err) } jsonData, err := controller.ReadAndParseJSON() if err != nil { t.Fatal(err) } err = controller.Close() if err != nil { t.Fatal(err) } for key := range jsonData { value := jsonData[key] valMap, ok := value.(map[string]interface{}) if !ok { t.Fatal("traceroute returned invalid result") } _, ok = valMap["Error"] if ok { t.Fatalf("traceroute returned error: %s", valMap["Error"]) } } expectedHops := []struct { key string from string }{ {"0", "controller"}, {"1", "node5"}, {"2", "node7"}, } if len(jsonData) != len(expectedHops) { t.Fatal("traceroute has wrong number of hops") } for i := range expectedHops { eh := expectedHops[i] var fromStr string result, ok := jsonData[eh.key] if ok { var resultMap map[string]interface{} resultMap, ok = result.(map[string]interface{}) if ok { var fromIf interface{} fromIf, ok = resultMap["From"] if ok { fromStr, ok = fromIf.(string) } } } if !ok { t.Fatalf("hop %s not in result data or not in expected format", eh.key) } if fromStr != eh.from { t.Fatalf("hop %s should be %s but is actually %s", eh.key, eh.from, fromStr) } } }) } } // Test that a mesh starts and that connections are what we expect. // //nolint:tparallel func TestMeshShutdown(t *testing.T) { // !!!!!!!!!! // This test is intentionally set to not run in parallel with the other tests // since it is checking to see that all ports are appropriately released. // !!!!!!!!!! 
meshDefinitions := map[string]*LibMesh{ "tcp": randomMesh("tcp"), "udp": randomMesh("udp"), "ws": randomMesh("ws"), } for protocol, m := range meshDefinitions { m := m protocol := protocol testName := protocol + "/" + m.Name t.Run(testName, func(t *testing.T) { t.Parallel() defer func() { t.Log(m.LogWriter.String()) }() err := m.Start(t.Name()) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } m.Destroy() m.WaitForShutdown() var lsofProto string switch protocol { case "tcp": lsofProto = "TCP" case "ws": lsofProto = "TCP" case "udp": lsofProto = "UDP" } // Check that the connections are closed pid := os.Getpid() done := false var out bytes.Buffer for timeout := 10 * time.Second; timeout > 0 && !done; { out = bytes.Buffer{} cmd := exec.Command("lsof", "-tap", fmt.Sprint(pid), "-i", lsofProto) cmd.Stdout = &out cmd.Run() if !strings.Contains(out.String(), fmt.Sprint(pid)) { done = true break } time.Sleep(100 * time.Millisecond) timeout -= 100 * time.Millisecond } if done == false { t.Errorf("Timed out while waiting for backends to close:%s\n", out.String()) } }) } } func TestCosts(t *testing.T) { t.Parallel() m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.ListenerCfgs = map[listenerName]ListenerCfg{ "tcp": &backends.TCPListenerCfg{ BindAddr: "127.0.0.1:0", Cost: 4.5, NodeCost: map[string]float64{"node2": 2.6, "node3": 3.2}, }, } for _, i := range []int{2, 3, 4} { nodeID := fmt.Sprintf("node%d", i) node := m.NewLibNode(nodeID) node.Connections = []Connection{ {RemoteNode: node1, Protocol: "tcp"}, } } err := m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } // Test that each Node can ping each Node for _, nodeSender := range m.GetNodes() { controller := NewReceptorControl() err = controller.Connect(nodeSender.GetControlSocket()) if err != nil { t.Fatal(err) } for nodeIDResponder := range m.GetNodes() { response, err := controller.Ping(nodeIDResponder) if err != nil { t.Error(err) } else { t.Logf("%v", response) } } controller.Close() } } func TestDuplicateNodes(t *testing.T) { t.Parallel() m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.ListenerCfgs = map[listenerName]ListenerCfg{ "tcp": &backends.TCPListenerCfg{ BindAddr: "127.0.0.1:0", Cost: 4.5, NodeCost: map[string]float64{"node2": 2.6, "node3": 3.2}, }, } node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: "tcp"}, } node3 := m.NewLibNode("node3") node3.netceptorInstance = netceptor.New(context.Background(), "node2") node3.Connections = []Connection{ {RemoteNode: node1, Protocol: "tcp"}, } // Hack a duplicate node onto the mesh delete(m.nodes, "node3") m.nodes["node2-dupe"] = node3 err := m.Start(t.Name()) defer m.WaitForShutdown() defer m.Destroy() if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 1*time.Minute) defer cancel1() sleepInterval := 100 * time.Millisecond if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "connected using a node ID we are already connected to") }) { t.Fatal("duplicate nodes were not expected to exist together") } time.Sleep(5 * time.Second) } 
receptor-1.5.3/tests/functional/mesh/receptorcontrol.go000066400000000000000000000276471475465740700234210ustar00rootroot00000000000000package mesh import ( "context" "encoding/json" "errors" "fmt" "net" "regexp" "strings" "time" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/tests/utils" ) // ReceptorControl Connects to a control socket and provides basic commands. type ReceptorControl struct { socketConn *net.UnixConn SocketFilename string } // New Returns an empty ReceptorControl. func NewReceptorControl() *ReceptorControl { return &ReceptorControl{ socketConn: nil, SocketFilename: "", } } // Connect connects to the socket at the specified filename and checks the // handshake with the control service. func (r *ReceptorControl) Connect(filename string) error { if r.socketConn != nil { return errors.New("tried to connect to a socket after already being connected to a socket") } addr, err := net.ResolveUnixAddr("unix", filename) if err != nil { return err } r.socketConn, err = net.DialUnix("unix", nil, addr) if err != nil { return err } err = r.handshake() if err != nil { return err } r.SocketFilename = filename return nil } func (r *ReceptorControl) Reload() error { if _, err := r.WriteStr("reload \n"); err != nil { return err } jsonData, err := r.ReadAndParseJSON() if err != nil { return err } success := jsonData["Success"].(bool) if !success { return errors.New("Error") } return nil } // Reconnect to unix socket. func (r *ReceptorControl) Reconnect() error { if r.SocketFilename != "" { err := r.Connect(r.SocketFilename) if err != nil { return err } } else { return fmt.Errorf("could not reconnect, no SocketFilename") } return nil } // Read reads a line from the socket. func (r *ReceptorControl) Read() ([]byte, error) { dataBytes := make([]byte, 0) buf := make([]byte, 1) for { n, err := r.socketConn.Read(buf) if err != nil { return dataBytes, err } if n == 1 { if buf[0] == '\n' { break } dataBytes = append(dataBytes, buf[0]) } } return dataBytes, nil } // Write writes some data to the socket. func (r *ReceptorControl) Write(data []byte) (int, error) { return r.socketConn.Write(data) } // ReadStr reads some data from the socket and converts it to a string. func (r *ReceptorControl) ReadStr() (string, error) { data, err := r.Read() if err != nil { return "", err } return string(data), nil } // WriteStr writes string data to the socket. func (r *ReceptorControl) WriteStr(data string) (int, error) { return r.Write([]byte(data)) } func (r *ReceptorControl) handshake() error { data, err := r.Read() if err != nil { return err } matched, err := regexp.Match("^Receptor Control, node (.+)$", data) if err != nil { return err } if !matched { return fmt.Errorf("failed control socket handshake, got: %s", data) } return nil } // Close closes the connection to the socket. func (r *ReceptorControl) Close() error { err := r.socketConn.Close() r.socketConn = nil return err } // CloseWrite closes the write side of the socket. func (r *ReceptorControl) CloseWrite() error { err := r.socketConn.CloseWrite() return err } // ReadAndParseJSON reads data from the socket and parses it as json. 
func (r *ReceptorControl) ReadAndParseJSON() (map[string]interface{}, error) { data, err := r.Read() if err != nil { return nil, err } if str := string(data); strings.HasPrefix(str, "ERROR") { return nil, fmt.Errorf(str) //nolint:govet,staticcheck } jsonData := make(map[string]interface{}) err = json.Unmarshal(data, &jsonData) if err != nil { return nil, err } return jsonData, nil } // Ping pings the specified node. func (r *ReceptorControl) Ping(node string) (string, error) { _, err := r.WriteStr(fmt.Sprintf("ping %s\n", node)) if err != nil { return "", err } jsonData, err := r.ReadAndParseJSON() if err != nil { return "", err } success := jsonData["Success"].(bool) if !success { return "", errors.New(jsonData["Error"].(string)) } return fmt.Sprintf("Reply from %s in %s", jsonData["From"].(string), jsonData["TimeStr"].(string)), nil } // Status retrieves the status of the current node. func (r *ReceptorControl) Status() (*netceptor.Status, error) { _, err := r.WriteStr("status\n") if err != nil { return nil, err } data, err := r.Read() if err != nil { return nil, err } status := netceptor.Status{} err = json.Unmarshal(data, &status) if err != nil { return nil, err } return &status, nil } func (r *ReceptorControl) getWorkSubmitResponse() (string, error) { msg, err := r.ReadStr() // flush getWorkSubmitResponse if !strings.Contains(msg, "Send stdin data and EOF.") { return "", fmt.Errorf(msg) //nolint:govet,staticcheck } if err != nil { return "", err } err = r.CloseWrite() // close write half to signal EOF if err != nil { return "", err } defer func() { err = r.Close() // since write is closed, we should close the whole socket if err != nil { panic(err) } err = r.Reconnect() if err != nil { panic(err) } }() response, err := r.ReadAndParseJSON() if err != nil { return "", err } unitID := fmt.Sprintf("%v", response["unitid"]) return unitID, nil } // WorkSubmitJSON begins work on remote node via JSON command. func (r *ReceptorControl) WorkSubmitJSON(command string) (string, error) { _, err := r.WriteStr(fmt.Sprintf("%s\n", command)) if err != nil { return "", err } unitID, err := r.getWorkSubmitResponse() if err != nil { return "", err } return unitID, nil } // WorkSubmit begins work on remote node. func (r *ReceptorControl) WorkSubmit(node, workType string) (string, error) { _, err := r.WriteStr(fmt.Sprintf("work submit %s %s\n", node, workType)) if err != nil { return "", err } unitID, err := r.getWorkSubmitResponse() if err != nil { return "", err } return unitID, nil } // WorkStart begins work on local node. func (r *ReceptorControl) WorkStart(workType string) (string, error) { return r.WorkSubmit("localhost", workType) } // WorkCancel cancels work. func (r *ReceptorControl) WorkCancel(unitID string) (map[string]interface{}, error) { _, err := r.WriteStr(fmt.Sprintf("work cancel %s\n", unitID)) if err != nil { return nil, err } return r.ReadAndParseJSON() } // WorkRelease cancels and deletes work. func (r *ReceptorControl) WorkRelease(unitID string) (map[string]interface{}, error) { _, err := r.WriteStr(fmt.Sprintf("work release %s\n", unitID)) if err != nil { return nil, err } return r.ReadAndParseJSON() } // GetWorkStatus returns JSON of status file for a given unitID. 
func (r *ReceptorControl) GetWorkStatus(unitID string) (*workceptor.StatusFileData, error) { status := &workceptor.StatusFileData{} _, err := r.WriteStr(fmt.Sprintf("work status %s\n", unitID)) if err != nil { return status, err } jsonData, err := r.Read() if err != nil { return status, err } err = json.Unmarshal(jsonData, status) if err != nil { return status, err } return status, nil } func (r *ReceptorControl) getWorkList() (map[string]interface{}, error) { _, err := r.WriteStr("work list\n") if err != nil { return nil, err } workList, err := r.ReadAndParseJSON() if err != nil { return nil, err } return workList, nil } func assertWithTimeout(ctx context.Context, check func() bool) bool { return utils.CheckUntilTimeout(ctx, 500*time.Millisecond, check) } func (r *ReceptorControl) assertWorkState(ctx context.Context, unitID string, state int) bool { check := func() bool { workStatus, _ := r.GetWorkStatus(unitID) return workStatus.State == state } return assertWithTimeout(ctx, check) } func (r *ReceptorControl) assertWorkSize(ctx context.Context, unitID string, size int64) bool { check := func() bool { workStatus, _ := r.GetWorkStatus(unitID) return workStatus.StdoutSize > size } return assertWithTimeout(ctx, check) } // assert if the work size is increasing. func (r *ReceptorControl) AssertWorkSizeIncreasing(ctx context.Context, unitID string, size int64) error { if !r.assertWorkSize(ctx, unitID, size) { return fmt.Errorf("failed to assert %s is increasing or ctx timed out", unitID) } return nil } // AssertWorkRunning waits until work status is running. func (r *ReceptorControl) AssertWorkRunning(ctx context.Context, unitID string) error { if !r.assertWorkState(ctx, unitID, workceptor.WorkStateRunning) { return fmt.Errorf("failed to assert %s is running or ctx timed out", unitID) } return nil } // AssertWorkPending waits until status is pending. func (r *ReceptorControl) AssertWorkPending(ctx context.Context, unitID string) error { if !r.assertWorkState(ctx, unitID, workceptor.WorkStatePending) { return fmt.Errorf("failed to assert %s is pending or ctx timed out", unitID) } return nil } // AssertWorkSucceeded waits until status is successful. func (r *ReceptorControl) AssertWorkSucceeded(ctx context.Context, unitID string) error { if !r.assertWorkState(ctx, unitID, workceptor.WorkStateSucceeded) { return fmt.Errorf("failed to assert %s succeeded or ctx timed out", unitID) } return nil } // AssertWorkFailed waits until status is failed. func (r *ReceptorControl) AssertWorkFailed(ctx context.Context, unitID string) error { if !r.assertWorkState(ctx, unitID, workceptor.WorkStateFailed) { return fmt.Errorf("failed to assert %s failed or ctx timed out", unitID) } return nil } // AssertWorkCancelled waits until work status is cancelled. func (r *ReceptorControl) AssertWorkCancelled(ctx context.Context, unitID string) error { check := func() bool { workStatus, err := r.GetWorkStatus(unitID) if err != nil { return false } if workStatus.State != workceptor.WorkStateCanceled { return false } detail := workStatus.Detail detailLc := strings.ToLower(detail) keywords := []string{ "cancel", "kill", "terminate", "stop", } for kwIdx := range keywords { if strings.Contains(detailLc, keywords[kwIdx]) { return true } } return false } if !assertWithTimeout(ctx, check) { return fmt.Errorf("failed to assert %s is cancelled or ctx timed out", unitID) } return nil } // AssertWorkTimedOut asserts that work failed. 
func (r *ReceptorControl) AssertWorkTimedOut(ctx context.Context, unitID string) error { check := func() bool { workStatus, err := r.GetWorkStatus(unitID) if err != nil { return false } if workStatus.State != workceptor.WorkStateFailed { return false } detail := workStatus.Detail return strings.HasPrefix(detail, "Work unit expired on") } if !assertWithTimeout(ctx, check) { return fmt.Errorf("failed to assert work timed out or ctx timed out") } return nil } // AssertWorkReleased asserts that work is not in work list. func (r *ReceptorControl) AssertWorkReleased(ctx context.Context, unitID string) error { check := func() bool { workList, err := r.getWorkList() if err != nil { return false } _, ok := workList[unitID] // unitID should not be in list return !ok } if !assertWithTimeout(ctx, check) { return fmt.Errorf("failed to assert %s is released or ctx timed out", unitID) } return nil } func (r *ReceptorControl) getWorkResults(unitID string, readSize int) ([]byte, error) { _, err := r.WriteStr(fmt.Sprintf("work results %s\n", unitID)) if err != nil { return nil, err } str, err := r.ReadStr() if err != nil { return nil, err } if str[:5] == "ERROR" { return nil, fmt.Errorf("remote error: %s", str) } buf := make([]byte, readSize) n, err := r.socketConn.Read(buf) if err != nil { return nil, err } if n != readSize { return nil, fmt.Errorf("did not read correct size") } err = r.Close() if err != nil { return nil, err } err = r.Reconnect() if err != nil { return nil, err } return buf, nil } // AssertWorkResults makes sure results match expected byte array. func (r *ReceptorControl) AssertWorkResults(unitID string, expectedResults []byte) error { workResults, err := r.getWorkResults(unitID, len(expectedResults)) if err != nil { return err } if string(expectedResults) != string(workResults) { return fmt.Errorf("work results did not match expected results") } return nil } receptor-1.5.3/tests/functional/mesh/testdata/000077500000000000000000000000001475465740700214375ustar00rootroot00000000000000receptor-1.5.3/tests/functional/mesh/testdata/echo-pod.yml000066400000000000000000000002741475465740700236630ustar00rootroot00000000000000--- apiVersion: v1 kind: Pod spec: containers: - name: worker image: centos:8 command: - bash args: - -c - for i in {1..5}; do echo $i;done receptor-1.5.3/tests/functional/mesh/tls_test.go000066400000000000000000000247631475465740700220320ustar00rootroot00000000000000package mesh import ( "context" "strings" "testing" "time" "github.com/ansible/receptor/pkg/netceptor" "github.com/ansible/receptor/tests/utils" ) func TestTCPSSLConnections(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() m := NewLibMesh() caKey, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { t.Fatal(err) } key1, crt1, err := utils.GenerateCertWithCA("node1", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node1"}) if err != nil { t.Fatal(err) } key2, crt2, err := utils.GenerateCertWithCA("node2", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node2"}) if err != nil { t.Fatal(err) } key3, crt3, err := utils.GenerateCertWithCA("node3", caKey, caCrt, "localhost", []string{"localhost"}, []string{"node3"}) if err != nil { t.Fatal(err) } defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "server", Key: key1, Cert: crt1, RequireClientCert: true, ClientCAs: caCrt, }, } node1.ListenerCfgs = 
map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "server", 1, nil), } m.GetNodes()[node1.GetID()] = node1 node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto, TLS: "client"}, } node2.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "server", Key: key2, Cert: crt2, RequireClientCert: true, ClientCAs: caCrt, }, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key2, Cert: crt2, RootCAs: caCrt, }, } node2.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "server", 1, nil), } m.GetNodes()[node2.GetID()] = node2 node3 := m.NewLibNode("node3") node3.Connections = []Connection{ {RemoteNode: node2, Protocol: proto, TLS: "client"}, } node3.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key3, Cert: crt3, RootCAs: caCrt, }, } m.GetNodes()[node3.GetID()] = node3 err = m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 20*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err) } // Test that each Node can ping each Node for _, nodeSender := range m.GetNodes() { controller := NewReceptorControl() err = controller.Connect(nodeSender.GetControlSocket()) if err != nil { t.Fatal(err) } for nodeIDResponder := range m.GetNodes() { response, err := controller.Ping(nodeIDResponder) if err != nil { t.Error(err) } t.Logf("%v", response) } controller.Close() } }) } } func TestTCPSSLClientAuthFailNoKey(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() _, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { t.Fatal(err) } key1, crt1, err := utils.GenerateCert("node1", "localhost", []string{"localhost"}, []string{"node1"}) if err != nil { t.Fatal(err) } m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "cert1", Key: key1, Cert: crt1, RequireClientCert: true, ClientCAs: caCrt, }, } node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "cert1", 1, nil), } m.GetNodes()[node1.GetID()] = node1 node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto, TLS: "client-insecure"}, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client-insecure", Key: "", Cert: "", }, } m.GetNodes()[node2.GetID()] = node2 err = m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel1() sleepInterval := 100 * time.Millisecond if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") return linuxTLSError || macTLSError }) { t.Fatal("Expected connection to fail but it succeeded") } }) } } func TestTCPSSLClientAuthFailBadKey(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() _, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { t.Fatal(err) } key1, crt1, err := utils.GenerateCert("node1", "localhost", []string{"localhost"}, 
[]string{"node1"}) if err != nil { t.Fatal(err) } key2, crt2, err := utils.GenerateCert("node2", "localhost", []string{"localhost"}, []string{"node2"}) if err != nil { t.Fatal(err) } m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "cert1", Key: key1, Cert: crt1, RequireClientCert: true, ClientCAs: caCrt, }, } node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "cert1", 1, nil), } m.GetNodes()[node1.GetID()] = node1 node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto, TLS: "client-insecure"}, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client-insecure", Key: key2, Cert: crt2, }, } m.GetNodes()[node2.GetID()] = node2 err = m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel1() sleepInterval := 100 * time.Millisecond if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") return linuxTLSError || macTLSError }) { t.Fatal("Expected connection to fail but it succeeded") } }) } } func TestTCPSSLServerAuthFailNoKey(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() _, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { t.Fatal(err) } key1, crt1, err := utils.GenerateCert("node2", "localhost", []string{"localhost"}, []string{"node2"}) if err != nil { t.Fatal(err) } m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "", 1, nil), } m.GetNodes()[node1.GetID()] = node1 node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto, TLS: "client"}, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client", Key: key1, Cert: crt1, RootCAs: caCrt, }, } m.GetNodes()[node2.GetID()] = node2 err = m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel1() sleepInterval := 100 * time.Millisecond if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { return strings.Contains(m.LogWriter.String(), "first record does not look like a TLS handshake") }) { t.Fatal("Expected connection to fail but it succeeded") } }) } } func TestTCPSSLServerAuthFailBadKey(t *testing.T) { t.Parallel() for _, proto := range []string{"tcp", "ws"} { proto := proto t.Run(proto, func(t *testing.T) { t.Parallel() _, caCrt, err := utils.GenerateCA("ca", "localhost") if err != nil { t.Fatal(err) } key1, crt1, err := utils.GenerateCert("node1", "localhost", []string{"localhost"}, []string{"node1"}) if err != nil { t.Fatal(err) } key2, crt2, err := utils.GenerateCert("node2", "localhost", []string{"localhost"}, []string{"node2"}) if err != nil { t.Fatal(err) } m := NewLibMesh() defer func() { t.Log(m.LogWriter.String()) }() node1 := m.NewLibNode("node1") node1.TLSServerConfigs = []*netceptor.TLSServerConfig{ { Name: "cert1", Key: key1, Cert: crt1, RequireClientCert: true, ClientCAs: caCrt, }, } 
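// node1 above requires client certificates signed by caCrt, while node2's
// "client-insecure" config further below presents a certificate but sets no
// RootCAs, so it cannot verify node1's server certificate (which GenerateCert
// signed with a throwaway CA). The handshake is therefore expected to fail, and
// the log check at the end of this subtest waits for the resulting
// unknown-authority (Linux) or untrusted-certificate (macOS) error.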
node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName(proto): newListenerCfg(proto, "cert1", 1, nil), } m.GetNodes()[node1.GetID()] = node1 node2 := m.NewLibNode("node2") node2.Connections = []Connection{ {RemoteNode: node1, Protocol: proto, TLS: "client-insecure"}, } node2.TLSClientConfigs = []*netceptor.TLSClientConfig{ { Name: "client-insecure", Key: key2, Cert: crt2, }, } m.GetNodes()[node2.GetID()] = node2 err = m.Start(t.Name()) if err != nil { t.Fatal(err) } defer m.WaitForShutdown() defer m.Destroy() ctx1, cancel1 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel1() sleepInterval := 100 * time.Millisecond if !utils.CheckUntilTimeout(ctx1, sleepInterval, func() bool { linuxTLSError := strings.Contains(m.LogWriter.String(), "certificate signed by unknown authority") macTLSError := strings.Contains(m.LogWriter.String(), "certificate is not trusted") return linuxTLSError || macTLSError }) { t.Fatal("Expected connection to fail but it succeeded") } }) } } receptor-1.5.3/tests/functional/mesh/work_test.go000066400000000000000000000516101475465740700222010ustar00rootroot00000000000000package mesh import ( "bytes" "context" "encoding/json" "fmt" "os" "path/filepath" "strings" "testing" "time" "github.com/ansible/receptor/pkg/workceptor" "github.com/ansible/receptor/tests/utils" ) func TestWorkSubmitWithTLSClient(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, expectedResults := workSetup(plugin, t) defer m.WaitForShutdown() defer m.Destroy() command := `{"command":"work","subcommand":"submit","worktype":"echosleepshort","tlsclient":"client","node":"node2","params":"", "ttl":"10h"}` unitID, err := controllers["node1"].WorkSubmitJSON(command) if err != nil { t.Fatal(err, m.DataDir) } ctx1, cancel1 := context.WithTimeout(context.Background(), 60*time.Second) defer cancel1() err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.DataDir) } err = controllers["node1"].AssertWorkResults(unitID, expectedResults) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node2"].AssertWorkResults(unitID, expectedResults) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } // Tests that submitting work with wrong cert CN immediately fails the job // also tests that releasing a job that has not been started on remote // will not attempt to connect to remote. 
func TestWorkSubmitWithIncorrectTLSClient(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, _ := workSetup(plugin, t) nodes := m.GetNodes() command := `{"command":"work","subcommand":"submit","worktype":"echosleepshort","tlsclient":"tlsclientwrongCN","node":"node2","params":""}` unitID, err := controllers["node1"].WorkSubmitJSON(command) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkFailed(ctx1, unitID) if err != nil { t.Fatal(err) } _, err = controllers["node1"].WorkRelease(unitID) if err != nil { t.Fatal(err) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = assertFilesReleased(ctx3, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) } }) } } func TestStartRemoteWorkWithTTL(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, _ := workSetup(plugin, t) defer func() { t.Log(m.LogWriter.String()) }() nodes := m.GetNodes() nodes["node2"].Shutdown() command := `{"command":"work","subcommand":"submit","worktype":"echosleepshort","tlsclient":"client","node":"node2","params":"","ttl":"5s"}` unitID, err := controllers["node1"].WorkSubmitJSON(command) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 10*time.Second) defer cancel1() err = controllers["node1"].AssertWorkTimedOut(ctx1, unitID) if err != nil { t.Fatal(err) } _, err = controllers["node1"].WorkRelease(unitID) if err != nil { t.Fatal(err) } ctx2, cancel2 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel2() err = controllers["node1"].AssertWorkReleased(ctx2, unitID) if err != nil { t.Fatal(err) } ctx3, cancel3 := context.WithTimeout(context.Background(), 5*time.Second) defer cancel3() err = assertFilesReleased(ctx3, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err) } }) } } func TestCancelThenReleaseRemoteWork(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, _ := workSetup(plugin, t) defer func() { t.Log(m.LogWriter.String()) }() nodes := m.GetNodes() unitID, err := controllers["node1"].WorkSubmit("node3", "echosleeplong") if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } _, err = controllers["node1"].WorkCancel(unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = controllers["node1"].AssertWorkCancelled(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } workStatus, err := controllers["node1"].GetWorkStatus(unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } remoteUnitID := workStatus.ExtraData.(map[string]interface{})["RemoteUnitID"].(string) if remoteUnitID == "" { t.Errorf("remoteUnitID should not be empty") } nodes["node1"].Shutdown() err = nodes["node1"].Start() if err != 
nil { t.Fatal(err, m.GetDataDir()) } ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].Close() if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].Reconnect() if err != nil { t.Fatal(err, m.GetDataDir()) } _, err = controllers["node1"].WorkRelease(unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx4, cancel4 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel4() err = controllers["node1"].AssertWorkReleased(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel5() err = assertFilesReleased(ctx5, nodes["node1"].GetDataDir(), "node1", unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx6, cancel6 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel6() err = assertFilesReleased(ctx6, nodes["node3"].GetDataDir(), "node3", remoteUnitID) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } func TestWorkSubmitWhileRemoteNodeIsDown(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, expectedResults := workSetup(plugin, t) nodes := m.GetNodes() nodes["node3"].Shutdown() unitID, err := controllers["node1"].WorkSubmit("node3", "echosleepshort") if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } err = nodes["node3"].Start() if err != nil { t.Fatal(err, m.GetDataDir()) } // Wait for node3 to join the mesh again ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, expectedResults) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } func TestWorkStreamingResumesWhenRelayNodeRestarts(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, expectedResults := workSetup(plugin, t) nodes := m.GetNodes() unitID, err := controllers["node1"].WorkSubmit("node3", "echosleeplong") if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = assertStdoutFizeSize(ctx2, nodes["node1"].GetDataDir(), "node1", unitID, 1) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, expectedResults[:1]) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes["node2"].Shutdown() nodes["node2"].Start() // Wait for node2 to join the mesh again ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = m.WaitForReady(ctx3) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx4, cancel4 := 
context.WithTimeout(context.Background(), 120*time.Second) defer cancel4() err = controllers["node1"].AssertWorkSucceeded(ctx4, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx5, cancel5 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel5() err = assertStdoutFizeSize(ctx5, nodes["node1"].GetDataDir(), "node1", unitID, 10) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, expectedResults) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } func TestResultsOnRestartedNode(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, expectedResults := workSetup(plugin, t) nodes := m.GetNodes() unitID, err := controllers["node1"].WorkSubmit("node3", "echosleeplong") if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkRunning(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes["node3"].Shutdown() err = nodes["node3"].Start() if err != nil { t.Fatal(err, m.GetDataDir()) } // Wait for node3 to join the mesh again ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = controllers["node1"].AssertWorkSucceeded(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, expectedResults) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } func TestWorkSubmitAndReleaseToNonexistentNode(t *testing.T) { t.Parallel() for _, plugin := range workPlugins { plugin := plugin t.Run(string(plugin), func(t *testing.T) { t.Parallel() controllers, m, _ := workSetup(plugin, t) nodes := m.GetNodes() // submit work from node1 to non-existent-node // node999 was never initialised unitID, err := controllers["node1"].WorkSubmit("node999", "echosleeplong") if err != nil { t.Fatal(err, m.GetDataDir()) } // wait for 10 seconds, and check if the work is in pending state ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = controllers["node1"].AssertWorkPending(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes["node1"].Shutdown() err = nodes["node1"].Start() if err != nil { t.Fatal(err, m.GetDataDir()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = m.WaitForReady(ctx2) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].Close() if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].Reconnect() if err != nil { t.Fatal(err, m.GetDataDir()) } // release the work on node1 _, err = controllers["node1"].WorkRelease(unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx3, cancel3 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel3() err = controllers["node1"].AssertWorkReleased(ctx3, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } }) } } func TestRuntimeParams(t *testing.T) { m := NewLibMesh() node1 := m.NewLibNode("node1") node1.workerConfigs = []workceptor.WorkerConfig{ workceptor.CommandWorkerCfg{ WorkType: "echo", Command: "echo", AllowRuntimeParams: true, }, } err := m.Start(t.Name()) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := 
context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) controllers["node1"] = NewReceptorControl() err = controllers["node1"].Connect(nodes["node1"].GetControlSocket()) if err != nil { t.Fatal(err, m.GetDataDir()) } command := `{"command":"work","subcommand":"submit","worktype":"echo","node":"localhost","params":"it worked!"}` unitID, err := controllers["node1"].WorkSubmitJSON(command) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, []byte("it worked!")) if err != nil { t.Fatal(err, m.GetDataDir()) } } func TestKubeRuntimeParams(t *testing.T) { checkSkipKube(t) m := NewLibMesh() node1 := m.NewLibNode("node1") node1.workerConfigs = []workceptor.WorkerConfig{ workceptor.KubeWorkerCfg{ WorkType: "echo", AuthMethod: "runtime", Namespace: "default", AllowRuntimePod: true, AllowRuntimeAuth: true, }, } m.Start(t.Name()) ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) controllers["node1"] = NewReceptorControl() err = controllers["node1"].Connect(nodes["node1"].GetControlSocket()) if err != nil { t.Fatal(err, m.GetDataDir()) } submitJSON := new(bytes.Buffer) err = json.Compact(submitJSON, []byte(`{ "command": "work", "subcommand": "submit", "node": "localhost", "worktype": "echo", "secret_kube_pod": "%s", "secret_kube_config": "%s" }`)) if err != nil { t.Fatal(err) } kubeConfigBytes, err := os.ReadFile(filepath.Join(os.Getenv("HOME"), ".kube/config")) if err != nil { t.Fatal(err) } // is there a better way to do this? kubeConfig := strings.ReplaceAll(string(kubeConfigBytes), "\n", "\\n") echoPodBytes, err := os.ReadFile("testdata/echo-pod.yml") if err != nil { t.Fatal(err) } // is there a better way to do this? 
echoPod := strings.ReplaceAll(string(echoPodBytes), "\n", "\\n") command := fmt.Sprintf(submitJSON.String(), echoPod, kubeConfig) unitID, err := controllers["node1"].WorkSubmitJSON(command) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkSucceeded(ctx1, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } err = controllers["node1"].AssertWorkResults(unitID, []byte("1\n2\n3\n4\n5\n")) if err != nil { t.Fatal(err, m.GetDataDir()) } } func TestRuntimeParamsNotAllowed(t *testing.T) { m := NewLibMesh() node1 := m.NewLibNode("node1") node1.workerConfigs = []workceptor.WorkerConfig{ workceptor.CommandWorkerCfg{ WorkType: "echo", Command: "echo", AllowRuntimeParams: false, }, } err := m.Start(t.Name()) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) controllers["node1"] = NewReceptorControl() err = controllers["node1"].Connect(nodes["node1"].GetControlSocket()) if err != nil { t.Fatal(err, m.GetDataDir()) } command := `{"command":"work","subcommand":"submit","worktype":"echo","node":"node1","params":"it worked!"}` _, err = controllers["node1"].WorkSubmitJSON(command) if err == nil { t.Fatal("Expected this to fail") } if !strings.Contains(err.Error(), "extra params provided but not allowed") { t.Fatal("Did not see the expected error") } } func TestKubeContainerFailure(t *testing.T) { checkSkipKube(t) m := NewLibMesh() node1 := m.NewLibNode("node1") node1.workerConfigs = []workceptor.WorkerConfig{ workceptor.KubeWorkerCfg{ WorkType: "kubejob", AuthMethod: "kubeconfig", Image: "alpine", KubeConfig: filepath.Join(os.Getenv("HOME"), ".kube/config"), Namespace: "default", Command: "thiscommandwontexist", }, } m.Start(t.Name()) ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err := m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) controllers["node1"] = NewReceptorControl() err = controllers["node1"].Connect(nodes["node1"].GetControlSocket()) if err != nil { t.Fatal(err, m.GetDataDir()) } job := `{"command":"work","subcommand":"submit","worktype":"kubejob","node":"node1"}` unitID, err := controllers["node1"].WorkSubmitJSON(job) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = controllers["node1"].AssertWorkFailed(ctx2, unitID) if err != nil { t.Fatal("Expected work to fail but it succeeded") } status, err := controllers["node1"].GetWorkStatus(unitID) if err != nil { t.Fatal("Could not check status") } expected := `executable file not found in $PATH` actual := status.Detail if !strings.Contains(actual, expected) { t.Fatalf("Did not see the expected error. 
Wanted %s, got: %s", expected, actual) } } func TestSignedWorkVerification(t *testing.T) { t.Parallel() privateKey, publicKey, err := utils.GenerateRSAPair() if err != nil { t.Fatal(err) } _, publicKeyWrong, err := utils.GenerateRSAPair() if err != nil { t.Fatal(err) } m := NewLibMesh() node1 := m.NewLibNode("node1") node1.WorkSigningKey = &workceptor.SigningKeyPrivateCfg{ PrivateKey: privateKey, } node1.ListenerCfgs = map[listenerName]ListenerCfg{ listenerName("tcp"): newListenerCfg("tcp", "", 1, nil), } node2 := m.NewLibNode("node2") node2.workerConfigs = []workceptor.WorkerConfig{ workceptor.CommandWorkerCfg{ WorkType: "echo", Command: "echo", VerifySignature: true, }, } node2.WorkVerificationKey = &workceptor.VerifyingKeyPublicCfg{ PublicKey: publicKey, } node2.Connections = []Connection{ {RemoteNode: node1, Protocol: "tcp"}, } node3 := m.NewLibNode("node3") node3.workerConfigs = []workceptor.WorkerConfig{ workceptor.CommandWorkerCfg{ WorkType: "echo", Command: "echo", VerifySignature: true, }, } node3.WorkVerificationKey = &workceptor.VerifyingKeyPublicCfg{ PublicKey: publicKeyWrong, } node3.Connections = []Connection{ {RemoteNode: node1, Protocol: "tcp"}, } err = m.Start(t.Name()) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.GetDataDir()) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) controllers["node1"] = NewReceptorControl() err = controllers["node1"].Connect(nodes["node1"].GetControlSocket()) if err != nil { t.Fatal(err, m.GetDataDir()) } job := `{"command":"work","subcommand":"submit","worktype":"echo","node":"node2", "signwork":"true"}` unitID, err := controllers["node1"].WorkSubmitJSON(job) if err != nil { t.Fatal(err, m.GetDataDir()) } ctx2, cancel2 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel2() err = controllers["node1"].AssertWorkSucceeded(ctx2, unitID) if err != nil { t.Fatal(err, m.GetDataDir()) } // node3 has the wrong public key to verify work signatures, so the work submission should fail job = `{"command":"work","subcommand":"submit","worktype":"echo","node":"node3", "signwork":"true"}` _, err = controllers["node1"].WorkSubmitJSON(job) if err == nil { t.Fatal("expected work submission to fail") } expected := `could not verify signature: crypto/rsa: verification error` actual := err.Error() if !strings.Contains(actual, expected) { t.Fatalf("Did not see the expected error. 
Wanted %s, got: %s", expected, actual) } } receptor-1.5.3/tests/functional/mesh/work_utils.go000066400000000000000000000035731475465740700223670ustar00rootroot00000000000000package mesh import ( "context" "fmt" "os" "path/filepath" "testing" "time" "github.com/ansible/receptor/tests/utils" ) func workSetup(workPluginName workPlugin, t *testing.T) (map[string]*ReceptorControl, *LibMesh, []byte) { checkSkipKube(t) m := workTestMesh(workPluginName) err := m.Start(t.Name()) if err != nil { t.Fatal(err) } ctx1, cancel1 := context.WithTimeout(context.Background(), 120*time.Second) defer cancel1() err = m.WaitForReady(ctx1) if err != nil { t.Fatal(err, m.DataDir) } nodes := m.GetNodes() controllers := make(map[string]*ReceptorControl) for k := range nodes { controller := NewReceptorControl() err = controller.Connect(nodes[k].GetControlSocket()) if err != nil { t.Fatal(err, m.DataDir) } controllers[k] = controller } return controllers, m, []byte("1\n2\n3\n4\n5\n") } func assertFilesReleased(ctx context.Context, nodeDir, nodeID, unitID string) error { workPath := filepath.Join(nodeDir, "datadir", nodeID, unitID) check := func() bool { _, err := os.Stat(workPath) return os.IsNotExist(err) } if !utils.CheckUntilTimeout(ctx, 5*time.Second, check) { return fmt.Errorf("unitID %s on %s did not release", unitID, nodeID) } return nil } func assertStdoutFizeSize(ctx context.Context, dataDir, nodeID, unitID string, waitUntilSize int) error { stdoutFilename := filepath.Join(dataDir, nodeID, unitID, "stdout") check := func() bool { _, err := os.Stat(stdoutFilename) if os.IsNotExist(err) { return false } fstat, _ := os.Stat(stdoutFilename) return int(fstat.Size()) >= waitUntilSize } if !utils.CheckUntilTimeout(ctx, 3000*time.Millisecond, check) { return fmt.Errorf("file size not correct for %s", stdoutFilename) } return nil } func checkSkipKube(t *testing.T) { if skip := os.Getenv("SKIP_KUBE"); skip == "1" { t.Skip("Kubernetes tests are set to skip, unset SKIP_KUBE to run them") } } receptor-1.5.3/tests/goroutines/000077500000000000000000000000001475465740700167265ustar00rootroot00000000000000receptor-1.5.3/tests/goroutines/simple_config.go000066400000000000000000000106101475465740700220710ustar00rootroot00000000000000package main import ( "context" "io" "net" "strings" "testing" "time" "github.com/ansible/receptor/pkg/backends" "github.com/ansible/receptor/pkg/netceptor" "go.uber.org/goleak" ) func TestGoleakSimpleConfig(t *testing.T) { defer goleak.VerifyNone(t) // // Note! // It is important that this test do not run in parellel since it modifies package-level variables. // defaultIdleTimeout := netceptor.MaxIdleTimeoutForQuicConnections defaultQuicKeepAlive := netceptor.KeepAliveForQuicConnections defer func() { netceptor.MaxIdleTimeoutForQuicConnections = defaultIdleTimeout netceptor.KeepAliveForQuicConnections = defaultQuicKeepAlive }() // Change MaxIdleTimeoutForQuicConnections to 1 seconds (default in lib is 30, our code is 60) netceptor.MaxIdleTimeoutForQuicConnections = 1 * time.Second // We also have to disable heart beats or the connection will not properly timeout netceptor.KeepAliveForQuicConnections = false // Create two nodes of the Receptor network-layer protocol (Netceptors). 
n1 := netceptor.New(context.Background(), "node1") n2 := netceptor.New(context.Background(), "node2") // Start a TCP listener on the first node b1, err := backends.NewTCPListener("localhost:3334", nil, n1.Logger) if err != nil { t.Fatalf("Error listening on TCP: %s\n", err) } err = n1.AddBackend(b1) if err != nil { t.Fatalf("Error starting backend: %s\n", err) } // Start a TCP dialer on the second node - this will connect to the listener we just started b2, err := backends.NewTCPDialer("localhost:3334", false, nil, n2.Logger) if err != nil { t.Fatalf("Error dialing on TCP: %s\n", err) } err = n2.AddBackend(b2) if err != nil { t.Fatalf("Error starting backend: %s\n", err) } // Start an echo server on node 1 l1, err := n1.Listen("echo", nil) if err != nil { t.Fatalf("Error listening on Receptor network: %s\n", err) } go func() { // Accept an incoming connection - note that conn is just a regular net.Conn conn, err := l1.Accept() if err != nil { t.Fatalf("Error accepting connection: %s\n", err) return } go func() { defer conn.Close() buf := make([]byte, 1024) done := false for !done { n, err := conn.Read(buf) if err == io.EOF { done = true } else if err != nil { // Is ok if we got a 'NO_ERROR: No recent network activity' error but anything else is a test failure. if strings.Contains(err.Error(), "no recent network activity") { t.Log("Successfully got the desired timeout error") } else { t.Fatalf("Read error in Receptor listener: %s\n", err) } return } if n > 0 { _, err := conn.Write(buf[:n]) if err != nil { t.Fatalf("Write error in Receptor listener: %s\n", err) return } } } }() }() // Connect to the echo server from node 2. We expect this to error out at first with // "no route to node" because it takes a second or two for node1 and node2 to exchange // routing information and form a mesh var c2 net.Conn for { c2, err = n2.Dial("node1", "echo", nil) if err != nil { time.Sleep(1 * time.Second) continue } break } // Sleep longer than MaxIdleTimeout (see pkg/netceptor/conn.go for current setting) sleepDuration := 6 * time.Second time.Sleep(sleepDuration) // Start a listener function that prints received data to the screen // Note that because net.Conn is a stream connection, it is not guaranteed // that received messages will be the same size as the messages that are sent. // For datagram use, Receptor also provides a net.PacketConn. go func() { rbuf := make([]byte, 1024) for { n, err := c2.Read(rbuf) if n > 0 { n2.Shutdown() t.Fatal("Should not have gotten data back") return } if err == io.EOF { // Shut down the whole Netceptor when any connection closes, because this is just a demo n2.Shutdown() t.Fatal("Should not have gotten an EOF") return } if err != nil { n2.Shutdown() return } } }() // Send some data, which should be processed through the echo server back to our // receive function and printed to the screen. _, err = c2.Write([]byte("Hello, world!")) if !(err != nil && err != io.EOF) { t.Fatal("We should have gotten an error here") } // Close our end of the connection _ = c2.Close() // Wait for n2 to shut down n2.BackendWait() // Gracefully shut down n1 n1.Shutdown() n1.BackendWait() time.Sleep(10 * time.Second) } receptor-1.5.3/tests/receptor-tester.sh000077500000000000000000000104231475465740700202160ustar00rootroot00000000000000#!/bin/bash set -e # Go to current script dir current_dir=$( cd "$( dirname "$0" )" >/dev/null 2>&1 && pwd ) cd ${current_dir} # Global vars tests_dirs="" for f in $(find . 
-name *_test.go); do tests_dirs="${tests_dirs} $(dirname $f)" done tests_dirs=$(echo $tests_dirs | sed 's/ /\n/g' | uniq) tests_files=$(find . -name *_test.go) # Switch between podman or docker if command -v docker &> /dev/null ; then export CONTAINER_RUN=docker elif command -v podman &> /dev/null ; then export CONTAINER_RUN=podman fi # Logs. Based on: # https://github.com/containers/Demos/blob/master/building/buildah_intro/buildah_intro.sh if [[ ${TERM} != "dumb" ]]; then bold=$(tput bold) reset=$(tput sgr0) red=$(tput setaf 1) green=$(tput setaf 2) yellow=$(tput setaf 3) blue=$(tput setaf 4) purple=$(tput setaf 5) cyan=$(tput setaf 6) white=$(tput setaf 7) grey=$(tput setaf 8) vivid_red=$(tput setaf 9) vivid_green=$(tput setaf 10) vivid_yellow=$(tput setaf 11) vivid_blue=$(tput setaf 12) vivid_purple=$(tput setaf 13) vivid_cyan=$(tput setaf 14) fi log() { if [[ $1 == *"["*"]"* ]]; then out=$(echo $1 | sed "s/]/]${reset}/g") echo "${bold}$out${reset}" else echo "${bold}$1${reset}" fi } log_bold() { echo "${bold}$1${reset}" } log_info() { if [[ $1 == *"["*"]"* ]]; then out=$(echo $1 | sed "s/]/]${reset}/g") echo "${vivid_purple}$out${reset}" else echo "${cyan}$1${reset}" fi } log_warning() { echo "${vivid_yellow}$1${reset}" } log_error() { echo "${vivid_red}$1${reset}" } # Helpers function check_requirements(){ # Check if podman is installed if [ -z "${CONTAINER_RUN}" ] then log_error "ERROR: podman OR docker must be installed!" exit 1 fi # Check if make is installed if ! command -v make &> /dev/null then log_error "ERROR: make is not installed!" exit 1 fi # Check if Makefile exists if [ ! -f "${current_dir}/Makefile" ] then echo ${current_dir}/Makefile log_error "ERROR: Makefile not found in current dir!" exit 1 fi } function build_artifacts(){ make artifacts } # Import main libs and vars function f_list_dirs() { echo $tests_dirs | sed 's/ /\n/g' } function f_list_files() { echo $tests_files | sed 's/ /\n/g' } function f_run() { # Ensure podman is installed check_requirements # Build artifacts if not exist yet if ! test -f "./artifacts/receptor"; then log_warning "# Artifacts not found..." build_artifacts fi # Get container image tag container_image_tag=$(cat Makefile | grep "^CONTAINER_IMAGE_TAG_BASE=" | cut -d'=' -f 2) # Fix possible permission issue chmod +x ./artifacts/receptor # Run test command inside the container RUN_CMD=' PATH=$PATH:/artifacts && cd /source/tests && set -x && go test -v '$@' ' ${CONTAINER_RUN} run -it --rm \ -v $(pwd)/../:/source/:ro \ -v $(pwd)/artifacts:/artifacts/:rw \ -v receptor_go_root_cache:/root/go:rw \ $container_image_tag bash -c "${RUN_CMD}" } function f_run_all() { for f in ${tests_files} ; do log_info "# START Test for '${f}'" $0 run $f done } function f_help() { echo cat <<- HELP_INFO Command list: list-dirs list all available tests directories list-files list all available tests files run run a specific test run-all run all tests. Returns 0 if pass container-runtime show container runtime available requirements check all system requirements help Displays this help text HELP_INFO } # Menu if [[ $1 == "list-dirs" ]]; then shift f_list_dirs $@ exit elif [[ $1 == "list-files" ]]; then shift f_list_files $@ exit elif [[ $1 == "run" ]]; then shift f_run $@ exit elif [[ $1 == "run-all" ]]; then shift f_run_all $@ exit elif [[ $1 == "container-runtime" ]]; then echo ${CONTAINER_RUN} exit elif [[ $1 == "requirements" ]]; then check_requirements log_info "All system requirements are satisfied!" 
exit elif [[ $1 == "help" ]]; then shift f_help exit else log_error "[ERROR] Command not supported!" f_help exit 1 fi receptor-1.5.3/tests/utils/000077500000000000000000000000001475465740700156705ustar00rootroot00000000000000receptor-1.5.3/tests/utils/common.go000066400000000000000000000127451475465740700175200ustar00rootroot00000000000000package utils import ( "context" "crypto/rand" "crypto/rsa" "net" "os" "path/filepath" "time" "github.com/ansible/receptor/pkg/certificates" ) // GenerateCA generates a CA certificate and key. func GenerateCA(name, commonName string) (string, string, error) { dir, err := os.MkdirTemp("", "") if err != nil { return "", "", err } keyPath := filepath.Join(dir, name+".key") crtPath := filepath.Join(dir, name+".crt") // Create our certificate and private key CA, err := certificates.CreateCA(&certificates.CertOptions{CommonName: commonName, Bits: 2048}, &certificates.RsaWrapper{}) if err != nil { return "", "", err } err = certificates.SaveToPEMFile(crtPath, []interface{}{CA.Certificate}, &certificates.OsWrapper{}) if err != nil { return "", "", err } err = certificates.SaveToPEMFile(keyPath, []interface{}{CA.PrivateKey}, &certificates.OsWrapper{}) if err != nil { return "", "", err } return keyPath, crtPath, nil } // GenerateCert generates a private and public key for testing in the directory specified. func GenerateCert(name, commonName string, dnsNames, nodeIDs []string) (string, string, error) { dir, err := os.MkdirTemp("", "") if err != nil { return "", "", err } keyPath := filepath.Join(dir, name+".key") crtPath := filepath.Join(dir, name+".crt") // Create a temporary CA to sign this certificate CA, err := certificates.CreateCA(&certificates.CertOptions{CommonName: "temp ca", Bits: 2048}, &certificates.RsaWrapper{}) if err != nil { return "", "", err } // Create certificate request req, key, err := certificates.CreateCertReqWithKey(&certificates.CertOptions{ CommonName: commonName, Bits: 2048, CertNames: certificates.CertNames{ DNSNames: dnsNames, NodeIDs: nodeIDs, }, }) if err != nil { return "", "", err } // Sign the certificate cert, err := certificates.SignCertReq(req, CA, &certificates.CertOptions{}) if err != nil { return "", "", err } // Save cert and key to files err = certificates.SaveToPEMFile(crtPath, []interface{}{cert}, &certificates.OsWrapper{}) if err != nil { return "", "", err } err = certificates.SaveToPEMFile(keyPath, []interface{}{key}, &certificates.OsWrapper{}) if err != nil { return "", "", err } return keyPath, crtPath, nil } // GenerateCertWithCA generates a private and public key for testing in the directory // specified using the ca specified. 
func GenerateCertWithCA(name, caKeyPath, caCrtPath, commonName string, dnsNames, nodeIDs []string) (string, string, error) { dir, err := os.MkdirTemp("", "") if err != nil { return "", "", err } CA := &certificates.CA{} CA.Certificate, err = certificates.LoadCertificate(caCrtPath, &certificates.OsWrapper{}) if err != nil { return "", "", err } CA.PrivateKey, err = certificates.LoadPrivateKey(caKeyPath, &certificates.OsWrapper{}) if err != nil { return "", "", err } keyPath := filepath.Join(dir, name+".key") crtPath := filepath.Join(dir, name+".crt") // Create certificate request req, key, err := certificates.CreateCertReqWithKey(&certificates.CertOptions{ CommonName: commonName, Bits: 2048, CertNames: certificates.CertNames{ DNSNames: dnsNames, NodeIDs: nodeIDs, }, }) if err != nil { return "", "", err } // Sign the certificate cert, err := certificates.SignCertReq(req, CA, &certificates.CertOptions{}) if err != nil { return "", "", err } // Save cert and key to files err = certificates.SaveToPEMFile(crtPath, []interface{}{cert}, &certificates.OsWrapper{}) if err != nil { return "", "", err } err = certificates.SaveToPEMFile(keyPath, []interface{}{key}, &certificates.OsWrapper{}) if err != nil { return "", "", err } return keyPath, crtPath, nil } func GenerateRSAPair() (string, string, error) { dir, err := os.MkdirTemp("", "") if err != nil { return "", "", err } privateKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { return "", "", err } publicKey := &privateKey.PublicKey privateKeyPath := filepath.Join(dir, "private.pem") err = certificates.SaveToPEMFile(privateKeyPath, []interface{}{privateKey}, &certificates.OsWrapper{}) if err != nil { return "", "", err } publicKeyPath := filepath.Join(dir, "public.pem") err = certificates.SaveToPEMFile(publicKeyPath, []interface{}{publicKey}, &certificates.OsWrapper{}) if err != nil { return "", "", err } return privateKeyPath, publicKeyPath, nil } // CheckUntilTimeout Polls the check function until the context expires, in // which case it returns false. func CheckUntilTimeout(ctx context.Context, interval time.Duration, check func() bool) bool { for ready := check(); !ready; ready = check() { if ctx.Err() != nil { return false } time.Sleep(interval) } return true } // CheckUntilTimeoutWithErr does the same as CheckUntilTimeout but requires the // check function returns (bool, error), and will return an error immediately // if the check function returns an error. func CheckUntilTimeoutWithErr(ctx context.Context, interval time.Duration, check func() (bool, error)) (bool, error) { for ready, err := check(); !ready; ready, err = check() { if err != nil { return false, err } if ctx.Err() != nil { return false, nil //nolint:nilerr // Make this nice later. } time.Sleep(interval) } return true, nil } func GetFreeTCPPort() (int, error) { addr, err := net.ResolveTCPAddr("tcp", "localhost:0") if err != nil { return 0, err } l, err := net.ListenTCP("tcp", addr) if err != nil { return 0, err } defer l.Close() return l.Addr().(*net.TCPAddr).Port, nil } receptor-1.5.3/tests/utils/logs.go000066400000000000000000000011661475465740700171670ustar00rootroot00000000000000package utils import ( "bytes" "sync" ) // testLogWriter provides a threadsafe way of reading and writing logs to a buffer. 
type TestLogWriter struct { Lock *sync.RWMutex Buffer *bytes.Buffer } func (lw *TestLogWriter) Write(p []byte) (n int, err error) { lw.Lock.Lock() defer lw.Lock.Unlock() n, err = lw.Buffer.Write(p) if err != nil { return 0, err } return n, nil } func (lw *TestLogWriter) String() string { lw.Lock.RLock() defer lw.Lock.RUnlock() return lw.Buffer.String() } func NewTestLogWriter() *TestLogWriter { return &TestLogWriter{ Lock: &sync.RWMutex{}, Buffer: &bytes.Buffer{}, } } receptor-1.5.3/tools/000077500000000000000000000000001475465740700145265ustar00rootroot00000000000000receptor-1.5.3/tools/README.md000066400000000000000000000001301475465740700157770ustar00rootroot00000000000000# Receptor docs ## Examples - 1. [Simple receptor network](./examples/simple-network) receptor-1.5.3/tools/ansible/000077500000000000000000000000001475465740700161435ustar00rootroot00000000000000receptor-1.5.3/tools/ansible/stage.yml000066400000000000000000000031021475465740700177650ustar00rootroot00000000000000--- - hosts: localhost connection: local vars: headers: Accept: "application/vnd.github+json" Authorization: "Bearer {{ github_token }}" X-GitHub-Api-Version: "2022-11-28" tasks: # The next 2 tasks can maybe get deleted if GitHub ever fixes this: # https://github.com/orgs/community/discussions/4924 - name: Create tag object uri: url: "https://api.github.com/repos/{{ repo }}/git/tags" method: "POST" headers: "{{ headers }}" body: tag: "v{{ version }}" message: "Release for v{{ version }}" object: "{{ target_commitish }}" type: "commit" tagger: name: "{{ tagger_name }}" email: "{{ tagger_email }}" body_format: "json" status_code: - 200 - 201 register: tag - name: Create tag reference uri: url: "https://api.github.com/repos/{{ repo }}/git/refs" method: "POST" headers: "{{ headers }}" body: ref: "refs/tags/{{ tag.json.tag }}" sha: "{{ tag.json.sha }}" body_format: "json" status_code: - 200 - 201 - name: Publish draft Release uri: url: "https://api.github.com/repos/{{ repo }}/releases" method: "POST" headers: "{{ headers }}" body: name: "v{{ version }}" tag_name: "v{{ version }}" target_commitish: "{{ target_commitish }}" draft: true body_format: "json" status_code: - 200 - 201 receptor-1.5.3/tools/examples/000077500000000000000000000000001475465740700163445ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/000077500000000000000000000000001475465740700213245ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/.gitignore000066400000000000000000000000111475465740700233040ustar00rootroot00000000000000!build/* receptor-1.5.3/tools/examples/simple-network/README.md000066400000000000000000000017551475465740700226130ustar00rootroot00000000000000# receptor - basic example This example creates two receptor nodes, called `arceus` and `celebi`. ## Diagram The `ctl.sh` script (equivalent to `receptorctl`) sends the message to Celebi through an unix domain socket `socks/celebi.sock`, then Celebi forwards that message to Arceus through the docker-compose network. ![Receptor simple network diagram](./simple-network-diagram.drawio.png) ## Commands ```bash # Build and run docker-compose up -d # Destroy and cleanup docker-compose down rm -rf ./socks/ # Run commands on receptorctl ./ctl.sh # Examples ./ctl.sh ping celebi ./ctl.sh ping arceus ``` ## Config files Celebi socket is exposed: Full Celebi config file can be found here: [configs/celebi.yaml](configs/celebi.yaml) ```yaml ... 
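# receptorctl reaches Celebi through this control socket: docker-compose mounts
# ./socks/ into both the celebi and receptorctl containers, and the receptorctl
# service sets RECEPTORCTL_SOCKET=/socks/celebi.sock.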
- control-service: service: control filename: /socks/celebi.sock ``` Arceus port is exposed: Full Arceus config file can be found here: [configs/arceus.yaml](configs/arceus.yaml) ```yaml ... - tcp-listener: port: 2223 ... ``` receptor-1.5.3/tools/examples/simple-network/build/000077500000000000000000000000001475465740700224235ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/build/receptor/000077500000000000000000000000001475465740700242465ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/build/receptor/Dockerfile000066400000000000000000000002671475465740700262450ustar00rootroot00000000000000FROM quay.io/ansible/receptor RUN set -x \ # Set fastest repo echo 'fastestmirror=1' >> /etc/dnf/dnf.conf \ # Add debug tools && yum install -y iputils nano htop vim receptor-1.5.3/tools/examples/simple-network/build/receptorctl/000077500000000000000000000000001475465740700247515ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/build/receptorctl/Dockerfile000066400000000000000000000003621475465740700267440ustar00rootroot00000000000000FROM alpine WORKDIR /opt RUN set -x \ && apk add php py3-pip git bash \ && git clone --depth 1 https://github.com/ansible/receptor.git \ && pip3 install -e ./receptor/receptorctl WORKDIR /app CMD ["php", "-S", "0.0.0.0:8080"] receptor-1.5.3/tools/examples/simple-network/configs/000077500000000000000000000000001475465740700227545ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/configs/arceus.yaml000066400000000000000000000002411475465740700251170ustar00rootroot00000000000000--- - node: id: arceus - tcp-listener: port: 2223 - work-command: worktype: echo command: echo params: arceus allowruntimeparams: true receptor-1.5.3/tools/examples/simple-network/configs/celebi.yaml000066400000000000000000000002521475465740700250620ustar00rootroot00000000000000--- - log-level: level: debug - node: id: celebi - tcp-peer: address: arceus:2223 - control-service: service: control filename: /socks/celebi.sock receptor-1.5.3/tools/examples/simple-network/ctl.sh000077500000000000000000000001111475465740700224360ustar00rootroot00000000000000#!/bin/bash docker exec -it simple-network_receptorctl_1 receptorctl $@ receptor-1.5.3/tools/examples/simple-network/docker-compose.yml000066400000000000000000000011321475465740700247560ustar00rootroot00000000000000version: "3.9" services: arceus: build: ./build/receptor/ command: receptor -c /configs/arceus.yaml volumes: - ./configs/:/configs networks: - receptor-network celebi: build: ./build/receptor/ command: receptor -c /configs/celebi.yaml volumes: - ./configs/:/configs - ./socks/:/socks networks: - receptor-network receptorctl: build: ./build/receptorctl/ volumes: - ./socks/:/socks environment: RECEPTORCTL_SOCKET: /socks/celebi.sock networks: - random networks: receptor-network: {} random: {}receptor-1.5.3/tools/examples/simple-network/simple-network-diagram.drawio.png000066400000000000000000001037511475465740700277070ustar00rootroot00000000000000PNG  IHDR 
[simple-network-diagram.drawio.png: binary PNG data omitted; the embedded draw.io diagram shows the user running ctl.sh, which drives receptorctl, which sends commands to celebi over the shared /socks unix socket, while celebi reaches arceus over the receptor-network on port 2223]
receptor-1.5.3/tools/examples/simple-network/socks/000077500000000000000000000000001475465740700224465ustar00rootroot00000000000000receptor-1.5.3/tools/examples/simple-network/socks/.gitignore000066400000000000000000000000161475465740700244330ustar00rootroot00000000000000* !.gitignore