pax_global_header 0000666 0000000 0000000 00000000064 14754657407 0014535 g ustar 00root root 0000000 0000000 52 comment=122aa3e74aae8929741a7f6c7629237d8b2bf66b
receptor-1.5.3/ 0000775 0000000 0000000 00000000000 14754657407 0013366 5 ustar 00root root 0000000 0000000 receptor-1.5.3/.dockerignore 0000664 0000000 0000000 00000000363 14754657407 0016044 0 ustar 00root root 0000000 0000000 .idea
receptor
receptor.exe
receptor.app
recepcert
net
receptorctl-test-venv/
.container-flag*
.VERSION
kubectl
/receptorctl/AUTHORS
/receptorctl/ChangeLog
/receptor-python-worker/ChangeLog
/receptor-python-worker/AUTHORS
.vagrant/
Dockerfile
receptor-1.5.3/.github/ 0000775 0000000 0000000 00000000000 14754657407 0014726 5 ustar 00root root 0000000 0000000 receptor-1.5.3/.github/codecov.yml 0000664 0000000 0000000 00000002151 14754657407 0017072 0 ustar 00root root 0000000 0000000 ---
coverage:
status:
project:
default:
target: auto
threshold: 5%
patch:
default:
target: auto
threshold: 5%
comment:
layout: "header, diff, files, components" # PR comment layout
behavior: default
require_changes: false
require_base: false
require_head: true
after_n_builds: 1
codecov:
branch: devel
notify:
wait_for_ci: false
after_n_builds: 1
component_management:
default_rules: # default rules that will be inherited by all components
statuses: # Status definitions
- type: project # Default status = project
target: auto
threshold: 5%
branches:
- "!devel"
individual_components: # Actual component breakdown
- component_id: go # Required unique identifier for component (Should not be changed)
name: Go # Display name (can be changed)
paths:
- cmd/**
- internal/**
- pkg/**
- component_id: receptorctl
name: Receptorctl
paths:
- receptorctl/**
ignore:
- "**/mock_*"
- "**/*_test.go"
- "receptorctl/tests"
- "tests"
receptor-1.5.3/.github/dependabot.yml 0000664 0000000 0000000 00000001501 14754657407 0017553 0 ustar 00root root 0000000 0000000 ---
version: 2
updates:
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "daily"
labels:
- "dependencies"
- "go"
ignore:
- dependency-name: "golang"
versions: ["1.23", "1.24"]
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "daily"
labels:
- "dependencies"
- "github-actions"
- package-ecosystem: "pip"
directory: "/receptor-python-worker"
groups:
dependencies:
patterns:
- "*"
schedule:
interval: "daily"
labels:
- "dependencies"
- "pip"
- package-ecosystem: "pip"
directory: "/receptorctl"
groups:
dependencies:
patterns:
- "*"
schedule:
interval: "daily"
labels:
- "dependencies"
- "pip"
receptor-1.5.3/.github/issue_labeler.yml 0000664 0000000 0000000 00000000037 14754657407 0020267 0 ustar 00root root 0000000 0000000 ---
needs_triage:
- '.*'
...
receptor-1.5.3/.github/workflows/ 0000775 0000000 0000000 00000000000 14754657407 0016763 5 ustar 00root root 0000000 0000000 receptor-1.5.3/.github/workflows/artifact-k8s-logs.sh 0000775 0000000 0000000 00000000507 14754657407 0022566 0 ustar 00root root 0000000 0000000 #!/bin/bash
PODS_DIR=/tmp/receptor-testing/K8sPods
mkdir "$PODS_DIR"
PODS="$(kubectl get pods --template '{{range.items}}{{.metadata.name}}{{"\n"}}{{end}}')"
for pod in $PODS ; do
mkdir "$PODS_DIR/$pod"
kubectl get pod "$pod" --output=json > "$PODS_DIR/$pod/pod"
kubectl logs "$pod" > "$PODS_DIR/$pod/logs"
done
receptor-1.5.3/.github/workflows/build_binary_from_ref.yml 0000664 0000000 0000000 00000002316 14754657407 0024032 0 ustar 00root root 0000000 0000000 ---
name: "Build binary from arbitratry repo / ref"
on: # yamllint disable-line rule:truthy
workflow_dispatch:
inputs:
repository:
description: 'The receptor repository to build from.'
required: true
default: 'ansible/receptor'
ref:
description: 'The ref to build. Can be a branch or any other valid ref.'
required: true
default: 'devel'
jobs:
build:
name: build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
repository: ${{ github.event.inputs.repository }}
ref: ${{ github.event.inputs.ref }}
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: build-all target
run: make receptor
- name: Upload binary
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: |
pip install boto3
ansible -i localhost, -c local all -m aws_s3 \
-a "bucket=receptor-nightlies object=refs/${{ github.event.inputs.ref }}/receptor src=./receptor mode=put"
receptor-1.5.3/.github/workflows/codeql-analysis.yml 0000664 0000000 0000000 00000005367 14754657407 0022611 0 ustar 00root root 0000000 0000000 ---
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on: # yamllint disable-line rule:truthy
push:
branches: ["devel", release_*]
pull_request:
# The branches below must be a subset of the branches above
branches: ["devel"]
schedule:
- cron: '18 2 * * 5'
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
security-events: write
strategy:
fail-fast: false
matrix:
language: ['go', 'python']
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
# Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support
steps:
- name: Checkout repository
uses: actions/checkout@v4
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.
# Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v3
# Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
# If the Autobuild fails above, remove it and uncomment the following three lines.
# modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance.
# - run: |
# echo "Run, Build Application using script"
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
receptor-1.5.3/.github/workflows/coverage_reporting.yml 0000664 0000000 0000000 00000003502 14754657407 0023372 0 ustar 00root root 0000000 0000000 ---
name: Codecov
on: # yamllint disable-line rule:truthy
pull_request: # yamllint disable-line rule:empty-values
push:
branches: [devel]
env:
DESIRED_GO_VERSION: '1.22'
jobs:
go_test_coverage:
name: go test coverage
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.DESIRED_GO_VERSION }}
- name: build and install receptor
run: |
make build-all
sudo cp ./receptor /usr/local/bin/receptor
- name: Download kind binary
run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind
- name: Create k8s cluster
run: ./kind create cluster
- name: Interact with the cluster
run: kubectl get nodes
- name: Run receptor tests with coverage
run: make coverage
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
fail_ci_if_error: true
verbose: true
env:
CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- name: get k8s logs
if: ${{ failure() }}
run: .github/workflows/artifact-k8s-logs.sh
- name: remove sockets before archiving logs
if: ${{ failure() }}
run: find /tmp/receptor-testing -name controlsock -delete
- name: Artifact receptor data
uses: actions/upload-artifact@v4.4.3
if: ${{ failure() }}
with:
name: test-logs
path: /tmp/receptor-testing
- name: Archive receptor binary
uses: actions/upload-artifact@v4.4.3
with:
name: receptor
path: /usr/local/bin/receptor
receptor-1.5.3/.github/workflows/dependency_review.yml 0000664 0000000 0000000 00000000526 14754657407 0023210 0 ustar 00root root 0000000 0000000 ---
name: 'Dependency Review'
on: [pull_request] # yamllint disable-line rule:truthy
permissions:
contents: read
jobs:
dependency-review:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
...
receptor-1.5.3/.github/workflows/devel_image.yml 0000664 0000000 0000000 00000003041 14754657407 0021745 0 ustar 00root root 0000000 0000000 ---
name: Publish devel image
on: # yamllint disable-line rule:truthy
push:
branches: [devel]
jobs:
release:
runs-on: ubuntu-latest
name: Push devel image
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
# setup qemu and buildx
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Install build dependencies
run: |
pip install build
# we will first build the image for x86 and load it on the host for testing
- name: Build Image
run: |
export CONTAINERCMD="docker buildx"
export EXTRA_OPTS="--platform linux/amd64 --load"
make container REPO=quay.io/${{ github.repository }} TAG=devel
- name: Test Image
run: docker run --rm quay.io/${{ github.repository }}:devel receptor --version
- name: Login To Quay
uses: docker/login-action@v3
with:
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_TOKEN }}
registry: quay.io/${{ github.repository }}
# Since x86 image is built in previous step
# buildx will use cached image, hence overall time will not be affected
- name: Build Multiarch Image & Push To Quay
run: |
export CONTAINERCMD="docker buildx"
export EXTRA_OPTS="--platform linux/amd64,linux/ppc64le,linux/arm64 --push"
make container REPO=quay.io/${{ github.repository }} TAG=devel
receptor-1.5.3/.github/workflows/devel_whl.yml 0000664 0000000 0000000 00000001514 14754657407 0021460 0 ustar 00root root 0000000 0000000 ---
name: Publish nightly wheel
on: # yamllint disable-line rule:truthy
push:
branches: [devel]
jobs:
sdist:
runs-on: ubuntu-latest
name: Build wheel
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install build dependencies
run: |
pip install build
- name: Build wheel
run: |
make clean receptorctl_wheel
- name: Upload wheel
env:
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
run: |
pip install boto3
ansible -i localhost, -c local all -m aws_s3 \
-a "bucket=receptor-nightlies object=receptorctl/receptorctl-0.0.0-py3-none-any.whl src=$(ls receptorctl/dist/*.whl | head -n 1) mode=put"
receptor-1.5.3/.github/workflows/promote.yml 0000664 0000000 0000000 00000006426 14754657407 0021203 0 ustar 00root root 0000000 0000000 ---
name: Promote Release
on: # yamllint disable-line rule:truthy
release:
types: [published]
permissions:
contents: write
jobs:
promote:
runs-on: ubuntu-latest
env:
TAG: ${{github.event.release.tag_name}}
steps:
- name: Checkout Receptor
uses: actions/checkout@v4
- name: Log in to GHCR
run: |
echo ${{ secrets.GITHUB_TOKEN }} | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Log in to Quay
run: |
echo ${{ secrets.QUAY_TOKEN }} | docker login quay.io -u ${{ secrets.QUAY_USER }} --password-stdin
- name: Copy Image to Quay
uses: akhilerm/tag-push-action@v2.2.0
with:
src: ghcr.io/${{ github.repository }}:${{env.TAG}}
dst: |
quay.io/${{ github.repository }}:${{env.TAG}}
quay.io/${{ github.repository }}:latest
- name: Check if floating tag is needed
run: |
if [[ $TAG == *"dev"* ]];
then
echo "FLOATING_TAG=$(echo $TAG | sed 's/[0-9]\+$//')" >> $GITHUB_ENV
else
echo "FLOATING_TAG=$TAG" >> $GITHUB_ENV
fi
- name: Push floating tag to Quay
uses: akhilerm/tag-push-action@v2.2.0
with:
src: ghcr.io/${{ github.repository }}:${{env.TAG}}
dst: quay.io/${{ github.repository }}:${{env.FLOATING_TAG}}
- name: Install python
uses: actions/setup-python@v5
- name: Install dependencies
run: |
python3 -m pip install twine build
- name: Set official pypi info
run: echo pypi_repo=pypi >> $GITHUB_ENV
if: ${{ github.repository_owner == 'ansible' }}
- name: Set unofficial pypi info
run: echo pypi_repo=testpypi >> $GITHUB_ENV
if: ${{ github.repository_owner != 'ansible' }}
- name: Set receptor pypi version
run: echo RECEPTORCTL_PYPI_VERSION=$(curl --silent https://pypi.org/pypi/receptorctl/json | jq --raw-output '"v" + .info.version') >> $GITHUB_ENV
- name: Build receptorctl and upload to pypi
if: ${{ env.RECEPTORCTL_PYPI_VERSION != env.TAG }}
run: |
make receptorctl_wheel receptorctl_sdist VERSION=$TAG
twine upload \
-r ${{ env.pypi_repo }} \
-u ${{ secrets.PYPI_USERNAME }} \
-p ${{ secrets.PYPI_PASSWORD }} \
receptorctl/dist/*
publish:
runs-on: ubuntu-latest
env:
VERSION: ${{github.event.release.tag_name}}
steps:
- name: Checkout Receptor
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: "1.22"
- name: Build packages
run: |
make build-package GOOS=linux GOARCH=amd64 BINNAME=receptor
make build-package GOOS=linux GOARCH=arm64 BINNAME=receptor
make build-package GOOS=darwin GOARCH=amd64 BINNAME=receptor
make build-package GOOS=darwin GOARCH=arm64 BINNAME=receptor
make build-package GOOS=windows GOARCH=amd64 BINNAME=receptor.exe
make build-package GOOS=windows GOARCH=arm64 BINNAME=receptor.exe
- name: Publish packages
uses: softprops/action-gh-release@v2
with:
files: |-
dist/checksums.txt
dist/*.tar.gz
receptor-1.5.3/.github/workflows/pull_request.yml 0000664 0000000 0000000 00000013641 14754657407 0022237 0 ustar 00root root 0000000 0000000 ---
name: CI
on: # yamllint disable-line rule:truthy
pull_request: # yamllint disable-line rule:empty-values
env:
DESIRED_GO_VERSION: '1.22'
DESIRED_GOLANGCI_LINT_VERSION: 'v1.60'
DESIRED_PYTHON_VERSION: '3.12'
jobs:
lint-receptor:
name: lint-receptor
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/setup-go@v5
with:
go-version: ${{ env.DESIRED_GO_VERSION }}
- name: golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: ${{ env.DESIRED_GOLANGCI_LINT_VERSION }}
receptor:
name: receptor (Go ${{ matrix.go-version }})
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
go-version: ["1.22"]
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
- name: build and install receptor
run: |
make build-all
sudo cp ./receptor /usr/local/bin/receptor
- name: Download kind binary
run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind
- name: Create k8s cluster
run: ./kind create cluster
- name: Interact with the cluster
run: kubectl get nodes
- name: Run receptor tests
run: make test
- name: get k8s logs
if: ${{ failure() }}
run: .github/workflows/artifact-k8s-logs.sh
- name: remove sockets before archiving logs
if: ${{ failure() }}
run: find /tmp/receptor-testing -name controlsock -delete
- name: Artifact receptor data for ${{ matrix.go-version }}
uses: actions/upload-artifact@v4.4.3
if: ${{ failure() }}
with:
name: test-logs-${{ matrix.go-version }}
path: /tmp/receptor-testing
- name: Archive receptor binary for ${{ matrix.go-version }}
uses: actions/upload-artifact@v4.4.3
with:
name: receptor-${{ matrix.go-version }}
path: /usr/local/bin/receptor
receptorctl:
name: Run receptorctl tests${{ '' }} # Nest jobs under the same sidebar category
needs: receptor
strategy:
fail-fast: false
matrix:
python-version:
# NOTE: The highest and the lowest versions come
# NOTE: first as their statuses are most likely to
# NOTE: signal problems early:
- 3.12
- 3.8
- 3.11
- "3.10"
- 3.9
uses: ./.github/workflows/reusable-nox.yml
with:
python-version: ${{ matrix.python-version }}
session: tests-${{ matrix.python-version }}
download-receptor: true
go-version: '1.22'
lint-receptorctl:
name: Lint receptorctl${{ '' }} # Nest jobs under the same sidebar category
strategy:
fail-fast: false
matrix:
session:
- check_style
- check_format
uses: ./.github/workflows/reusable-nox.yml
with:
python-version: '3.12'
session: ${{ matrix.session }}
container:
name: container
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.DESIRED_PYTHON_VERSION }}
- name: Install python dependencies
run: pip install build
- name: Build container
run: make container REPO=receptor LATEST=yes
- name: Write out basic config
run: |
cat << EOF > test.cfg
---
- local-only:
- control-service:
service: control
filename: /tmp/receptor.sock
- work-command:
worktype: cat
command: cat
EOF
- name: Run receptor (and wait a few seconds for it to boot)
run: |
podman run --name receptor -d -v $PWD/test.cfg:/etc/receptor/receptor.conf:Z localhost/receptor
sleep 3
podman logs receptor
- name: Submit work and assert the output we expect
run: |
output=$(podman exec -i receptor receptorctl work submit cat -l 'hello world' -f)
echo $output
if [[ "$output" != "hello world" ]]; then
echo "Output did not contain expected value"
exit 1
fi
receptorctl-test-coverage:
name: Receptorctl test coverage
needs: receptor
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
session:
- coverage
steps:
- name: Download the `receptor` binary
uses: actions/download-artifact@v4
with:
name: receptor-${{ env.DESIRED_GO_VERSION }}
path: /usr/local/bin/
- name: Set executable bit on the `receptor` binary
run: sudo chmod a+x /usr/local/bin/receptor
- name: Set up nox
uses: wntrblm/nox@2025.02.09
with:
python-versions: ${{ env.DESIRED_PYTHON_VERSION }}
- name: Check out the source code from Git
uses: actions/checkout@v4
with:
fetch-depth: 0 # Needed for the automation in Nox to find the last tag
- name: Provision nox environment for ${{ matrix.session }}
run: nox --install-only --session ${{ matrix.session }}
working-directory: ./receptorctl
- name: Run `receptorctl` nox ${{ matrix.session }} session
run: nox --no-install --session ${{ matrix.session }}
working-directory: ./receptorctl
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v5
with:
directory: receptorctl
files: receptorctl_coverage.xml
fail_ci_if_error: true
token: ${{ secrets.CODECOV_TOKEN }}
verbose: true
receptor-1.5.3/.github/workflows/reusable-nox.yml 0000664 0000000 0000000 00000003433 14754657407 0022115 0 ustar 00root root 0000000 0000000 ---
name: Receptorctl nox sessions
on: # yamllint disable-line rule:truthy
workflow_call:
inputs:
python-version:
type: string
description: The Python version to use.
required: true
session:
type: string
description: The nox session to run.
required: true
download-receptor:
type: boolean
description: Whether to perform go binary download.
required: false
default: false
go-version:
type: string
description: The Go version to use.
required: false
env:
FORCE_COLOR: 1
NOXSESSION: ${{ inputs.session }}
jobs:
nox:
runs-on: ubuntu-latest
name: >- # can't use `env` in this context:
Run `receptorctl` ${{ inputs.session }} session
steps:
- name: Download the `receptor` binary
if: fromJSON(inputs.download-receptor)
uses: actions/download-artifact@v4
with:
name: receptor-${{ inputs.go-version }}
path: /usr/local/bin/
- name: Set executable bit on the `receptor` binary
if: fromJSON(inputs.download-receptor)
run: sudo chmod a+x /usr/local/bin/receptor
- name: Set up nox
uses: wntrblm/nox@2025.02.09
with:
python-versions: ${{ inputs.python-version }}
- name: Check out the source code from Git
uses: actions/checkout@v4
with:
fetch-depth: 0 # Needed for the automation in Nox to find the last tag
sparse-checkout: receptorctl
- name: Provision nox environment for ${{ env.NOXSESSION }}
run: nox --install-only
working-directory: ./receptorctl
- name: Run `receptorctl` nox ${{ env.NOXSESSION }} session
run: nox --no-install
working-directory: ./receptorctl
receptor-1.5.3/.github/workflows/scorecard.yml 0000664 0000000 0000000 00000006006 14754657407 0021455 0 ustar 00root root 0000000 0000000 # This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '41 15 * * 4'
push:
branches: ["devel"]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: "Checkout code"
uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@184d73b71b93c222403b2e7f1ffebe4508014249 # v4.4.2
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@v3
with:
sarif_file: results.sarif
receptor-1.5.3/.github/workflows/stage.yml 0000664 0000000 0000000 00000006034 14754657407 0020614 0 ustar 00root root 0000000 0000000 ---
name: Stage Release
on: # yamllint disable-line rule:truthy
workflow_dispatch:
inputs:
version:
description: 'Version to release. (x.y.z) Will create a tag / draft release.'
required: true
default: ''
ref:
description: 'The ref to tag. Can only be the 40 character SHA.'
required: true
default: ''
confirm:
description: 'Are you sure? Set this to yes.'
required: true
default: 'no'
name:
description: 'Name of the person in single quotes who will create a tag / draft release.'
required: true
default: ''
type: string
email:
description: 'Email of the person who will create a tag / draft release.'
required: true
default: ''
env:
DESIRED_PYTHON_VERSION: '3.12'
jobs:
stage:
runs-on: ubuntu-latest
permissions:
packages: write
contents: write
steps:
- name: Verify inputs
run: |
set -e
if [[ ${{ github.event.inputs.confirm }} != "yes" ]]; then
>&2 echo "Confirm must be 'yes'"
exit 1
fi
if [[ ${{ github.event.inputs.version }} == "" ]]; then
>&2 echo "Set version to continue."
exit 1
fi
exit 0
- name: Checkout receptor
uses: actions/checkout@v4
with:
ref: ${{ github.event.inputs.ref }}
- name: Install python
uses: actions/setup-python@v5
with:
python-version: ${{ env.DESIRED_PYTHON_VERSION }}
- name: Install dependencies
run: |
python3 -m pip install build
# setup qemu and buildx
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to registry
run: |
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
- name: Lowercase github owner
run: |
echo "OWNER=$(echo 'console.log("${{ github.repository_owner }}".toLowerCase())' | node -)" >> $GITHUB_ENV
- name: Build container image
run: |
make container CONTAINERCMD="docker buildx" EXTRA_OPTS="--platform linux/amd64,linux/ppc64le,linux/arm64 --push" REPO=ghcr.io/${{ env.OWNER }}/receptor VERSION=v${{ github.event.inputs.version }} LATEST=yes
- name: Get current time
uses: josStorer/get-current-time@v2
id: current-time
- name: Create draft release
run: |
ansible-playbook tools/ansible/stage.yml \
-e version="${{ github.event.inputs.version }}" \
-e repo=${{ env.OWNER }}/receptor \
-e github_token=${{ secrets.GITHUB_TOKEN }} \
-e target_commitish="${{ github.event.inputs.ref }}" \
-e tagger_name="${{ github.event.inputs.name }}" \
-e tagger_email="${{ github.event.inputs.email }}" \
-e time="${{ steps.current-time.outputs.time }}" \
-v
receptor-1.5.3/.github/workflows/test-reporting.yml 0000664 0000000 0000000 00000004736 14754657407 0022506 0 ustar 00root root 0000000 0000000 ---
name: Generate junit test report
on: # yamllint disable-line rule:truthy
pull_request: # yamllint disable-line rule:empty-values
push:
branches: [devel]
env:
DESIRED_GO_VERSION: '1.22'
jobs:
generate_junit_test_report:
name: go test coverage
runs-on: ubuntu-latest
strategy:
fail-fast: false
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: ${{ env.DESIRED_GO_VERSION }}
- name: build and install receptor
run: |
make build-all
sudo cp ./receptor /usr/local/bin/receptor
- name: Download kind binary
run: curl -Lo ./kind https://kind.sigs.k8s.io/dl/latest/kind-linux-amd64 && chmod +x ./kind
- name: Create k8s cluster
run: ./kind create cluster
- name: Interact with the cluster
run: kubectl get nodes
- name: Install go junit reporting
run: go install github.com/jstemmer/go-junit-report/v2@latest
- name: Run receptor tests
run: go test -v 2>&1 $(go list ./... | grep -vE '/tests/|mock_|example') | go-junit-report > report.xml
- name: Upload test results to dashboard
if: >-
!cancelled()
&& github.event_name == 'push'
&& github.repository == 'ansible/receptor'
&& github.ref_name == github.event.repository.default_branch
run: >-
curl -v --user "${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_USER }}:${{ secrets.PDE_ORG_RESULTS_UPLOAD_PASSWORD }}"
--form "xunit_xml=@report.xml"
--form "component_name=receptor"
--form "git_commit_sha=${{ github.sha }}"
--form "git_repository_url=https://github.com/${{ github.repository }}"
"${{ vars.PDE_ORG_RESULTS_AGGREGATOR_UPLOAD_URL }}/api/results/upload/"
- name: get k8s logs
if: ${{ failure() }}
run: .github/workflows/artifact-k8s-logs.sh
- name: remove sockets before archiving logs
if: ${{ failure() }}
run: find /tmp/receptor-testing -name controlsock -delete
- name: Artifact receptor data
uses: actions/upload-artifact@v4
if: ${{ failure() }}
with:
name: test-logs
path: /tmp/receptor-testing
- name: Archive receptor binary
uses: actions/upload-artifact@v4
with:
name: receptor
path: /usr/local/bin/receptor
receptor-1.5.3/.github/workflows/triage_new.yml 0000664 0000000 0000000 00000000735 14754657407 0021637 0 ustar 00root root 0000000 0000000 ---
name: Triage
on: # yamllint disable-line rule:truthy
issues:
types:
- opened
jobs:
triage:
runs-on: ubuntu-latest
name: Label
steps:
- name: Label issues
uses: github/issue-labeler@v3.4
with:
repo-token: "${{ secrets.GITHUB_TOKEN }}"
not-before: 2021-12-07T07:00:00Z
configuration-path: .github/issue_labeler.yml
enable-versioned-regex: 0
if: github.event_name == 'issues'
receptor-1.5.3/.gitignore 0000664 0000000 0000000 00000000626 14754657407 0015362 0 ustar 00root root 0000000 0000000 .DS_Store
.idea
.vscode
kind
receptor
receptor.exe
receptor.app
recepcert
/net
.container-flag*
.VERSION
kubectl
/receptorctl/.nox
/receptorctl/.VERSION
/receptorctl/AUTHORS
/receptorctl/ChangeLog
/receptor-python-worker/.VERSION
/receptor-python-worker/ChangeLog
/receptor-python-worker/AUTHORS
/receptorctl/venv/
receptorctl-test-venv/
.vagrant/
/docs/build
/dist
/test-configs
coverage.txt
venv
/vendor
receptor-1.5.3/.gitleaks.toml 0000664 0000000 0000000 00000000221 14754657407 0016137 0 ustar 00root root 0000000 0000000 [allowlist]
description = "Global Allowlist"
paths = [
'''pkg/certificates/ca_test.go''',
'''pkg/netceptor/tlsconfig_test.go''',
]
receptor-1.5.3/.golangci.yml 0000664 0000000 0000000 00000006327 14754657407 0015762 0 ustar 00root root 0000000 0000000 ---
run:
timeout: 10m
linters:
disable-all: true
enable:
- asciicheck
- bodyclose
- depguard
- dogsled
- durationcheck
- exportloopref
- gci
- gocritic
- godot
- gofmt
- gofumpt
- goheader
- goimports
- gomodguard
- gosec
- gosimple
- govet
- importas
- ineffassign
- makezero
- misspell
- nakedret
- nilerr
- nlreturn
- noctx
- nolintlint
- prealloc
- predeclared
- rowserrcheck
- sqlclosecheck
- staticcheck
- stylecheck
- tparallel
- typecheck
- unconvert
- unused
- wastedassign
- whitespace
linters-settings:
depguard:
rules:
main:
files:
- "$all"
- "!$test"
- "!**/functional/**/*.go"
allow:
- "$gostd"
- "github.com/ansible/receptor/internal/version"
- "github.com/ansible/receptor/cmd"
- "github.com/ansible/receptor/pkg"
- "github.com/creack/pty"
- "github.com/fsnotify/fsnotify"
- "github.com/ghjm/cmdline"
- "github.com/golang-jwt/jwt/v4"
- "github.com/google/shlex"
- "github.com/gorilla/websocket"
- "github.com/jupp0r/go-priority-queue"
- "github.com/minio/highwayhash"
- "github.com/pbnjay/memory"
- "github.com/quic-go/quic-go"
- "github.com/rogpeppe/go-internal/lockedfile"
- "github.com/songgao/water"
- "github.com/vishvananda/netlink"
- "github.com/spf13/viper"
- "github.com/spf13/cobra"
- "k8s.io/api/core"
- "k8s.io/apimachinery/pkg"
- "k8s.io/client-go"
- "github.com/grafana/pyroscope-go"
- "github.com/sirupsen/logrus"
tests:
files:
- "$test"
- "**/functional/**/*.go"
allow:
- "$gostd"
- "github.com/ansible/receptor/pkg"
- "github.com/ansible/receptor/tests/utils"
- "github.com/fortytw2/leaktest"
- "github.com/fsnotify/fsnotify"
- "github.com/gorilla/websocket"
- "github.com/prep/socketpair"
- "github.com/google/go-cmp/cmp"
- "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/fields"
- "k8s.io/apimachinery/pkg/selection"
- "k8s.io/apimachinery/pkg/watch"
- "k8s.io/client-go/kubernetes"
- "k8s.io/client-go/rest"
- "k8s.io/client-go/tools/remotecommand"
- "github.com/quic-go/quic-go"
- "github.com/quic-go/quic-go/logging"
- "github.com/AaronH88/quic-go"
issues:
# Dont commit the following line.
# It will make CI pass without telling you about errors.
# fix: true
exclude:
- "lostcancel" # TODO: Context is not canceled on multiple occasions. Needs more detailed work to be fixed.
- "SA2002|thelper|testinggoroutine" # TODO: Test interface used outside of its routine, tests need to be rewritten.
- "G306" # TODO: Restrict perms of touched files.
- "G402|G404" # TODO: Make TLS more secure.
- "G204" # gosec is throwing a fit, ignore.
...
receptor-1.5.3/.readthedocs.yaml 0000664 0000000 0000000 00000002035 14754657407 0016615 0 ustar 00root root 0000000 0000000 ---
# Read the Docs configuration file for Sphinx projects
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Set the OS, Python version and other tools you might need
build:
os: ubuntu-lts-latest
tools:
golang: "1.22"
python: "3.12"
# You can also specify other tool versions:
# nodejs: "20"
# rust: "1.70"
# Build documentation in the "docs/" directory with Sphinx
sphinx:
configuration: docs/source/conf.py
# You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
# builder: "dirhtml"
# Fail on all warnings to avoid broken references
fail_on_warning: true
# Optionally build your docs in additional formats such as PDF and ePub
# formats:
# - pdf
# - epub
# Optional but recommended, declare the Python requirements required
# to build your documentation
# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
install:
- requirements: docs/source/requirements.txt
...
receptor-1.5.3/.sonarcloud.properties 0000664 0000000 0000000 00000000177 14754657407 0017740 0 ustar 00root root 0000000 0000000 sonar.python.version=3.8, 3.9, 3.11, 3.12
sonar.sources=.
sonar.test.inclusions=**/*_test.go, receptorctl/tests/
sonar.tests=.
receptor-1.5.3/LICENSE.md 0000664 0000000 0000000 00000022130 14754657407 0014770 0 ustar 00root root 0000000 0000000 Apache License
==============
_Version 2.0, January 2004_
_<https://www.apache.org/licenses/>_
### Terms and Conditions for use, reproduction, and distribution
#### 1. Definitions
“License” shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
“Licensor” shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
“Legal Entity” shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, “control” means **(i)** the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or **(ii)** ownership of fifty percent (50%) or more of the
outstanding shares, or **(iii)** beneficial ownership of such entity.
“You” (or “Your”) shall mean an individual or Legal Entity exercising
permissions granted by this License.
“Source” form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
“Object” form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
“Work” shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
“Derivative Works” shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
“Contribution” shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
“submitted” means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as “Not a Contribution.”
“Contributor” shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
#### 2. Grant of Copyright License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
#### 3. Grant of Patent License
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
#### 4. Redistribution
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
* **(a)** You must give any other recipients of the Work or Derivative Works a copy of
this License; and
* **(b)** You must cause any modified files to carry prominent notices stating that You
changed the files; and
* **(c)** You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
* **(d)** If the Work includes a “NOTICE” text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
#### 5. Submission of Contributions
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
#### 6. Trademarks
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
#### 7. Disclaimer of Warranty
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an “AS IS” BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
#### 8. Limitation of Liability
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
#### 9. Accepting Warranty or Additional Liability
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
receptor-1.5.3/Makefile 0000664 0000000 0000000 00000016176 14754657407 0015041 0 ustar 00root root 0000000 0000000 # If the current git commit has been tagged, use as the version. (e.g. v1.4.5)
# Otherwise the version is +git (e.g. 1.4.5+f031d2)
OFFICIAL_VERSION := $(shell if VER=`git describe --exact-match --tags 2>/dev/null`; then echo $$VER; else echo ""; fi)
ifneq ($(OFFICIAL_VERSION),)
VERSION := $(OFFICIAL_VERSION)
else ifneq ($(shell git tag --list),)
VERSION := $(shell git describe --tags | cut -d - -f -1)+git$(shell git rev-parse --short HEAD)
else ifeq ($(VERSION),)
VERSION := $(error No tags found in git repository)
# else VERSION was passed as a command-line argument to make
endif
# When building Receptor, tags can be used to remove undesired
# features. This is primarily used for deploying Receptor in a
# security sensitive role, where it is desired to have no possibility
# of a service being accidentally enabled. Features are controlled
# using the TAGS environment variable, which is a comma-delimited
# list of zero or more of the following:
#
# no_controlsvc: Disable the control service
#
# no_backends: Disable all backends (except external via the API)
# no_tcp_backend: Disable the TCP backend
# no_udp_backend: Disable the UDP backend
# no_websocket_backend: Disable the websocket backend
#
# no_services: Disable all services
# no_proxies: Disable the TCP, UDP and Unix proxy services
# no_ip_router: Disable the IP router service
#
# no_tls_config: Disable the ability to configure TLS server/client configs
#
# no_workceptor: Disable the unit-of-work subsystem (be network only)
#
# no_cert_auth: Disable commands related to CA and certificate generation
TAGS ?=
ifeq ($(TAGS),)
TAGPARAM=
else
TAGPARAM=--tags $(TAGS)
endif
DEBUG ?=
ifeq ($(DEBUG),1)
DEBUGFLAGS=-gcflags=all="-N -l"
else
DEBUGFLAGS=
endif
GO ?= go
receptor: $(shell find pkg -type f -name '*.go') ./cmd/receptor-cl/receptor.go
CGO_ENABLED=0 GOFLAGS="-buildvcs=false" $(GO) build \
-o receptor \
$(DEBUGFLAGS) \
-ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \
$(TAGPARAM) \
./cmd/receptor-cl
clean:
@rm -fv .container-flag*
@rm -fv .VERSION
@rm -rfv dist/
@rm -fv $(KUBECTL_BINARY)
@rm -fv packaging/container/receptor
@rm -rfv packaging/container/RPMS/
@rm -fv packaging/container/*.whl
@rm -fv receptor receptor.exe receptor.app net
@rm -fv receptorctl/dist/*
@rm -fv receptor-python-worker/dist/*
@rm -rfv receptorctl-test-venv/
ARCH=amd64
OS=linux
KUBECTL_BINARY=./kubectl
STABLE_KUBECTL_VERSION=$(shell curl --silent https://storage.googleapis.com/kubernetes-release/release/stable.txt)
kubectl:
if [ "$(wildcard $(KUBECTL_BINARY))" != "" ]; \
then \
FOUND_KUBECTL_VERSION=$$(./kubectl version --client=true | head --lines=1 | cut --delimiter=' ' --field=3); \
else \
FOUND_KUBECTL_VERSION=; \
fi
if [ "${FOUND_KUBECTL_VERSION}" != "$(STABLE_KUBECTL_VERSION)" ]; \
then \
curl \
--location \
--output $(KUBECTL_BINARY) \
https://storage.googleapis.com/kubernetes-release/release/$(STABLE_KUBECTL_VERSION)/bin/$(OS)/$(ARCH)/kubectl; \
chmod 0700 $(KUBECTL_BINARY); \
fi
lint:
@golint cmd/... pkg/... example/...
receptorctl-lint: receptor receptorctl/.VERSION
@cd receptorctl && nox -s lint
format:
@find cmd/ pkg/ -type f -name '*.go' -exec $(GO) fmt {} \;
fmt: format
pre-commit:
@pre-commit run --all-files
build-all:
@echo "Running Go builds..." && \
GOOS=windows $(GO) build \
-o receptor.exe \
./cmd/receptor-cl && \
GOOS=darwin $(GO) build \
-o receptor.app \
./cmd/receptor-cl && \
$(GO) build \
example/*.go && \
$(GO) build \
-o receptor \
-ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \
./cmd/receptor-cl
BINNAME='receptor'
CHECKSUM_PROGRAM='sha256sum'
GOARCH=$(ARCH)
GOOS=$(OS)
DIST := receptor_$(shell echo '$(VERSION)' | sed 's/^v//')_$(GOOS)_$(GOARCH)
build-package:
@echo "Building and packaging binary for $(GOOS)/$(GOARCH) as dist/$(DIST).tar.gz" && \
mkdir -p dist/$(DIST) && \
GOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=0 $(GO) build \
-o dist/$(DIST)/$(BINNAME) \
$(DEBUGFLAGS) \
-ldflags "-X 'github.com/ansible/receptor/internal/version.Version=$(VERSION)'" \
$(TAGPARAM) \
./cmd/receptor-cl && \
tar -C dist/$(DIST) -zcf dist/$(DIST).tar.gz $(BINNAME) && \
cd dist/ && \
$(CHECKSUM_PROGRAM) $(DIST).tar.gz >> checksums.txt
RUNTEST ?=
ifeq ($(RUNTEST),)
TESTCMD =
else
TESTCMD = -run $(RUNTEST)
endif
BLOCKLIST='/tests/|mock_|example'
COVERAGE_FILE='coverage.txt'
coverage: build-all
PATH="${PWD}:${PATH}" \
$(GO) test $$($(GO) list ./... | grep -vE $(BLOCKLIST)) \
$(TESTCMD) \
-count=1 \
-cover \
-covermode=atomic \
-coverprofile=$(COVERAGE_FILE) \
-race \
-timeout 5m
test: receptor
PATH="${PWD}:${PATH}" \
$(GO) test $$($(GO) list ./...) \
$(TESTCMD) \
-count=1 \
-race \
-timeout 5m
receptorctl-test: receptorctl/.VERSION receptor
@cd receptorctl && nox -s tests
testloop: receptor
@i=1; while echo "------ $$i" && \
make test; do \
i=$$((i+1)); done
kubetest: kubectl
./kubectl get nodes
version:
@echo $(VERSION) > .VERSION
@echo ".VERSION created for $(VERSION)"
receptorctl/.VERSION:
echo $(VERSION) > $@
RECEPTORCTL_WHEEL = receptorctl/dist/receptorctl-$(VERSION:v%=%)-py3-none-any.whl
$(RECEPTORCTL_WHEEL): receptorctl/README.md receptorctl/.VERSION $(shell find receptorctl/receptorctl -type f -name '*.py')
@cd receptorctl && python3 -m build --wheel
receptorctl_wheel: $(RECEPTORCTL_WHEEL)
RECEPTORCTL_SDIST = receptorctl/dist/receptorctl-$(VERSION:v%=%).tar.gz
$(RECEPTORCTL_SDIST): receptorctl/README.md receptorctl/.VERSION $(shell find receptorctl/receptorctl -type f -name '*.py')
@cd receptorctl && python3 -m build --sdist
receptorctl_sdist: $(RECEPTORCTL_SDIST)
receptor-python-worker/.VERSION:
echo $(VERSION) > $@
RECEPTOR_PYTHON_WORKER_WHEEL = receptor-python-worker/dist/receptor_python_worker-$(VERSION:v%=%)-py3-none-any.whl
$(RECEPTOR_PYTHON_WORKER_WHEEL): receptor-python-worker/README.md receptor-python-worker/.VERSION $(shell find receptor-python-worker/receptor_python_worker -type f -name '*.py')
@cd receptor-python-worker && python3 -m build --wheel
# Container command can be docker or podman
CONTAINERCMD ?= podman
# Repo without tag
REPO := quay.io/ansible/receptor
# TAG is VERSION with a '-' instead of a '+', to avoid invalid image reference error.
TAG := $(subst +,-,$(VERSION))
# Set this to tag image as :latest in addition to :$(VERSION)
LATEST :=
EXTRA_OPTS ?=
space := $(subst ,, )
CONTAINER_FLAG_FILE = .container-flag-$(VERSION)$(subst $(space),,$(subst /,,$(EXTRA_OPTS)))
container: $(CONTAINER_FLAG_FILE)
$(CONTAINER_FLAG_FILE): $(RECEPTORCTL_WHEEL) $(RECEPTOR_PYTHON_WORKER_WHEEL)
@tar --exclude-vcs-ignores -czf packaging/container/source.tar.gz .
@cp $(RECEPTORCTL_WHEEL) packaging/container
@cp $(RECEPTOR_PYTHON_WORKER_WHEEL) packaging/container
$(CONTAINERCMD) build $(EXTRA_OPTS) packaging/container --build-arg VERSION=$(VERSION:v%=%) -t $(REPO):$(TAG) $(if $(LATEST),-t $(REPO):latest,)
touch $@
tc-image: container
@cp receptor packaging/tc-image/
@$(CONTAINERCMD) build packaging/tc-image -t receptor-tc
.PHONY: lint format fmt pre-commit build-all test clean testloop container version receptorctl-tests kubetest
receptor-1.5.3/README.md 0000664 0000000 0000000 00000007124 14754657407 0014651 0 ustar 00root root 0000000 0000000 # Receptor
[](https://codecov.io/gh/ansible/receptor)[](https://api.securityscorecards.dev/projects/github.com/ansible/receptor)
Receptor is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers. Receptor nodes establish peer-to-peer connections with each other via existing networks. Once connected, the Receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures.
See the readthedocs page for Receptor at: <https://ansible.readthedocs.io/projects/receptor/>
## Terminology and Concepts
* _Receptor_: The Receptor application taken as a whole, that typically runs as a daemon.
* _Receptorctl_: A user-facing command line used to interact with Receptor, typically over a Unix domain socket.
* _Netceptor_: The networking part of Receptor. Usable as a Go library.
* _Workceptor_: The unit-of-work handling of Receptor, which makes use of Netceptor. Also usable as a Go library.
* _Node_: A single running instance of Receptor.
* _Node ID_: An arbitrary string identifying a single node, analogous to an IP address.
* _Service_: An up-to-8-character string identifying an endpoint on a Receptor node that can receive messages. Analogous to a port number in TCP or UDP.
* _Backend_: A type of connection that Receptor nodes can pass traffic over. Current backends include TCP, UDP and websockets.
* _Control Service_: A built-in service that usually runs under the name `control`. Used to report status and to launch and monitor work.
## How to Get It
The easiest way to check out Receptor is to run it as a container. Images are kept on the Quay registry. To use this, run:
```bash
[docker|podman] pull quay.io/ansible/receptor
[docker|podman] run -d -v /path/to/receptor.conf:/etc/receptor/receptor.conf:Z receptor
```
## Use as a Go library
This code can be imported and used from Go programs. The main libraries are:
* _Netceptor_: <https://pkg.go.dev/github.com/ansible/receptor/pkg/netceptor>
* _Workceptor_: <https://pkg.go.dev/github.com/ansible/receptor/pkg/workceptor>
See the `example/` directory for examples of using these libraries from Go.
## Use as a command-line tool
The `receptor` command runs a Receptor node with access to all included backends and services. See `receptor --help` for details.
The command line is organized into entities which take parameters, like: `receptor --entity1 param1=value1 param2=value1 --entity2 param1=value2 param2=value2`. In this case we are configuring two things, `entity1` and `entity2`, each of which takes two parameters. Distinct entities are marked with a double dash, and bare parameters attach to the immediately preceding entity.
Receptor can also take its configuration from a file in YAML format. The allowed directives are the same as on the command line, with a top-level list of entities and each entity receiving zero or more parameters as a dict. The above command in YAML format would look like this:
```bash
---
- entity1:
param1: value1
param2: value1
- entity2:
param1: value2
param2: value2
```
## Python Receptor and the 0.6 versions
As of June 25th, this repo is the Go implementation of Receptor. If you are looking for the older Python version of Receptor, including any 0.6.x version, it is now located at .
receptor-1.5.3/cmd/ 0000775 0000000 0000000 00000000000 14754657407 0014131 5 ustar 00root root 0000000 0000000 receptor-1.5.3/cmd/config.go 0000664 0000000 0000000 00000017040 14754657407 0015727 0 ustar 00root root 0000000 0000000 package cmd
import (
"fmt"
"os"
"reflect"
"github.com/ansible/receptor/pkg/backends"
"github.com/ansible/receptor/pkg/certificates"
"github.com/ansible/receptor/pkg/controlsvc"
"github.com/ansible/receptor/pkg/logger"
"github.com/ansible/receptor/pkg/netceptor"
"github.com/ansible/receptor/pkg/services"
"github.com/ansible/receptor/pkg/types"
"github.com/ansible/receptor/pkg/workceptor"
"github.com/ghjm/cmdline"
"github.com/spf13/viper"
)
// Initer is implemented by config types that perform a one-time
// initialization step during the "Init" phase.
type Initer interface {
	Init() error
}

// Preparer is implemented by config types that perform setup work during
// the "Prepare" phase, after all Init steps have run.
type Preparer interface {
	Prepare() error
}

// Runer is implemented by config types that start their service during the
// "Run" phase.
type Runer interface {
	Run() error
}

// Reloader is implemented by config types that can be re-applied when the
// configuration file changes (see ReloadServices).
type Reloader interface {
	Reload() error
}
// ReceptorConfig is the top-level config-v2 schema for a Receptor node,
// populated from the YAML config file via viper/mapstructure unmarshalling.
//
// Pointer fields (and pointer element types) are used where defaults must be
// applied in place after unmarshalling (see SetReceptorConfigDefaults), so
// that "absent from the file" is distinguishable from a zero value.
type ReceptorConfig struct {
	// Used pointer structs to apply defaults to config
	Node             *types.NodeCfg
	Trace            logger.TraceCfg
	LogLevel         *logger.LoglevelCfg              `mapstructure:"log-level"`
	ControlServices  []*controlsvc.CmdlineConfigUnix  `mapstructure:"control-services"`
	TLSClients       []netceptor.TLSClientConfig      `mapstructure:"tls-clients"`
	TLSServer        []netceptor.TLSServerConfig      `mapstructure:"tls-servers"`
	WorkCommands     []workceptor.CommandWorkerCfg    `mapstructure:"work-commands"`
	WorkKubernetes   []*workceptor.KubeWorkerCfg      `mapstructure:"work-kubernetes"`
	WorkSigning      workceptor.SigningKeyPrivateCfg  `mapstructure:"work-signing"`
	WorkVerification workceptor.VerifyingKeyPublicCfg `mapstructure:"work-verification"`
	IPRouters        []services.IPRouterCfg
	TCPClients       []services.TCPProxyOutboundCfg `mapstructure:"tcp-clients"`
	TCPServers       []services.TCPProxyInboundCfg  `mapstructure:"tcp-servers"`
	// Bug fix: udp-clients/udp-servers previously unmarshalled into the TCP
	// proxy config types (with udp-clients even using the *inbound* TCP cfg),
	// which silently misconfigured UDP proxies. They now use the UDP proxy
	// config types from pkg/services.
	UDPClients        []services.UDPProxyOutboundCfg  `mapstructure:"udp-clients"`
	UDPServers        []services.UDPProxyInboundCfg   `mapstructure:"udp-servers"`
	UnixSocketClients []services.UnixProxyOutboundCfg `mapstructure:"unix-socket-clients"`
	UnixSocketServers []services.UnixProxyInboundCfg  `mapstructure:"unix-socket-servers"`
}
// CertificatesConfig holds the certificate-related sections of the config
// file: CA initialization, certificate signing requests, and request signing.
type CertificatesConfig struct {
	InitCA  certificates.InitCAConfig    `mapstructure:"cert-init"`
	MakeReq []certificates.MakeReqConfig `mapstructure:"cert-makereqs"`
	SignReq []certificates.SignReqConfig `mapstructure:"cert-signreqs"`
}
// BackendConfig holds the backend (mesh connectivity) sections of the config
// file. Pointer element types let defaults be applied in place after
// unmarshalling (see SetBackendConfigDefaults).
type BackendConfig struct {
	TCPListeners []*backends.TCPListenerCfg       `mapstructure:"tcp-listeners"`
	UDPListeners []*backends.UDPListenerCfg       `mapstructure:"udp-listeners"`
	WSListeners  []*backends.WebsocketListenerCfg `mapstructure:"ws-listeners"`
	TCPPeers     []*backends.TCPDialerCfg         `mapstructure:"tcp-peers"`
	UDPPeers     []*backends.UDPDialerCfg         `mapstructure:"udp-peers"`
	WSPeers      []*backends.WebsocketDialerCfg   `mapstructure:"ws-peers"`
	// LocalOnly runs a self-contained node with no network backend.
	LocalOnly backends.NullBackendCfg `mapstructure:"local-only"`
}
// PrintPhaseErrorMessage reports a failure of the named config entity during
// the given lifecycle phase (Init/Prepare/Run/reload) on stdout.
func PrintPhaseErrorMessage(configName string, phase string, err error) {
	msg := fmt.Sprintf("ERROR: %s for %s on %s phase\n", err, configName, phase)
	fmt.Print(msg)
}
// ParseReceptorConfig unmarshals the viper-loaded configuration into a
// ReceptorConfig. The configFile argument is currently unused; viper must
// already have read the file before this is called.
func ParseReceptorConfig(configFile string) (*ReceptorConfig, error) {
	receptorConfig := &ReceptorConfig{}
	if err := viper.Unmarshal(receptorConfig); err != nil {
		return nil, err
	}

	return receptorConfig, nil
}
// ParseCertificatesConfig unmarshals the viper-loaded configuration into a
// CertificatesConfig. The configFile argument is currently unused; viper
// must already have read the file before this is called.
func ParseCertificatesConfig(configFile string) (*CertificatesConfig, error) {
	certificatesConfig := &CertificatesConfig{}
	if err := viper.Unmarshal(certificatesConfig); err != nil {
		return nil, err
	}

	return certificatesConfig, nil
}
// ParseBackendConfig unmarshals the viper-loaded configuration into a
// BackendConfig. The configFile argument is currently unused; viper must
// already have read the file before this is called.
func ParseBackendConfig(configFile string) (*BackendConfig, error) {
	backendCfg := &BackendConfig{}
	if err := viper.Unmarshal(backendCfg); err != nil {
		return nil, err
	}

	return backendCfg, nil
}
func isConfigEmpty(v reflect.Value) bool {
isEmpty := true
for i := 0; i < v.NumField(); i++ {
if reflect.Value.IsZero(v.Field(i)) {
continue
}
isEmpty = false
}
return isEmpty
}
// RunConfigV2 walks every field of the unmarshalled config struct value and
// executes the lifecycle phases Init, Prepare, Run — in that order — across
// all configured entries. Slice fields run the phase once per element.
func RunConfigV2(v reflect.Value) {
	for _, phase := range []string{"Init", "Prepare", "Run"} {
		for i := 0; i < v.NumField(); i++ {
			field := v.Field(i)
			// A zero field means that section was absent from the config.
			if field.IsZero() {
				continue
			}
			if field.Kind() == reflect.Slice {
				for j := 0; j < field.Len(); j++ {
					RunPhases(phase, field.Index(j))
				}
			} else {
				RunPhases(phase, field)
			}
		}
	}
}
// RunPhases runs the appropriate function (Init, Prepare, Run) on a command.
func RunPhases(phase string, v reflect.Value) {
cmd := v.Interface()
var err error
if phase == "Init" {
switch c := cmd.(type) {
case Initer:
err = c.Init()
if err != nil {
PrintPhaseErrorMessage(v.Type().Name(), phase, err)
}
default:
}
}
if phase == "Prepare" {
switch c := cmd.(type) {
case Preparer:
err = c.Prepare()
if err != nil {
PrintPhaseErrorMessage(v.Type().Name(), phase, err)
}
default:
}
}
if phase == "Run" {
switch c := cmd.(type) {
case Runer:
err = c.Run()
if err != nil {
PrintPhaseErrorMessage(v.Type().Name(), phase, err)
}
default:
}
}
}
// ReloadServices iterates through key/values calling reload on applicable services.
// Only config entries whose type implements Reloader participate; zero-valued
// (unconfigured) fields are skipped, and slice fields are reloaded per element.
func ReloadServices(v reflect.Value) {
	reload := func(item interface{}) {
		// Config objects that do not satisfy Reloader are left alone.
		r, ok := item.(Reloader)
		if !ok {
			return
		}
		if err := r.Reload(); err != nil {
			PrintPhaseErrorMessage(v.Type().Name(), "reload", err)
		}
	}

	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		// if the service is not initialised, skip
		if field.IsZero() {
			continue
		}
		if field.Kind() == reflect.Slice {
			for j := 0; j < field.Len(); j++ {
				reload(field.Index(j).Interface())
			}
		} else {
			reload(field.Interface())
		}
	}
}
// RunConfigV1 parses and runs the legacy (v1) command line / config file
// format via the cmdline library: it registers all known config types,
// executes the Init/Prepare/Run phases, and — when a config file path was
// given — wires up config reloading through the control service.
func RunConfigV1() {
	cl := cmdline.NewCmdline()
	// Core config types registered directly; "node" is mandatory.
	cl.AddConfigType("node", "Specifies the node configuration of this instance", types.NodeCfg{}, cmdline.Required, cmdline.Singleton)
	cl.AddConfigType("local-only", "Runs a self-contained node with no backend", backends.NullBackendCfg{}, cmdline.Singleton)
	cl.AddConfigType("pyroscope-client", "Profile Receptor using Pyroscope, client ", types.ReceptorPyroscopeCfg{}, cmdline.Singleton)
	// Add registered config types from imported modules
	for _, appName := range []string{
		"receptor-version",
		"receptor-logging",
		"receptor-tls",
		"receptor-certificates",
		"receptor-control-service",
		"receptor-command-service",
		"receptor-ip-router",
		"receptor-proxies",
		"receptor-backends",
		"receptor-workers",
	} {
		cl.AddRegisteredConfigTypes(appName)
	}
	osArgs := os.Args[1:]
	// Parse the command line / config file and run all three phases.
	err := cl.ParseAndRun(osArgs, []string{"Init", "Prepare", "Run"}, cmdline.ShowHelpIfNoArgs)
	if err != nil {
		fmt.Printf("Error: %s\n", err)
		os.Exit(1)
	}
	if cl.WhatRan() != "" {
		// We ran an exclusive command, so we aren't starting any back-ends
		os.Exit(0)
	}
	// Locate the config file path (if any) so a reload can re-parse it later.
	// Only the first --config/-c occurrence is honoured.
	configPath := ""
	for i, arg := range osArgs {
		if arg == "--config" || arg == "-c" {
			if len(osArgs) > i+1 {
				configPath = osArgs[i+1]
			}

			break
		}
	}
	// only allow reloading if a configuration file was provided. If ReloadCL is
	// not set, then the control service reload command will fail
	if configPath != "" {
		// create closure with the passed in args to be ran during a reload
		reloadParseAndRun := func(toRun []string) error {
			return cl.ParseAndRun(osArgs, toRun)
		}
		err = controlsvc.InitReload(configPath, reloadParseAndRun)
		if err != nil {
			fmt.Printf("Error: %s\n", err)
			os.Exit(1)
		}
	}
}
receptor-1.5.3/cmd/defaults.go 0000664 0000000 0000000 00000005067 14754657407 0016277 0 ustar 00root root 0000000 0000000 package cmd
import "github.com/ansible/receptor/pkg/types"
// SetTCPListenerDefaults fills in defaults for TCP listener backends:
// a connection cost of 1.0 and binding to all interfaces.
func SetTCPListenerDefaults(cfg *BackendConfig) {
	for _, l := range cfg.TCPListeners {
		if l.Cost == 0 {
			l.Cost = 1.0
		}
		if l.BindAddr == "" {
			l.BindAddr = "0.0.0.0"
		}
	}
}
// SetUDPListenerDefaults fills in defaults for UDP listener backends:
// a connection cost of 1.0 and binding to all interfaces.
func SetUDPListenerDefaults(cfg *BackendConfig) {
	for _, l := range cfg.UDPListeners {
		if l.Cost == 0 {
			l.Cost = 1.0
		}
		if l.BindAddr == "" {
			l.BindAddr = "0.0.0.0"
		}
	}
}
// SetWSListenerDefaults fills in defaults for websocket listener backends:
// a connection cost of 1.0, binding to all interfaces, and serving the
// websocket at the root path "/".
func SetWSListenerDefaults(config *BackendConfig) {
	for _, listener := range config.WSListeners {
		if listener.Cost == 0 {
			listener.Cost = 1.0
		}
		if listener.BindAddr == "" {
			listener.BindAddr = "0.0.0.0"
		}
		if listener.Path == "" {
			// Bug fix: this previously assigned "/" to BindAddr instead of
			// Path, clobbering the bind address and leaving Path empty.
			listener.Path = "/"
		}
	}
}
// SetUDPPeerDefaults fills in defaults for outbound UDP peers: cost 1.0 and
// redial enabled.
//
// NOTE(review): Redial is forced to true whenever it is false, so an explicit
// `redial: false` in the config is indistinguishable from "unset" and gets
// overridden — confirm whether that is intended (a *bool would fix it).
func SetUDPPeerDefaults(cfg *BackendConfig) {
	for _, p := range cfg.UDPPeers {
		if p.Cost == 0 {
			p.Cost = 1.0
		}
		// Equivalent to the original `if !p.Redial { p.Redial = true }`.
		p.Redial = true
	}
}
// SetTCPPeerDefaults fills in defaults for outbound TCP peers: cost 1.0 and
// redial enabled.
//
// NOTE(review): as with the other peer defaults, an explicit
// `redial: false` in the config is overridden to true — verify intent.
func SetTCPPeerDefaults(cfg *BackendConfig) {
	for _, p := range cfg.TCPPeers {
		if p.Cost == 0 {
			p.Cost = 1.0
		}
		// Equivalent to the original `if !p.Redial { p.Redial = true }`.
		p.Redial = true
	}
}
// SetWSPeerDefaults fills in defaults for outbound websocket peers: cost 1.0
// and redial enabled.
//
// NOTE(review): as with the other peer defaults, an explicit
// `redial: false` in the config is overridden to true — verify intent.
func SetWSPeerDefaults(cfg *BackendConfig) {
	for _, p := range cfg.WSPeers {
		if p.Cost == 0 {
			p.Cost = 1.0
		}
		// Equivalent to the original `if !p.Redial { p.Redial = true }`.
		p.Redial = true
	}
}
// SetCmdlineUnixDefaults fills in defaults for control services: Unix socket
// permissions 0600 and the service name "control".
func SetCmdlineUnixDefaults(cfg *ReceptorConfig) {
	for _, svc := range cfg.ControlServices {
		if svc.Permissions == 0 {
			svc.Permissions = 0o600
		}
		if svc.Service == "" {
			svc.Service = "control"
		}
	}
}
// SetLogLevelDefaults defaults the log level to "error" when a log-level
// stanza is present but no level was given. A nil LogLevel (no stanza at
// all) is left untouched.
func SetLogLevelDefaults(cfg *ReceptorConfig) {
	ll := cfg.LogLevel
	if ll == nil {
		return
	}
	if ll.Level == "" {
		ll.Level = "error"
	}
}
// SetNodeDefaults ensures a node section exists and defaults its data
// directory to /tmp/receptor.
func SetNodeDefaults(cfg *ReceptorConfig) {
	if cfg.Node == nil {
		cfg.Node = &types.NodeCfg{}
	}
	if cfg.Node.DataDir == "" {
		cfg.Node.DataDir = "/tmp/receptor"
	}
}
// SetKubeWorkerDefaults fills in defaults for Kubernetes work types:
// in-cluster authentication and the "logger" stream method.
func SetKubeWorkerDefaults(cfg *ReceptorConfig) {
	for _, w := range cfg.WorkKubernetes {
		if w.AuthMethod == "" {
			w.AuthMethod = "incluster"
		}
		if w.StreamMethod == "" {
			w.StreamMethod = "logger"
		}
	}
}
// SetReceptorConfigDefaults applies post-unmarshal defaults to every section
// of the main receptor config.
func SetReceptorConfigDefaults(config *ReceptorConfig) {
	SetCmdlineUnixDefaults(config)
	SetLogLevelDefaults(config)
	SetNodeDefaults(config)
	SetKubeWorkerDefaults(config)
}
// SetBackendConfigDefaults applies post-unmarshal defaults to every backend
// (listener and peer) section of the backend config.
func SetBackendConfigDefaults(config *BackendConfig) {
	SetTCPListenerDefaults(config)
	SetUDPListenerDefaults(config)
	SetWSListenerDefaults(config)
	SetTCPPeerDefaults(config)
	SetUDPPeerDefaults(config)
	SetWSPeerDefaults(config)
}
receptor-1.5.3/cmd/receptor-cl/ 0000775 0000000 0000000 00000000000 14754657407 0016350 5 ustar 00root root 0000000 0000000 receptor-1.5.3/cmd/receptor-cl/receptor.go 0000664 0000000 0000000 00000001561 14754657407 0020525 0 ustar 00root root 0000000 0000000 package main
import (
"os"
"github.com/ansible/receptor/cmd"
"github.com/ansible/receptor/pkg/logger"
"github.com/ansible/receptor/pkg/netceptor"
)
// main is the receptor CLI entry point. It strips the --config-v2 flag,
// dispatches to the v2 (cobra/viper) or v1 (cmdline) configuration path,
// then blocks until the netceptor instance shuts down.
func main() {
	log := logger.NewReceptorLogger("")

	// Remove --config-v2 from os.Args, remembering whether it was present.
	isV2 := false
	filtered := make([]string, 0, len(os.Args))
	for _, arg := range os.Args {
		if arg == "--config-v2" {
			isV2 = true

			continue
		}
		filtered = append(filtered, arg)
	}
	os.Args = filtered

	if isV2 {
		log.Info("Running v2 cli/config")
		cmd.Execute()
	} else {
		cmd.RunConfigV1()
	}

	// Help output was already printed by the config layer; just exit cleanly.
	for _, arg := range os.Args {
		if arg == "--help" || arg == "-h" {
			os.Exit(0)
		}
	}

	if netceptor.MainInstance.BackendCount() == 0 {
		log.Warning("Nothing to do - no backends are running.\n")
		log.Warning("Run %s --help for command line instructions.\n", os.Args[0])
		os.Exit(1)
	}

	log.Info("Initialization complete\n")
	// Block until the netceptor instance is done.
	<-netceptor.MainInstance.NetceptorDone()
}
receptor-1.5.3/cmd/root.go 0000664 0000000 0000000 00000010101 14754657407 0015434 0 ustar 00root root 0000000 0000000 package cmd
import (
"fmt"
"os"
"reflect"
receptorVersion "github.com/ansible/receptor/internal/version"
"github.com/ansible/receptor/pkg/logger"
"github.com/ansible/receptor/pkg/netceptor"
"github.com/fsnotify/fsnotify"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
cfgFile string // path to the config file (--config flag)
version bool // print the Receptor version and exit (--version flag)
backendConfig *BackendConfig // last-applied backend config, compared against on reload
)
// rootCmd represents the base command when called without any subcommands.
var rootCmd = &cobra.Command{
Use: "receptor",
Short: "Run a receptor instance.",
Long: `
Receptor is an overlay network intended to ease the distribution of work across a large and dispersed collection of workers.
Receptor nodes establish peer-to-peer connections with each other via existing networks.
Once connected, the receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures.`,
Run: handleRootCommand,
}
// Execute runs the root cobra command, exiting with a non-zero status
// if command execution fails.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
// init registers the config-load hook and the root command's flags.
func init() {
cobra.OnInitialize(initConfig)
rootCmd.Flags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/receptor.yaml)")
rootCmd.Flags().BoolVar(&version, "version", false, "Show the Receptor version")
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
l := logger.NewReceptorLogger("")
if cfgFile != "" {
viper.SetConfigFile(cfgFile)
} else {
home, err := os.UserHomeDir()
cobra.CheckErr(err)
viper.AddConfigPath(home)
viper.SetConfigType("yaml")
viper.SetConfigName("receptor")
}
viper.AutomaticEnv()
viper.OnConfigChange(func(e fsnotify.Event) {
l.Info("Config file changed: %s\n", e.Name)
var newConfig *BackendConfig
viper.Unmarshal(&newConfig)
// used because OnConfigChange runs twice for some reason
// allows to skip empty first config
isEmpty := isConfigEmpty(reflect.ValueOf(*newConfig))
if isEmpty {
return
}
SetBackendConfigDefaults(newConfig)
isEqual := reflect.DeepEqual(*backendConfig, *newConfig)
if !isEqual {
// fmt.Println("reloading backends")
// this will do a reload of all reloadable services
// TODO: Optimize to only reload services that have config change
// NOTE: Make sure to account for two things
// if current config had two services then new config has zero cancel those backends
// if services has two items in a slice and one of them has changed iterate and reload on changed service
netceptor.MainInstance.CancelBackends()
l.Info("Reloading backends")
ReloadServices(reflect.ValueOf(*newConfig))
backendConfig = newConfig
return
}
l.Info("No reloadable backends were found.")
})
// TODO: use env to turn off watch config
viper.WatchConfig()
err := viper.ReadInConfig()
if err == nil {
fmt.Fprintln(os.Stdout, "Using config file:", viper.ConfigFileUsed())
}
}
// handleRootCommand implements the bare "receptor" command: it prints
// the version if requested, otherwise parses the v2 config file and
// starts the configured services.
func handleRootCommand(cmd *cobra.Command, args []string) {
	if version {
		fmt.Println(receptorVersion.Version)
		os.Exit(0)
	}

	if cfgFile == "" && viper.ConfigFileUsed() == "" {
		fmt.Fprintln(os.Stderr, "Could not locate config file (default is $HOME/receptor.yaml)")
		os.Exit(1)
	}

	// was: parse errors went to stdout via Printf with no trailing
	// newline, inconsistent with the stderr path above.
	receptorConfig, err := ParseReceptorConfig(cfgFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to decode into struct, %v\n", err)
		os.Exit(1)
	}

	certificatesConfig, err := ParseCertificatesConfig(cfgFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to decode into struct, %v\n", err)
		os.Exit(1)
	}

	backendConfig, err = ParseBackendConfig(cfgFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "unable to decode into struct, %v\n", err)
		os.Exit(1)
	}

	isEmptyReceptorConfig := isConfigEmpty(reflect.ValueOf(*receptorConfig))
	isEmptyReloadableServicesConfig := isConfigEmpty(reflect.ValueOf(*backendConfig))

	// Certificate actions run even when the rest of the config is empty.
	RunConfigV2(reflect.ValueOf(*certificatesConfig))

	if isEmptyReceptorConfig && isEmptyReloadableServicesConfig {
		fmt.Println("empty receptor config, skipping...")
		os.Exit(0)
	}

	SetReceptorConfigDefaults(receptorConfig)
	SetBackendConfigDefaults(backendConfig)

	RunConfigV2(reflect.ValueOf(*receptorConfig))
	RunConfigV2(reflect.ValueOf(*backendConfig))
}
receptor-1.5.3/docs/ 0000775 0000000 0000000 00000000000 14754657407 0014316 5 ustar 00root root 0000000 0000000 receptor-1.5.3/docs/Makefile 0000664 0000000 0000000 00000001771 14754657407 0015764 0 ustar 00root root 0000000 0000000 # Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line, and also
# from the environment for the first two.
# Build settings; SPHINXOPTS and SPHINXBUILD may be overridden from the
# command line or the environment.
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
	@echo -e " doc8 doc8 linter"
	@echo -e " lint All documentation linters (including linkcheck)"
	@echo -e " rstcheck rstcheck linter"
.PHONY: doc8 help lint Makefile rstcheck server
doc8:
	doc8 --ignore D001 .
# Documentation linters
lint: doc8 linkcheck rstcheck
rstcheck:
	-rstcheck --recursive --warn-unknown-settings .
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
# Includes linkcheck
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
# Serve the built documentation locally with Python's built-in HTTP server.
server:
	python3 -m http.server
receptor-1.5.3/docs/diagrams/ 0000775 0000000 0000000 00000000000 14754657407 0016105 5 ustar 00root root 0000000 0000000 receptor-1.5.3/docs/diagrams/AddListenerBackend.md 0000664 0000000 0000000 00000007137 14754657407 0022105 0 ustar 00root root 0000000 0000000 ```mermaid
sequenceDiagram
participant Application
participant Netceptor
participant Netceptor closures
participant Netceptor connectInfo ReadChan
participant Netceptor connectInfo WriteChan
Application->>+Netceptor: New
Netceptor-->>-Application: netceptor instance n1
Application->>+Backend tcp.go: NewTCPListener
Backend tcp.go-->>-Application: backendsTCPListener b1
Application->>+Netceptor: AddBackend(b1)
Netceptor->>Netceptor: backendCancel(context)
Netceptor->>+Backend tcp.go:(Backend Interface) b1.Start(context, waitgroup)
Backend tcp.go->>+Backend utils.go:listenerSession(context, waitgroup, logger, ListenFunc, AcceptFunc, CancelFunc)
Backend utils.go->>+Backend tcp.go:ListenFunc()
Backend tcp.go-->>-Backend utils.go:error
Backend utils.go->>Backend utils.go:create chan BackendSession
Backend utils.go-)+Backend sessChan Closure:go closure
loop Every time AcceptFunc is called
Backend sessChan Closure->>Backend sessChan Closure:AcceptFunc
end
Backend utils.go-->>-Backend tcp.go:chan (Backend Interface) BackendsSession
Backend tcp.go-->>-Netceptor: chan (Backend Interface) BackendsSession
Netceptor->>+Netceptor closures: go closure
Netceptor closures->>+Netceptor closures: go closure
Netceptor closures->>Netceptor closures: runProtocol(context, backend session, backendInfo)
Netceptor closures->>+Netceptor connectInfo ReadChan: make (chan []byte)
Netceptor closures->>+Netceptor connectInfo WriteChan: make (chan []byte)
Netceptor closures-)Netceptor closures: protoReader(backend session)
loop
Backend sessChan Closure->>Netceptor closures: sess.Recv(1 * time.Second)
Netceptor closures->>Netceptor connectInfo ReadChan: ci.ReadChan <- buf
end
loop
Netceptor connectInfo WriteChan->>Netceptor closures: message, more = <-ci.WriteChan
Netceptor closures->>Backend sessChan Closure: sess.Send(message)
end
Netceptor closures-)Netceptor closures: protoWriter(backend session)
Netceptor closures-)+Netceptor closures: s.sendInitialConnectMessage(ci, initDoneChan)
Netceptor closures-)Netceptor closures: s.translateStructToNetwork(MsgTypeRoute, s.makeRoutingUpdate(0))
Netceptor closures-)-Netceptor closures: context.Done()
Netceptor connectInfo ReadChan->>Netceptor closures: data = <-ci.ReadChan
alt established
alt MsgTypeData
Netceptor closures->>Netceptor closures: translateDataToMessage(data []byte)
else MsgTypeRoute
Netceptor closures->>Netceptor closures: s.handleRoutingUpdate(ri, remoteNodeID)
else MsgTypeServiceAdvertisement
Netceptor closures->>Netceptor closures: s.handleServiceAdvertisement(data, remoteNodeID)
else MsgTypeReject
Netceptor closures->>Netceptor closures: return error
end
else !established
alt msgType == MsgTypeRoute
Netceptor closures->>Netceptor closures: add connection
else msgType == MsgTypeReject
Netceptor closures->>Netceptor closures: return error
end
end
Netceptor closures->>Netceptor connectInfo WriteChan: ci.WriteChan <- ConnectMessage
Netceptor closures->>-Netceptor closures: wg Done()
Backend sessChan Closure->>Backend sessChan Closure:CancelFunc
Backend sessChan Closure-->>-Application:context.Done()
Netceptor->>Netceptor closures: wg Done()
Netceptor closures-->>-Netceptor:
Netceptor connectInfo ReadChan-->>-Netceptor closures: context.Done()
Netceptor connectInfo WriteChan-->>-Netceptor closures: context.Done()
```
receptor-1.5.3/docs/make.bat 0000664 0000000 0000000 00000001437 14754657407 0015730 0 ustar 00root root 0000000 0000000 @ECHO OFF
REM Remember the caller's directory and switch to this script's location.
pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
REM No target given: show help.
if "%1" == "" goto help
REM Probe for sphinx-build; errorlevel 9009 means the command was not found.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)
REM Forward the requested target to sphinx-build in "make mode".
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
receptor-1.5.3/docs/requirements.txt 0000664 0000000 0000000 00000000067 14754657407 0017605 0 ustar 00root root 0000000 0000000 doc8
pbr
rstcheck >= 6
six
sphinx
sphinx_ansible_theme
receptor-1.5.3/docs/source/ 0000775 0000000 0000000 00000000000 14754657407 0015616 5 ustar 00root root 0000000 0000000 receptor-1.5.3/docs/source/conf.py 0000664 0000000 0000000 00000013207 14754657407 0017120 0 ustar 00root root 0000000 0000000 # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "receptor"
copyright = "Red Hat Ansible"
author = "Red Hat Ansible"
# The full version, including alpha/beta/rc tags
# release = '0.0.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autosectionlabel",  # auto-generate anchors for every section
    "pbr.sphinxext",  # version/release metadata supplied by pbr
]
# Prefix generated section labels with the document name to avoid collisions.
autosectionlabel_prefix_document = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["Thumbs.db", ".DS_Store"]
pygments_style = "ansible"
language = "en"
master_doc = "index"
source_suffix = ".rst"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_ansible_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
sidebar_collapse = False
# -- Options for HTML output -------------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "receptorrdoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# One tuple per output PDF:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    (master_doc, "receptor.tex", "receptor Documentation", "Red Hat Ansible", "manual"),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ("receptorctl/receptorctl_index", "receptorctl", "receptor client", [author], 1),
    (
        "receptorctl/receptorctl_connect",
        "receptorctl-connect",
        "Establishes a connection between local client and a Receptor node.",
        [author],
        1,
    ),
    ("receptorctl/receptorctl_ping", "receptorctl-ping", "Tests the network reachability of Receptor nodes.", [author], 1),
    ("receptorctl/receptorctl_reload", "receptorctl-reload", "Reloads the Receptor configuration for the connected node.", [author], 1),
    ("receptorctl/receptorctl_status", "receptorctl-status", "Displays the status of the Receptor network.", [author], 1),
    (
        "receptorctl/receptorctl_traceroute",
        "receptorctl-traceroute",
        "Displays the network route that packets follow to Receptor nodes.",
        [author],
        1,
    ),
    (
        "receptorctl/receptorctl_version",
        "receptorctl-version",
        "Displays version information for receptorctl and\
the Receptor node to which it is connected.",
        [author],
        1,
    ),
    ("receptorctl/receptorctl_work_cancel", "receptorctl-work-cancel", "Terminates one or more units of work.", [author], 1),
    ("receptorctl/receptorctl_work_list", "receptorctl-work-list", "Displays known units of work.", [author], 1),
    ("receptorctl/receptorctl_work_release", "receptorctl-work-release", "Deletes one or more units of work.", [author], 1),
    ("receptorctl/receptorctl_work_results", "receptorctl-work-results", "Gets results for units of work.", [author], 1),
    ("receptorctl/receptorctl_work_submit", "receptorctl-work-submit", "Requests a Receptor node to run a unit of work.", [author], 1),
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "receptor",
        "receptor Documentation",
        author,
        "receptor",
        "Overlay network to establish a persistent mesh.",
        "Miscellaneous",
    ),
]
# -- Options for QtHelp output -------------------------------------------
# -- Options for linkcheck builder ---------------------------------------
# Do not fail the docs build when an external link merely times out.
linkcheck_report_timeouts_as_broken = False
linkcheck_timeout = 30
# -- Options for xml builder ---------------------------------------------
xml_pretty = True
# -- Options for C domain ------------------------------------------------
# -- Options for C++ domain ----------------------------------------------
# -- Options for Python domain -------------------------------------------
# -- Options for Javascript domain ---------------------------------------
receptor-1.5.3/docs/source/contributing.rst 0000664 0000000 0000000 00000004460 14754657407 0021063 0 ustar 00root root 0000000 0000000 ******************
Contributor guide
******************
Receptor is an open source project that lives at https://github.com/ansible/receptor
.. contents::
:local:
===============
Code of conduct
===============
All project contributors must abide by the `Ansible Code of Conduct `_.
============
Contributing
============
Receptor welcomes community contributions! See the :ref:`dev_guide` for information about receptor development.
-------------
Pull requests
-------------
Contributions to Receptor go through the Github pull request process.
An initial checklist for your change to increase the likelihood of acceptance:
- No issues when running linters/code checkers
- No issues from unit/functional tests
- Write descriptive and meaningful commit messages. See `How to write a Git commit message `_ and `Learn to write good commit message and description `_.
===============
Release process
===============
Before starting the release process verify that `make test` and `go test tests/goroutines/simple_config.go` tests pass.
Maintainers have the ability to run the `Stage Release`_ workflow. Running this workflow will:
- Build and push the container image to ghcr.io. This serves as a staging environment where the image can be tested.
- Create a draft release at ``_
After the draft release has been created, edit it and populate the description. Once you are done, click "Publish release".
After the release is published, the `Promote Release `_ workflow will run automatically. This workflow will:
- Publish ``receptorctl`` to `PyPI `_.
- Pull the container image from ghcr.io, re-tag, and push to `Quay.io `_.
- Build binaries for various OSes/platforms, and attach them to the `release `_.
.. note::
If you need to re-run `Stage Release`_ more than once, delete the tag beforehand to prevent the workflow from failing.
.. _Stage Release: https://github.com/ansible/receptor/actions/workflows/stage.yml
receptor-1.5.3/docs/source/developer_guide.rst 0000664 0000000 0000000 00000031121 14754657407 0021510 0 ustar 00root root 0000000 0000000 .. _dev_guide:
===============
Developer guide
===============
Receptor is an open source project that lives at the `ansible/receptor repository <https://github.com/ansible/receptor>`_.
.. contents::
:local:
See the :ref:`contributing:contributing` for more general details.
---------
Debugging
---------
^^^^^^^^^^^
Unix Socket
^^^^^^^^^^^
If you don't want to use receptorctl to control nodes, ``socat`` can be used to interact with unix sockets directly.
Example:
.. code-block:: bash
echo -e '{"command": "work", "subcommand": "submit", "node": "execution", "worktype": "cat"}\n"Hi"' | socat - UNIX-CONNECT:/tmp/control.sock
-------
Linters
-------
All code must pass a suite of Go linters.
There is a pre-commit yaml file in the Receptor repository that points to the linter suite.
It is strongly recommended to install the pre-commit yaml so that the linters run locally on each commit.
.. code-block:: bash
cd $HOME
go get github.com/golangci/golangci-lint/cmd/golangci-lint
pip install pre-commit
cd receptor
pre-commit install
See `Pre commit <https://pre-commit.com/>`_ and `Golangci-lint <https://golangci-lint.run/>`_ for more details on installing and using these tools.
-------
Testing
-------
^^^^^^^^^^^
Development
^^^^^^^^^^^
Write unit tests for new features or functionality.
Add/Update tests for bug fixes.
^^^^^^^
Mocking
^^^^^^^
We are using gomock to generate mocks for our unit tests. The mocks are living inside of a package under the real implementation, prefixed by ``mock_``. An example is the package mock_workceptor under pkg/workceptor.
In order to generate a mock for a particular file, run:
.. code-block:: bash
mockgen -source=pkg/filename.go -destination=pkg/mock_pkg/mock_filename.go
For example, to create/update mocks for Workceptor, we can run:
.. code-block:: bash
mockgen -source=pkg/workceptor/workceptor.go -destination=pkg/workceptor/mock_workceptor/workceptor.go
^^^^^^^^^^
Kubernetes
^^^^^^^^^^
Some of the tests require access to a Kubernetes cluster; these tests will load in the kubeconfig file located at ``$HOME/.kube/config``. One simple way to make these tests work is to start minikube locally before running ``make test``. See https://minikube.sigs.k8s.io/docs/start/ for more information about minikube.
To skip tests that depend on Kubernetes, set environment variable ``export SKIP_KUBE=1``.
^^^^^^^^^
Execution
^^^^^^^^^
Pull requests must pass a suite of unit and integration tests before being merged into ``devel``.
``make test`` will run the full test suite locally.
-----------
Source code
-----------
The following sections help orient developers to the Receptor code base and provide a starting point for understanding how Receptor works.
^^^^^^^^^^^^^^^^^^^^^
Parsing receptor.conf
^^^^^^^^^^^^^^^^^^^^^
Let's see how items in the config file are mapped to Golang internals.
As an example, in tcp.go
.. code-block:: go
cmdline.RegisterConfigTypeForApp("receptor-backends",
"tcp-peer", "Make an outbound backend connection to a TCP peer", TCPDialerCfg{}, cmdline.Section(backendSection))
"tcp-peer" is a top-level key (action item) in receptor.conf
.. code-block:: yaml
tcp-peers:
- address: localhost:2222
``RegisterConfigTypeForApp`` tells the cmdline parser that "tcp-peer" is mapped to the ``TCPDialerCfg{}`` structure.
``main()`` in ``receptor.go`` is the entry point for a running Receptor process.
In ``receptor.go`` (modified for clarity):
.. code-block:: go
cl.ParseAndRun("receptor.conf", []string{"Init", "Prepare", "Run"})
A Receptor config file has many action items, such as ```node``, ``work-command``, and ``tcp-peer``. ``ParseAndRun`` is how each of these items are instantiated when Receptor starts.
Specifically, ParseAndRun will run the Init, Prepare, and Run methods associated with each action item.
Here is the Prepare method for ``TCPDialerCfg``. By the time this code executes, the cfg structure has already been populated with the data provided in the config file.
.. code-block:: go
// Prepare verifies the parameters are correct.
func (cfg TCPDialerCfg) Prepare() error {
if cfg.Cost <= 0.0 {
return fmt.Errorf("connection cost must be positive")
}
return nil
}
This simply does a check to make sure the provided Cost is valid.
The Run method for the ``TCPDialerCfg`` object:
.. code-block:: go
// Run runs the action.
func (cfg TCPDialerCfg) Run() error {
logger.Debug("Running TCP peer connection %s\n", cfg.Address)
host, _, err := net.SplitHostPort(cfg.Address)
if err != nil {
return err
}
tlscfg, err := netceptor.MainInstance.GetClientTLSConfig(cfg.TLS, host, "dns")
if err != nil {
return err
}
b, err := NewTCPDialer(cfg.Address, cfg.Redial, tlscfg)
if err != nil {
logger.Error("Error creating peer %s: %s\n", cfg.Address, err)
return err
}
err = netceptor.MainInstance.AddBackend(b, cfg.Cost, nil)
if err != nil {
return err
}
return nil
}
This gets a new TCP dialer object and passes it to the netceptor ``AddBackend`` method, so that it can be processed further.
``AddBackend`` will start proper Go routines that periodically dial the address defined in the TCP dialer structure, which will lead to a proper TCP connection to another Receptor node.
In general, when studying how the start up process works in Receptor, take a look at the ``Init``, ``Prepare``, and ``Run`` methods throughout the code, as these are the entry points to running those specific components of Receptor.
^^^^
Ping
^^^^
Studying how pings work in Receptor will provide a useful glimpse into the internal workings of netceptor -- the main component of Receptor that handles connections and data traffic over the mesh.
``receptorctl --socket /tmp/foo.sock ping bar``
The control-service on `foo` will receive this command and subsequently call the following,
**ping.go::ping**
.. code-block:: go
func ping(nc *netceptor.Netceptor, target string, hopsToLive byte) (time.Duration, string, error) {
pc, err := nc.ListenPacket("")
``target`` is the target node, "bar" in this case.
``nc.ListenPacket("")`` starts a new ephemeral service and returns a ``PacketConn`` object. This is a datagram connection that has a WriteTo() and ReadFrom() method for sending and receiving data to other nodes on the mesh.
**packetconn.go::ListenPacket**
.. code-block:: go
pc := &PacketConn{
s: s,
localService: service,
recvChan: make(chan *messageData),
advertise: false,
adTags: nil,
connType: ConnTypeDatagram,
hopsToLive: s.maxForwardingHops,
}
s.listenerRegistry[service] = pc
return pc, nil
``s`` is the main netceptor object, and a reference to the PacketConn object is stored in netceptor's ``listenerRegistry`` map.
**ping.go::ping**
.. code-block:: go
_, err = pc.WriteTo([]byte{}, nc.NewAddr(target, "ping"))
Sends an empty message to the address "bar:ping" on the mesh. Recall that nodes are analogous to DNS names, and services are like port numbers.
``WriteTo`` calls ``sendMessageWithHopsToLive``
**netceptor.go::sendMessageWithHopsToLive**
.. code-block:: go
md := &messageData{
FromNode: s.nodeID,
FromService: fromService,
ToNode: toNode,
ToService: toService,
HopsToLive: hopsToLive,
Data: data,
}
return s.handleMessageData(md)
Here the message is constructed with essential information such as the source node and service, and the destination node and service. The Data field contains the actual message, which is empty in this case.
``handleMessageData`` calls ``forwardMessage`` with the ``md`` object.
**netceptor.go::forwardMessage**
.. code-block:: go
nextHop, ok := s.routingTable[md.ToNode]
The current node might not be directly connected to the target node, and thus netceptor needs to determine what is the next hop to pass the data to. ``s.routingTable`` is a map where the key is a destination ("bar"), and the value is the next hop along the path to that node. In a simple two-node setup with `foo` and `bar`, ``s.routingTable["bar"] == "bar"``.
**netceptor.go::forwardMessage**
.. code-block:: go
c, ok := s.connections[nextHop]
c.WriteChan <- message
``c`` here is a ``ConnInfo`` object, which interacts with the various backend connections (UDP, TCP, websockets).
``WriteChan`` is a golang channel. Channels allows communication between separate threads (Go routines) running in the application. When `foo` and `bar` had first started, they established a backend connection. Each node runs the netceptor runProtocol go routine, which in turn starts a protoWriter go routine.
**netceptor.go::protoWriter**
.. code-block:: go
case message, more := <-ci.WriteChan:
err := sess.Send(message)
So before the "ping" command was issued, this protoWriter Go routine was already running and waiting to read messages from WriteChan.
``sess`` is a BackendSession object. BackendSession is an abstraction over the various available backends. If `foo` and `bar` are connected via TCP, then ``sess.Send(message)`` will pass along data to the already established TCP session.
**tcp.go::Send**
.. code-block:: go
func (ns *TCPSession) Send(data []byte) error {
buf := ns.framer.SendData(data)
n, err := ns.conn.Write(buf)
``ns.conn`` is net.Conn object, which is part of the Golang standard library.
At this point the message has left the node via a backend connection, where it will be received by `bar`.
Let's review the code from `bar`'s perspective and how it handles the incoming message that is targeting its "ping" service.
On the receiving side, the data will first be read here
**tcp.go::Recv**
.. code-block:: go
n, err := ns.conn.Read(buf)
ns.framer.RecvData(buf[:n])
Recv was called in protoReader Go routine, similar to the protoWriter when the message sent from `foo`.
Note that ``ns.conn.Read(buf)`` might not contain the full message, so the data is buffered until the ``messageReady()`` returns true. The size of the message is tagged in the message itself, so when Recv has received N bytes, and the message is N bytes, Recv will return.
**netceptor.go::protoReader**
.. code-block:: go
buf, err := sess.Recv(1 * time.Second)
ci.ReadChan <- buf
The data is passed to a ReadChan channel.
**netceptor.go::runProtocol**
.. code-block:: go
case data := <-ci.ReadChan:
message, err := s.translateDataToMessage(data)
err = s.handleMessageData(message)
The data is read from the channel, and deserialized into an actual message format in ``translateDataToMessage``.
**netceptor.go::handleMessageData**
.. code-block:: go
if md.ToNode == s.nodeID {
handled, err := s.dispatchReservedService(md)
This checks whether the destination node indicated in the message is the current node. If so, the message can be dispatched to the service.
"ping" is a reserved service in the netceptor instance.
.. code-block:: go
s.reservedServices = map[string]func(*messageData) error{
"ping": s.handlePing,
}
**netceptor.go::handlePing**
.. code-block:: go
func (s *Netceptor) handlePing(md *messageData) error {
return s.sendMessage("ping", md.FromNode, md.FromService, []byte{})
}
This is the ping reply handler. It sends an empty message to the FromNode (`foo`).
The FromService here is not "ping", but rather the ephemeral service that was created from ``ListenPacket("")`` in ping.go on `foo`.
With ``trace`` enabled in the Receptor configuration, the following log statements show the reply from ``bar``,
.. code-block:: bash
TRACE --- Received data length 0 from foo:h73opPEh to bar:ping via foo
TRACE --- Sending data length 0 from bar:ping to foo:h73opPEh
So the ephemeral service on `foo` is called h73opPEh (randomly generated string).
From here, the message from `bar` will passed along in a very similar fashion as the original ping message sent from `foo`.
Back on node `foo`, the message is received and finally handled in ping.go
**ping.go::ping**
.. code-block:: go
_, addr, err := pc.ReadFrom(buf)
.. code-block:: go
case replyChan <- fromNode:
.. code-block:: go
case remote := <-replyChan:
return time.Since(startTime), remote, nil
The data is read from the PacketConn object, written to a channel, where it is read later by the ping() function, and ping() returns with the roundtrip delay, ``time.Since(startTime)``.
receptor-1.5.3/docs/source/getting_started_guide/ 0000775 0000000 0000000 00000000000 14754657407 0022162 5 ustar 00root root 0000000 0000000 receptor-1.5.3/docs/source/getting_started_guide/creating_a_basic_network.rst 0000664 0000000 0000000 00000003304 14754657407 0027722 0 ustar 00root root 0000000 0000000
.. _creating_a_basic_network:
###############################
Creating a basic 3-node network
###############################
In this section, we will create a three-node network.
The three nodes are: foo, bar, and baz.
`foo -> bar <- baz`
foo and baz are directly connected to bar with TCP connections.
foo can reach baz by sending network packets through bar.
***********************
Receptor configurations
***********************
1. Create three configuration files, one for each node.
``foo.yml``
.. code-block:: yaml
---
- node:
id: foo
- control-service:
service: control
filename: /tmp/foo.sock
- tcp-peer:
address: localhost:2222
- log-level:
level: debug
...
``bar.yml``
.. code-block:: yaml
---
- node:
id: bar
- control-service:
service: control
filename: /tmp/bar.sock
- tcp-listener:
port: 2222
- log-level:
level: debug
...
``baz.yml``
.. code-block:: yaml
---
- node:
id: baz
- control-service:
service: control
filename: /tmp/baz.sock
- tcp-peer:
address: localhost:2222
- log-level:
level: debug
- work-command:
workType: echo
command: bash
params: "-c \"while read -r line; do echo $line; sleep 1; done\""
allowruntimeparams: true
...
2. Run the services in separate terminals.
.. code-block:: bash
./receptor --config foo.yml
.. code-block:: bash
./receptor --config bar.yml
.. code-block:: bash
./receptor --config baz.yml
.. seealso::
:ref:`configuring_receptor_with_a_config_file`
Configuring Receptor with a configuration file
:ref:`connecting_nodes`
Detail on connecting receptor nodes
receptor-1.5.3/docs/source/getting_started_guide/index.rst 0000664 0000000 0000000 00000001472 14754657407 0024027 0 ustar 00root root 0000000 0000000 #############################
Getting started with Receptor
#############################
Receptor is an overlay network that distributes work across large and dispersed collections of worker nodes.
Receptor nodes establish peer-to-peer connections through existing networks.
Once connected, the Receptor mesh provides datagram (UDP-like) and stream (TCP-like) capabilities to applications, as well as robust unit-of-work handling with resiliency against transient network failures.
.. image:: mesh.png
.. toctree::
:maxdepth: 1
:caption: Contents:
introduction
installing_receptor
creating_a_basic_network
trying_sample_commands
.. seealso::
:ref:`interacting_with_nodes`
Further examples of working with nodes
:ref:`connecting_nodes`
Detail on connecting receptor nodes
receptor-1.5.3/docs/source/getting_started_guide/installing_receptor.rst 0000664 0000000 0000000 00000000666 14754657407 0026773 0 ustar 00root root 0000000 0000000
.. _installing_receptor:
###################
Installing Receptor
###################
1. `Download receptor `_
2. Install receptor (per installation guide below)
3. Install receptorctl
.. code-block:: bash
pip install receptorctl
.. seealso::
:ref:`installing`
Detailed installation instructions
:ref:`using_receptor_containers`
Using receptor in containers
receptor-1.5.3/docs/source/getting_started_guide/introduction.rst 0000664 0000000 0000000 00000000741 14754657407 0025437 0 ustar 00root root 0000000 0000000 ########################
Introduction to receptor
########################
Receptor is an overlay network.
It eases the work distribution across a large and dispersed collection
of workers
Receptor nodes establish peer-to-peer connections with each other through
existing networks
Once connected, the receptor mesh provides:
* Datagram (UDP-like) and stream (TCP-like) capabilities to applications
* Robust unit-of-work handling
* Resiliency against transient network failures
receptor-1.5.3/docs/source/getting_started_guide/mesh.png 0000664 0000000 0000000 00000056350 14754657407 0023635 0 ustar 00root root 0000000 0000000 PNG
IHDR Ea oiCCPicc (u;KA?%D"jbB"BPKM|5%le7A`c!X6
(?W#a$2{?̹̜GXײVk
W7Ifs3Qz?z$-aCp%PF**NWSLC8Z
fV;7[\/s ::F$`7O^<
JHo@ԢtMJMOrvŶ?]9)8*W%/w⺮rzHNT
ϡcZͪ#D7neoֶgI pHYs ~ IDATx^Sֆ
ĮHQTT
Q/"
^^"Bbǎ(4EE*w?$'9)z<''{;Y{ZS/b$" " " " " "P@Kl-" " " " " MQpĪ@D@D@D@D@D@@ H(8bU " " " " " Cs@D@D@D@D@Dx*9 " " " " "PpR<
XJ`" uL@Gz%:vhgۛE @X)Ѷ" " " B`.0|}
@Qn#@;wYkW_}eXc2&G@>7-ƍ]'m1(WR-A."PڬNu䁀<@T" " " " " /@"PnzO?c=֬Jfȑ'4?Ymn5h AI&|̚5zr!^zaoE-2vy%~3/G9uQfu-[R?(+6IԶCɯ-Zv_vޱy(K.]L^}ՔiIkƦ]r%p
B76Uh
"o .[l>]p'*=)Ze(?~B{x
iӦ}ݗ*?]W;jҟhV
>-\0lذ*OU`*F)|M|VQ6lsuYE3eVƌSV:#"%LȪ\()c눀W<`e
>}zлwpwI'%E}2/_曁}*]PqO
L]?/ا\7k-,H[o w؞G}4l'|aAѱNBPgK}~C{S`-gws\}vGǵȳZk}_]> 'Xc[o
lcko}2VX-ě7oܝ~igHM}8Ү6m;?C߾}Cv"nR$ ţGMmW